summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--continuedev/src/continuedev/libs/constants/default_config.py74
-rw-r--r--continuedev/src/continuedev/libs/llm/ggml.py17
-rw-r--r--continuedev/src/continuedev/libs/llm/text_gen_interface.py161
-rw-r--r--continuedev/src/continuedev/plugins/policies/default.py10
-rw-r--r--continuedev/src/continuedev/plugins/steps/chat.py2
-rw-r--r--extension/react-app/src/components/ContinueButton.tsx25
-rw-r--r--extension/react-app/src/redux/slices/serverStateReducer.ts20
7 files changed, 211 insertions, 98 deletions
diff --git a/continuedev/src/continuedev/libs/constants/default_config.py b/continuedev/src/continuedev/libs/constants/default_config.py
index a22f941d..c72e4bcf 100644
--- a/continuedev/src/continuedev/libs/constants/default_config.py
+++ b/continuedev/src/continuedev/libs/constants/default_config.py
@@ -2,19 +2,13 @@ default_config = """\
\"\"\"
This is the Continue configuration file.
-See https://continue.dev/docs/customization to learn more.
+See https://continue.dev/docs/customization for documentation of the available options.
\"\"\"
-import subprocess
-
-from continuedev.src.continuedev.core.main import Step
-from continuedev.src.continuedev.core.sdk import ContinueSDK
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.core.config import CustomCommand, SlashCommand, ContinueConfig
from continuedev.src.continuedev.plugins.context_providers.github import GitHubIssuesContextProvider
-from continuedev.src.continuedev.plugins.context_providers.google import GoogleContextProvider
from continuedev.src.continuedev.libs.llm.maybe_proxy_openai import MaybeProxyOpenAI
-from continuedev.src.continuedev.plugins.policies.default import DefaultPolicy
from continuedev.src.continuedev.plugins.steps.open_config import OpenConfigStep
from continuedev.src.continuedev.plugins.steps.clear_history import ClearHistoryStep
@@ -26,66 +20,22 @@ from continuedev.src.continuedev.plugins.context_providers.search import SearchC
from continuedev.src.continuedev.plugins.context_providers.diff import DiffContextProvider
from continuedev.src.continuedev.plugins.context_providers.url import URLContextProvider
-class CommitMessageStep(Step):
- \"\"\"
- This is a Step, the building block of Continue.
- It can be used below as a slash command, so that
- run will be called when you type '/commit'.
- \"\"\"
- async def run(self, sdk: ContinueSDK):
-
- # Get the root directory of the workspace
- dir = sdk.ide.workspace_directory
-
- # Run git diff in that directory
- diff = subprocess.check_output(
- ["git", "diff"], cwd=dir).decode("utf-8")
-
- # Ask the LLM to write a commit message,
- # and set it as the description of this step
- self.description = await sdk.models.default.complete(
- f"{diff}\\n\\nWrite a short, specific (less than 50 chars) commit message about the above changes:")
-
-
config = ContinueConfig(
-
- # If set to False, we will not collect any usage data
- # See here to learn what anonymous data we collect: https://continue.dev/docs/telemetry
allow_anonymous_telemetry=True,
-
models=Models(
- # You can try Continue with limited free usage. Please eventually replace with your own API key.
- # Learn how to customize models here: https://continue.dev/docs/customization#change-the-default-llm
default=MaybeProxyOpenAI(api_key="", model="gpt-4"),
medium=MaybeProxyOpenAI(api_key="", model="gpt-3.5-turbo")
),
-
- # Set a system message with information that the LLM should always keep in mind
- # E.g. "Please give concise answers. Always respond in Spanish."
system_message=None,
-
- # Set temperature to any value between 0 and 1. Higher values will make the LLM
- # more creative, while lower values will make it more predictable.
temperature=0.5,
-
- # Custom commands let you map a prompt to a shortened slash command
- # They are like slash commands, but more easily defined - write just a prompt instead of a Step class
- # Their output will always be in chat form
custom_commands=[
- # CustomCommand(
- # name="test",
- # description="Write unit tests for the higlighted code",
- # prompt="Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
- # )
+ CustomCommand(
+ name="test",
+        description="Write unit tests for the highlighted code",
+ prompt="Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
+ )
],
-
- # Slash commands let you run a Step from a slash command
slash_commands=[
- # SlashCommand(
- # name="commit",
- # description="This is an example slash command. Use /config to edit it and create more",
- # step=CommitMessageStep,
- # )
SlashCommand(
name="edit",
description="Edit code in the current file or the highlighted code",
@@ -117,19 +67,11 @@ config = ContinueConfig(
step=ShareSessionStep,
)
],
-
- # Context providers let you quickly select context by typing '@'
- # Uncomment the following to
- # - quickly reference GitHub issues
- # - show Google search results to the LLM
context_providers=[
# GitHubIssuesContextProvider(
# repo_name="<your github username or organization>/<your repo name>",
# auth_token="<your github auth token>"
# ),
- # GoogleContextProvider(
- # serper_api_key="<your serper.dev api key>"
- # )
SearchContextProvider(),
DiffContextProvider(),
URLContextProvider(
@@ -138,9 +80,5 @@ config = ContinueConfig(
]
)
],
-
- # Policies hold the main logic that decides which Step to take next
- # You can use them to design agents, or deeply customize Continue
- policy=DefaultPolicy()
)
"""
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index b4548ff2..be82c445 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -30,7 +30,7 @@ class GGML(LLM):
self.write_log = write_log
async def stop(self):
- await self._client_session.close()
+ pass
@property
def name(self):
@@ -106,20 +106,25 @@ class GGML(LLM):
async with client_session.post(
f"{self.server_url}/v1/chat/completions",
json={"messages": messages, **args},
+ headers={"Content-Type": "application/json"},
) as resp:
# This is streaming application/json instaed of text/event-stream
async for line, end in resp.content.iter_chunks():
json_chunk = line.decode("utf-8")
- if json_chunk.startswith(": ping - ") or json_chunk.startswith(
- "data: [DONE]"
- ):
- continue
chunks = json_chunk.split("\n")
for chunk in chunks:
- if chunk.strip() != "":
+ if (
+ chunk.strip() == ""
+ or json_chunk.startswith(": ping - ")
+ or json_chunk.startswith("data: [DONE]")
+ ):
+ continue
+ try:
yield json.loads(chunk[6:])["choices"][0][
"delta"
] # {"role": "assistant", "content": "..."}
+ except:
+ pass
# Because quite often the first attempt fails, and it works thereafter
self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}")
diff --git a/continuedev/src/continuedev/libs/llm/text_gen_interface.py b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
new file mode 100644
index 00000000..380f7b48
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
@@ -0,0 +1,161 @@
+import json
+from typing import Any, Callable, Coroutine, Dict, Generator, List, Optional, Union
+
+import websockets
+
+from ...core.main import ChatMessage
+from ..util.count_tokens import (
+ DEFAULT_ARGS,
+ compile_chat_messages,
+ count_tokens,
+ format_chat_messages,
+)
+from . import LLM
+
+
+class TextGenUI(LLM):
+ # this is model-specific
+ model: str = "text-gen-ui"
+ max_context_length: int = 2048
+ server_url: str = "http://localhost:5000"
+ streaming_url: str = "http://localhost:5005"
+ verify_ssl: Optional[bool] = None
+
+ requires_write_log = True
+
+ write_log: Optional[Callable[[str], None]] = None
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def start(self, write_log: Callable[[str], None], **kwargs):
+ self.write_log = write_log
+
+ async def stop(self):
+ pass
+
+ @property
+ def name(self):
+ return self.model
+
+ @property
+ def context_length(self):
+ return self.max_context_length
+
+ @property
+ def default_args(self):
+ return {**DEFAULT_ARGS, "model": self.name, "max_tokens": 1024}
+
+ def _transform_args(self, args):
+ args = {
+ **args,
+ "max_new_tokens": args.get("max_tokens", 1024),
+ }
+ args.pop("max_tokens", None)
+ return args
+
+ def count_tokens(self, text: str):
+ return count_tokens(self.name, text)
+
+ async def stream_complete(
+ self, prompt, with_history: List[ChatMessage] = None, **kwargs
+ ) -> Generator[Union[Any, List, Dict], None, None]:
+ args = self.default_args.copy()
+ args.update(kwargs)
+ args["stream"] = True
+
+ args = {**self.default_args, **kwargs}
+
+ self.write_log(f"Prompt: \n\n{prompt}")
+ completion = ""
+
+ ws_url = f"{self.streaming_url.replace('http://', 'ws://').replace('https://', 'wss://')}"
+ payload = json.dumps({"prompt": prompt, **self._transform_args(args)})
+ async with websockets.connect(
+ f"{ws_url}/api/v1/stream", ping_interval=None
+ ) as websocket:
+ await websocket.send(payload)
+
+ while True:
+ incoming_data = await websocket.recv()
+ incoming_data = json.loads(incoming_data)
+
+ match incoming_data["event"]:
+ case "text_stream":
+ completion += incoming_data["text"]
+ yield incoming_data["text"]
+ case "stream_end":
+ break
+
+ self.write_log(f"Completion: \n\n{completion}")
+
+ async def stream_chat(
+ self, messages: List[ChatMessage] = None, **kwargs
+ ) -> Generator[Union[Any, List, Dict], None, None]:
+ args = {**self.default_args, **kwargs}
+ messages = compile_chat_messages(
+ self.name,
+ messages,
+ self.context_length,
+ args["max_tokens"],
+ None,
+ functions=args.get("functions", None),
+ system_message=self.system_message,
+ )
+ args["stream"] = True
+
+ async def generator():
+ ws_url = f"{self.streaming_url.replace('http://', 'ws://').replace('https://', 'wss://')}"
+ history = list(map(lambda x: x["content"], messages))
+ payload = json.dumps(
+ {
+ "user_input": messages[-1]["content"],
+ "history": {"internal": [history], "visible": [history]},
+ **self._transform_args(args),
+ }
+ )
+ async with websockets.connect(
+ f"{ws_url}/api/v1/chat-stream", ping_interval=None
+ ) as websocket:
+ await websocket.send(payload)
+
+ prev = ""
+ while True:
+ incoming_data = await websocket.recv()
+ incoming_data = json.loads(incoming_data)
+
+ match incoming_data["event"]:
+ case "text_stream":
+ visible = incoming_data["history"]["visible"][-1]
+ if len(visible) > 0:
+ yield {
+ "role": "assistant",
+ "content": visible[-1].replace(prev, ""),
+ }
+ prev = visible[-1]
+ case "stream_end":
+ break
+
+ # Because quite often the first attempt fails, and it works thereafter
+ self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}")
+ completion = ""
+ async for chunk in generator():
+ yield chunk
+ if "content" in chunk:
+ completion += chunk["content"]
+
+ self.write_log(f"Completion: \n\n{completion}")
+
+ async def complete(
+ self, prompt: str, with_history: List[ChatMessage] = None, **kwargs
+ ) -> Coroutine[Any, Any, str]:
+ generator = self.stream_chat(
+ [ChatMessage(role="user", content=prompt, summary=prompt)], **kwargs
+ )
+
+ completion = ""
+ async for chunk in generator:
+ if "content" in chunk:
+ completion += chunk["content"]
+
+ return completion
diff --git a/continuedev/src/continuedev/plugins/policies/default.py b/continuedev/src/continuedev/plugins/policies/default.py
index ef88c8d6..550defa9 100644
--- a/continuedev/src/continuedev/plugins/policies/default.py
+++ b/continuedev/src/continuedev/plugins/policies/default.py
@@ -1,15 +1,16 @@
+import os
from textwrap import dedent
from typing import Type, Union
from ...core.config import ContinueConfig
from ...core.main import History, Policy, Step
from ...core.observation import UserInputObservation
+from ...libs.util.paths import getServerFolderPath
from ..steps.chat import SimpleChatStep
from ..steps.core.core import MessageStep
from ..steps.custom_command import CustomCommandStep
from ..steps.main import EditHighlightedCodeStep
from ..steps.steps_on_startup import StepsOnStartupStep
-from ..steps.welcome import WelcomeStep
def parse_slash_command(inp: str, config: ContinueConfig) -> Union[None, Step]:
@@ -58,6 +59,12 @@ class DefaultPolicy(Policy):
def next(self, config: ContinueConfig, history: History) -> Step:
# At the very start, run initial Steps spcecified in the config
if history.get_current() is None:
+ shown_welcome_file = os.path.join(getServerFolderPath(), ".shown_welcome")
+ if os.path.exists(shown_welcome_file):
+ return StepsOnStartupStep()
+
+ with open(shown_welcome_file, "w") as f:
+ f.write("")
return (
MessageStep(
name="Welcome to Continue",
@@ -69,7 +76,6 @@ class DefaultPolicy(Policy):
- [Customize Continue](https://continue.dev/docs/customization) (e.g. use your own API key) by typing '/config'."""
),
)
- >> WelcomeStep()
>> StepsOnStartupStep()
)
diff --git a/continuedev/src/continuedev/plugins/steps/chat.py b/continuedev/src/continuedev/plugins/steps/chat.py
index ad09f193..cbd94fe2 100644
--- a/continuedev/src/continuedev/plugins/steps/chat.py
+++ b/continuedev/src/continuedev/plugins/steps/chat.py
@@ -95,7 +95,7 @@ class SimpleChatStep(Step):
self.name = remove_quotes_and_escapes(
await sdk.models.medium.complete(
- f"{self.description}\n\nHere is a short title for the above chat message (no more than 10 words):",
+ f'"{self.description}"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:',
max_tokens=20,
)
)
diff --git a/extension/react-app/src/components/ContinueButton.tsx b/extension/react-app/src/components/ContinueButton.tsx
index 6d03c820..632acc75 100644
--- a/extension/react-app/src/components/ContinueButton.tsx
+++ b/extension/react-app/src/components/ContinueButton.tsx
@@ -3,14 +3,16 @@ import { Button } from ".";
import { PlayIcon } from "@heroicons/react/24/outline";
import { useSelector } from "react-redux";
import { RootStore } from "../redux/store";
+import { useEffect, useState } from "react";
-let StyledButton = styled(Button)`
+let StyledButton = styled(Button)<{ color?: string | null }>`
margin: auto;
margin-top: 8px;
+ margin-bottom: 16px;
display: grid;
grid-template-columns: 30px 1fr;
align-items: center;
- background: #be1b55;
+ background: ${(props) => props.color || "#be1b55"};
&:hover {
transition-property: "background";
@@ -23,8 +25,27 @@ function ContinueButton(props: { onClick?: () => void; hidden?: boolean }) {
(state: RootStore) => state.config.vscMediaUrl
);
+ const [buttonColor, setButtonColor] = useState<string | null>(
+ localStorage.getItem("continueButtonColor")
+ );
+
+ useEffect(() => {
+ const handleStorageChange = (e: any) => {
+ if (e.key === "continueButtonColor") {
+ // Update your state or do whatever you need to do here
+ setButtonColor(e.newValue);
+ }
+ };
+
+ window.addEventListener("storage", handleStorageChange);
+
+ // Don't forget to cleanup the event listener
+ return () => window.removeEventListener("storage", handleStorageChange);
+ }, []);
+
return (
<StyledButton
+ color={buttonColor as any}
hidden={props.hidden}
style={{ fontSize: "10px" }}
className="m-auto press-start-2p"
diff --git a/extension/react-app/src/redux/slices/serverStateReducer.ts b/extension/react-app/src/redux/slices/serverStateReducer.ts
index cf26f094..904b0e76 100644
--- a/extension/react-app/src/redux/slices/serverStateReducer.ts
+++ b/extension/react-app/src/redux/slices/serverStateReducer.ts
@@ -3,25 +3,7 @@ import { FullState } from "../../../../schema/FullState";
const initialState: FullState = {
history: {
- timeline: [
- {
- step: {
- name: "Welcome to Continue",
- hide: false,
- description: `- Highlight code section and ask a question or give instructions
-- Use \`cmd+m\` (Mac) / \`ctrl+m\` (Windows) to open Continue
-- Use \`/help\` to ask questions about how to use Continue
-- [Customize Continue](https://continue.dev/docs/customization) (e.g. use your own API key) by typing '/config'.`,
- system_message: null,
- chat_context: [],
- manage_own_chat_context: false,
- message: "",
- },
- depth: 0,
- deleted: false,
- active: false,
- },
- ],
+ timeline: [],
current_index: 3,
} as any,
user_input_queue: [],