From b5fea3b7b5f19beaf5417d167105b4909eacb14c Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Thu, 15 Jun 2023 18:27:42 -0700
Subject: algorithm for cutting lines implemented

---
 continuedev/src/continuedev/core/config.py        |   4 +-
 continuedev/src/continuedev/core/sdk.py           |  16 +++-
 continuedev/src/continuedev/libs/llm/openai.py    |  11 ++-
 continuedev/src/continuedev/libs/llm/prompters.py | 112 ----------------------
 continuedev/src/continuedev/steps/core/core.py    |  55 ++++++++++-
 5 files changed, 75 insertions(+), 123 deletions(-)
 delete mode 100644 continuedev/src/continuedev/libs/llm/prompters.py

diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 23be8133..6a811412 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -19,8 +19,8 @@ class ContinueConfig(BaseModel):
     steps_on_startup: Optional[Dict[str, Dict]] = {}
     server_url: Optional[str] = None
     allow_anonymous_telemetry: Optional[bool] = True
-    default_model: Literal["gpt-3.5-turbo",
-                           "gpt-4", "starcoder"] = 'gpt-4'
+    default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k",
+                           "gpt-4"] = 'gpt-4'
     slash_commands: Optional[List[SlashCommand]] = [
         # SlashCommand(
         #     name="pytest",
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 76f72d01..3e3c3bc5 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -44,6 +44,16 @@ class Models:
             return OpenAI(api_key=api_key, default_model="gpt-3.5-turbo")
         return asyncio.get_event_loop().run_until_complete(load_gpt35())

+    @cached_property
+    def gpt3516k(self):
+        async def load_gpt3516k():
+            api_key = await self.sdk.get_user_secret(
+                'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
+            if api_key == "":
+                return ProxyServer(self.sdk.ide.unique_id, "gpt-3.5-turbo-16k")
+            return OpenAI(api_key=api_key, default_model="gpt-3.5-turbo-16k")
+        return asyncio.get_event_loop().run_until_complete(load_gpt3516k())
+
     @cached_property
     def gpt4(self):
         async def load_gpt4():
@@ -59,6 +69,8 @@ class Models:
             return self.starcoder
         elif model_name == "gpt-3.5-turbo":
             return self.gpt35
+        elif model_name == "gpt-3.5-turbo-16k":
+            return self.gpt3516k
         elif model_name == "gpt-4":
             return self.gpt4
         else:
@@ -174,10 +186,10 @@ class ContinueSDK(AbstractContinueSDK):
         highlighted_code = await self.ide.getHighlightedCode()
         if len(highlighted_code) == 0:
             # Get the full contents of all open files
-            files = await self.ide.getOpenFiles()
+            files = await self.sdk.ide.getOpenFiles()
             contents = {}
             for file in files:
-                contents[file] = await self.ide.readFile(file)
+                contents[file] = await self.sdk.ide.readFile(file)

             highlighted_code = [RangeInFile.from_entire_file(
                 filepath, content) for filepath, content in contents.items()]
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 180ea5f0..17d37035 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -10,12 +10,13 @@ from pydantic import BaseModel, validator
 import tiktoken

 MAX_TOKENS_FOR_MODEL = {
-    "gpt-3.5-turbo": 4097,
-    "gpt-4": 4097,
+    "gpt-3.5-turbo": 4096,
+    "gpt-3.5-turbo-16k": 16384,
+    "gpt-4": 8192
 }
 DEFAULT_MAX_TOKENS = 2048
 CHAT_MODELS = {
-    "gpt-3.5-turbo", "gpt-4"
+    "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"
 }

@@ -24,7 +25,7 @@ class OpenAI(LLM):
     completion_count: int = 0
     default_model: str

-    def __init__(self, api_key: str, default_model: str = "gpt-3.5-turbo", system_message: str = None):
+    def __init__(self, api_key: str, default_model: str, system_message: str = None):
         self.api_key = api_key
         self.default_model = default_model
         self.system_message = system_message
@@ -51,7 +52,7 @@ class OpenAI(LLM):
         return chat_history

     def with_system_message(self, system_message: Union[str, None]):
-        return OpenAI(api_key=self.api_key, system_message=system_message)
+        return OpenAI(api_key=self.api_key, default_model=self.default_model, system_message=system_message)

     def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         self.completion_count += 1
diff --git a/continuedev/src/continuedev/libs/llm/prompters.py b/continuedev/src/continuedev/libs/llm/prompters.py
deleted file mode 100644
index 04e9885a..00000000
--- a/continuedev/src/continuedev/libs/llm/prompters.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from typing import Any, Callable, List, Tuple, Union
-from ..llm import LLM
-from .openai import OpenAI
-
-
-def cls_method_to_str(cls_name: str, init: str, method: str) -> str:
-    """Convert class and method info to formatted code"""
-    return f"""class {cls_name}:
-{init}
-{method}"""
-
-
-# Prompter classes
-class Prompter:
-    def __init__(self, llm: LLM = None):
-        if llm is None:
-            self.llm = OpenAI()
-        else:
-            self.llm = llm
-
-    def _compile_prompt(self, inp: Any) -> Tuple[str, str, Union[str, None]]:
-        "Takes input and returns prompt, prefix, suffix"
-        raise NotImplementedError
-
-    def complete(self, inp: Any, **kwargs) -> str:
-        prompt, prefix, suffix = self._compile_prompt(inp)
-        resp = self.llm.complete(prompt + prefix, suffix=suffix, **kwargs)
-        return prefix + resp + (suffix or "")
-
-    def __call__(self, inp: Any, **kwargs) -> str:
-        return self.complete(inp, **kwargs)
-
-    def parallel_complete(self, inps: List[Any]) -> List[str]:
-        prompts = []
-        prefixes = []
-        suffixes = []
-        for inp in inps:
-            prompt, prefix, suffix = self._compile_prompt(inp)
-            prompts.append(prompt)
-            prefixes.append(prefix)
-            suffixes.append(suffix)
-
-        resps = self.llm.parallel_complete(
-            [prompt + prefix for prompt, prefix in zip(prompts, prefixes)], suffixes=suffixes)
-        return [prefix + resp + (suffix or "") for prefix, resp, suffix in zip(prefixes, resps, suffixes)]
-
-
-class MixedPrompter(Prompter):
-    def __init__(self, prompters: List[Prompter], router: Callable[[Any], int], llm: LLM = None):
-        super().__init__(llm=llm)
-        self.prompters = prompters
-        self.router = router
-
-    def _compile_prompt(self, inp: Any) -> Tuple[str, str, Union[str, None]]:
-        prompter = self.prompters[self.router(inp)]
-        return prompter._compile_prompt(inp)
-
-    def complete(self, inp: Any, **kwargs) -> str:
-        prompter = self.prompters[self.router(inp)]
-        return prompter.complete(inp, **kwargs)
-
-
-class SimplePrompter(Prompter):
-    def __init__(self, prompt_fn: Callable[[Any], str], llm: LLM = None):
-        super().__init__(llm=llm)
-        self.prompt_fn = prompt_fn
-
-    def _compile_prompt(self, inp: Any) -> Tuple[str, str, Union[str, None]]:
-        return self.prompt_fn(inp), "", None
-
-
-class FormatStringPrompter(SimplePrompter):
-    """Pass a formatted string, and the input should be a dict with the keys to format"""
-
-    def __init__(self, prompt: str, llm: LLM = None):
-        super().__init__(lambda inp: prompt.format(**inp), llm=llm)
-
-
-class BasicCommentPrompter(SimplePrompter):
-    def __init__(self, comment: str, llm: LLM = None):
-        super().__init__(lambda inp: f"""{inp}
-
-# {comment}""", llm=llm)
-
-
-class EditPrompter(Prompter):
-    def __init__(self, prompt_fn: Callable[[Any], Tuple[str, str]], llm: LLM = None):
-        super().__init__(llm=llm)
-        self.prompt_fn = prompt_fn
-
-    def complete(self, inp: str, **kwargs) -> str:
-        inp, instruction = self.prompt_fn(inp)
-        return self.llm.edit(inp, instruction, **kwargs)
-
-    def parallel_complete(self, inps: List[Any]) -> List[str]:
-        prompts = []
-        instructions = []
-        for inp in inps:
-            prompt, instruction = self.prompt_fn(inp)
-            prompts.append(prompt)
-            instructions.append(instruction)
-
-        return self.llm.parallel_edit(prompts, instructions)
-
-
-class InsertPrompter(Prompter):
-    def __init__(self, prompt_fn: Callable[[Any], Tuple[str, str, str]], llm: LLM = None):
-        super().__init__(llm=llm)
-        self.prompt_fn = prompt_fn
-
-    def _compile_prompt(self, inp: Any) -> Tuple[str, str, Union[str, None]]:
-        return self.prompt_fn(inp)
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index aee5bc1d..ee3ef9a7 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -174,16 +174,67 @@ class DefaultModelEditCodeStep(Step):
         for rif in rif_with_contents:
             await sdk.ide.setFileOpen(rif.filepath)

+            model_to_use = sdk.config.default_model
+
             full_file_contents = await sdk.ide.readFile(rif.filepath)
+
+            full_file_contents_lst = full_file_contents.split("\n")
+
+            max_start_line = rif.range.start.line
+            min_end_line = rif.range.end.line
+            cur_start_line = 0
+            cur_end_line = len(full_file_contents_lst)
+
+            if sdk.config.default_model == "gpt-4":
+
+                total_tokens = sdk.models.gpt4.count_tokens(full_file_contents)
+                if total_tokens > sdk.models.gpt4.max_tokens:
+                    while cur_end_line > min_end_line:
+                        total_tokens -= len(full_file_contents_lst[cur_end_line])
+                        cur_end_line -= 1
+                        if total_tokens < sdk.models.gpt4.max_tokens:
+                            break
+
+                    if total_tokens > sdk.models.gpt4.max_tokens:
+                        while cur_start_line < max_start_line:
+                            cur_start_line += 1
+                            total_tokens -= len(full_file_contents_lst[cur_start_line])
+                            if total_tokens < sdk.models.gpt4.max_tokens:
+                                break
+
+            elif sdk.config.default_model == "gpt-3.5-turbo" or sdk.config.default_model == "gpt-3.5-turbo-16k":
+
+                if sdk.models.gpt35.count_tokens(full_file_contents) > sdk.models.gpt35.max_tokens:
+
+                    model_to_use = "gpt-3.5-turbo-16k"
+
+                    total_tokens = sdk.models.gpt3516k.count_tokens(full_file_contents)
+                    if total_tokens > sdk.models.gpt3516k.max_tokens:
+                        while cur_end_line > min_end_line:
+                            total_tokens -= len(full_file_contents_lst[cur_end_line])
+                            cur_end_line -= 1
+                            if total_tokens < sdk.models.gpt4.max_tokens:
+                                break
+
+                        if total_tokens > sdk.models.gpt3516k.max_tokens:
+                            while cur_start_line < max_start_line:
+                                total_tokens -= len(full_file_contents_lst[cur_start_line])
+                                cur_start_line += 1
+                                if total_tokens < sdk.models.gpt4.max_tokens:
+                                    break
+            else:
+                raise Exception("Unknown default model")
+
             start_index, end_index = rif.range.indices_in_string(
                 full_file_contents)
+
             segs = [full_file_contents[:start_index],
-                    full_file_contents[end_index:]]
+                     full_file_contents[end_index:]]

             prompt = self._prompt.format(
                 code=rif.contents, user_request=self.user_input, file_prefix=segs[0], file_suffix=segs[1])

-            completion = str(sdk.models.default.complete(prompt, with_history=await sdk.get_chat_context()))
+            completion = str(model_to_use.complete(prompt, with_history=await sdk.get_chat_context()))

             eot_token = "<|endoftext|>"
             completion = completion.removesuffix(eot_token)
-- 
cgit v1.2.3-70-g09d2
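A note on the accounting in the patch above: the trimming loops subtract len(line), which measures characters, while the budgets they are compared against (max_tokens, MAX_TOKENS_FOR_MODEL) are token counts, so each dropped line frees less budget than the loop assumes. The "getting it working" patch later in this series switches the subtraction to count_tokens. A minimal sketch of per-line token counting with tiktoken follows; the cl100k_base fallback is an assumption, mirroring the aliasing that the OpenAI class already keeps in its __encoding_for_model table.

import tiktoken

def count_tokens(model_name: str, text: str) -> int:
    # tiktoken maps most OpenAI chat models to an encoding; fall back
    # to cl100k_base for names it does not know (early releases, for
    # example, had no entry for "gpt-3.5-turbo-16k")
    try:
        encoding = tiktoken.encoding_for_model(model_name)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))

line = "return asyncio.get_event_loop().run_until_complete(load_gpt35())"
print(len(line))                            # character count, not tokens
print(count_tokens("gpt-3.5-turbo", line))  # token count is much smaller

Because common identifiers encode to multi-character tokens, len() overstates the cost of a line several-fold, which makes the character-based loops trim far more context than necessary.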
From dfa127d440f4971d0fea9dee27349f7c1e19b5cf Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Thu, 15 Jun 2023 22:37:35 -0700
Subject: creating segs from lines

---
 continuedev/src/continuedev/steps/core/core.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index ee3ef9a7..88dc8d72 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -224,12 +224,11 @@ class DefaultModelEditCodeStep(Step):
                                     break
             else:
                 raise Exception("Unknown default model")
+
+            code_before = "".join(full_file_contents_lst[min_end_line:])
+            code_after = "".join(full_file_contents_lst[:max_start_line])

-            start_index, end_index = rif.range.indices_in_string(
-                full_file_contents)
-
-            segs = [full_file_contents[:start_index],
-                    full_file_contents[end_index:]]
+            segs = [code_before, code_after]

             prompt = self._prompt.format(
                 code=rif.contents, user_request=self.user_input, file_prefix=segs[0], file_suffix=segs[1])
-- 
cgit v1.2.3-70-g09d2

From c07a995dc92cea9cc374fbd8adacf86002bbb2e8 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 00:03:13 -0700
Subject: refactoring to use function

---
 continuedev/src/continuedev/steps/core/core.py | 50 ++++++++++++--------------
 1 file changed, 23 insertions(+), 27 deletions(-)

diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 88dc8d72..ec0007a2 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -10,6 +10,7 @@ from ...models.filesystem_edit import EditDiff, FileEdit, FileEditWithFullConten
 from ...models.filesystem import FileSystem, RangeInFile, RangeInFileWithContents
 from ...core.observation import Observation, TextObservation, TracebackObservation, UserInputObservation
 from ...core.main import Step, SequentialStep
+from ...libs.llm.openai import MAX_TOKENS_FOR_MODEL
 import difflib

@@ -185,48 +186,43 @@ class DefaultModelEditCodeStep(Step):
             cur_start_line = 0
             cur_end_line = len(full_file_contents_lst)

-            if sdk.config.default_model == "gpt-4":
-
-                total_tokens = sdk.models.gpt4.count_tokens(full_file_contents)
-                if total_tokens > sdk.models.gpt4.max_tokens:
+            def cut_context(model_to_use, total_tokens):
+
+                if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use]:
                     while cur_end_line > min_end_line:
                         total_tokens -= len(full_file_contents_lst[cur_end_line])
                         cur_end_line -= 1
-                        if total_tokens < sdk.models.gpt4.max_tokens:
-                            break
+                        if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use]:
+                            return cur_start_line, cur_end_line

-                    if total_tokens > sdk.models.gpt4.max_tokens:
+                    if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use]:
                         while cur_start_line < max_start_line:
                             cur_start_line += 1
                             total_tokens -= len(full_file_contents_lst[cur_start_line])
-                            if total_tokens < sdk.models.gpt4.max_tokens:
-                                break
+                            if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use]:
+                                return cur_start_line, cur_end_line
+                    return cur_start_line, cur_end_line
+                else:
+                    return cur_start_line, cur_end_line

-            elif sdk.config.default_model == "gpt-3.5-turbo" or sdk.config.default_model == "gpt-3.5-turbo-16k":
+            if model_to_use == "gpt-4":

-                if sdk.models.gpt35.count_tokens(full_file_contents) > sdk.models.gpt35.max_tokens:
+                total_tokens = sdk.models.gpt4.count_tokens(full_file_contents)
+                cur_start_line, cur_end_line = cut_context(model_to_use, total_tokens)

-                    model_to_use = "gpt-3.5-turbo-16k"
+            elif model_to_use == "gpt-3.5-turbo" or model_to_use == "gpt-3.5-turbo-16k":

+                if sdk.models.gpt35.count_tokens(full_file_contents) > MAX_TOKENS_FOR_MODEL["gpt-3.5-turbo"]:
+
+                    model_to_use = "gpt-3.5-turbo-16k"
                     total_tokens = sdk.models.gpt3516k.count_tokens(full_file_contents)
-                    if total_tokens > sdk.models.gpt3516k.max_tokens:
-                        while cur_end_line > min_end_line:
-                            total_tokens -= len(full_file_contents_lst[cur_end_line])
-                            cur_end_line -= 1
-                            if total_tokens < sdk.models.gpt4.max_tokens:
-                                break
-
-                        if total_tokens > sdk.models.gpt3516k.max_tokens:
-                            while cur_start_line < max_start_line:
-                                total_tokens -= len(full_file_contents_lst[cur_start_line])
-                                cur_start_line += 1
-                                if total_tokens < sdk.models.gpt4.max_tokens:
-                                    break
+                    cur_start_line, cur_end_line = cut_context(model_to_use, total_tokens)
+
             else:
+
                 raise Exception("Unknown default model")

-            code_before = "".join(full_file_contents_lst[min_end_line:])
-            code_after = "".join(full_file_contents_lst[:max_start_line])
+            code_before = "".join(full_file_contents_lst[cur_end_line:])
+            code_after = "".join(full_file_contents_lst[:cur_start_line])

             segs = [code_before, code_after]
-- 
cgit v1.2.3-70-g09d2

From 5bb395ce6b8fb6064919c1c885aa36ac70247090 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 12:47:20 -0700
Subject: getting it working

---
 continuedev/src/continuedev/libs/llm/openai.py | 12 ++++---
 continuedev/src/continuedev/steps/core/core.py | 44 +++++++++++++------------
 2 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 17d37035..136e86b4 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -9,12 +9,12 @@ from ..llm import LLM
 from pydantic import BaseModel, validator
 import tiktoken

+DEFAULT_MAX_TOKENS = 2048
 MAX_TOKENS_FOR_MODEL = {
-    "gpt-3.5-turbo": 4096,
-    "gpt-3.5-turbo-16k": 16384,
-    "gpt-4": 8192
+    "gpt-3.5-turbo": 4096 - DEFAULT_MAX_TOKENS,
+    "gpt-3.5-turbo-16k": 16384 - DEFAULT_MAX_TOKENS,
+    "gpt-4": 8192 - DEFAULT_MAX_TOKENS
 }
-DEFAULT_MAX_TOKENS = 2048
 CHAT_MODELS = {
     "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"
 }
@@ -32,6 +32,10 @@ class OpenAI(LLM):

         openai.api_key = api_key

+    @cached_property
+    def name(self):
+        return self.default_model
+
     @cached_property
     def __encoding_for_model(self):
         aliases = {
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index ec0007a2..de6fa29a 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -175,8 +175,8 @@ class DefaultModelEditCodeStep(Step):
         for rif in rif_with_contents:
             await sdk.ide.setFileOpen(rif.filepath)

-            model_to_use = sdk.config.default_model
-
+            model_to_use = sdk.models.default
+
             full_file_contents = await sdk.ide.readFile(rif.filepath)

@@ -184,45 +184,45 @@ class DefaultModelEditCodeStep(Step):
             max_start_line = rif.range.start.line
             min_end_line = rif.range.end.line
             cur_start_line = 0
-            cur_end_line = len(full_file_contents_lst)
+            cur_end_line = len(full_file_contents_lst) - 1

-            def cut_context(model_to_use, total_tokens):
-
-                if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use]:
+            def cut_context(model_to_use, total_tokens, cur_start_line, cur_end_line):
+
+                if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use.name]:
                     while cur_end_line > min_end_line:
-                        total_tokens -= len(full_file_contents_lst[cur_end_line])
+                        total_tokens -= model_to_use.count_tokens(full_file_contents_lst[cur_end_line])
                         cur_end_line -= 1
-                        if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use]:
+                        if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use.name]:
                             return cur_start_line, cur_end_line

-                    if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use]:
+                    if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use.name]:
                         while cur_start_line < max_start_line:
                             cur_start_line += 1
-                            total_tokens -= len(full_file_contents_lst[cur_start_line])
-                            if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use]:
+                            total_tokens -= model_to_use.count_tokens(full_file_contents_lst[cur_end_line])
+                            if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use.name]:
                                 return cur_start_line, cur_end_line
-                    return cur_start_line, cur_end_line
-                else:
                     return cur_start_line, cur_end_line

-            if model_to_use == "gpt-4":
+                return cur_start_line, cur_end_line
+
+            if model_to_use.name == "gpt-4":

-                total_tokens = sdk.models.gpt4.count_tokens(full_file_contents)
-                cur_start_line, cur_end_line = cut_context(model_to_use, total_tokens)
+                total_tokens = model_to_use.count_tokens(full_file_contents)
+                cur_start_line, cur_end_line = cut_context(model_to_use, total_tokens, cur_start_line, cur_end_line)

-            elif model_to_use == "gpt-3.5-turbo" or model_to_use == "gpt-3.5-turbo-16k":
+            elif model_to_use.name == "gpt-3.5-turbo" or model_to_use.name == "gpt-3.5-turbo-16k":

                 if sdk.models.gpt35.count_tokens(full_file_contents) > MAX_TOKENS_FOR_MODEL["gpt-3.5-turbo"]:

-                    model_to_use = "gpt-3.5-turbo-16k"
-                    total_tokens = sdk.models.gpt3516k.count_tokens(full_file_contents)
-                    cur_start_line, cur_end_line = cut_context(model_to_use, total_tokens)
+                    model_to_use = sdk.models.gpt3516k
+                    total_tokens = model_to_use.count_tokens(full_file_contents)
+                    cur_start_line, cur_end_line = cut_context(model_to_use, total_tokens, cur_start_line, cur_end_line)

             else:
+
                 raise Exception("Unknown default model")

-            code_before = "".join(full_file_contents_lst[cur_end_line:])
-            code_after = "".join(full_file_contents_lst[:cur_start_line])
+            code_before = "".join(full_file_contents_lst[cur_start_line:max_start_line])
+            code_after = "".join(full_file_contents_lst[min_end_line:cur_end_line])

             segs = [code_before, code_after]
-- 
cgit v1.2.3-70-g09d2
From 833974d9a1dba2e5b5e98208ff1d907d93d1b57b Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:14:20 -0700
Subject: removing catalog for sidebar

---
 docs/sidebars.js | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docs/sidebars.js b/docs/sidebars.js
index 30b8ad3a..f9a5bdef 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -17,7 +17,6 @@ const sidebars = {
     "intro",
     "getting-started",
     "install",
-    "catalog",
     "how-continue-works",
     "telemetry",
   ],
-- 
cgit v1.2.3-70-g09d2

From b1db80cf8ea3ae19c8c5361e399fe17f2fa1f1cd Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:16:17 -0700
Subject: fixing google analytics bug

---
 docs/docusaurus.config.js | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 7b817bb9..e9433e67 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -36,11 +36,6 @@ const config = {
   presets: [
     [
       "classic",
-      {
-        gtag: {
-          trackingID: 'G-M3JWW8N2XQ',
-        },
-      },
       /** @type {import('@docusaurus/preset-classic').Options} */
       ({
         docs: {
@@ -51,6 +46,9 @@ const config = {
         theme: {
           customCss: require.resolve("./src/css/custom.css"),
         },
+        gtag: {
+          trackingID: 'G-M3JWW8N2XQ',
+        },
       }),
     ],
   ],
-- 
cgit v1.2.3-70-g09d2

From 257f58feb2d3c2df38d6fddd0f76c56ebd147b70 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:31:21 -0700
Subject: fixing bug with edit this page

---
 docs/docusaurus.config.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index e9433e67..7746a87b 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -41,7 +41,7 @@ const config = {
         docs: {
           routeBasePath: '/',
           sidebarPath: require.resolve("./sidebars.js"),
-          editUrl: "https://github.com/continuedev/continue/",
+          editUrl: "https://github.com/continuedev/continue/tree/main/docs",
         },
         theme: {
           customCss: require.resolve("./src/css/custom.css"),
-- 
cgit v1.2.3-70-g09d2

From 54210ef297defb8aa924a01214a9e1a54dbbef92 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:39:44 -0700
Subject: improved introduction

---
 docs/docs/intro.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/docs/intro.md b/docs/docs/intro.md
index e7d922c2..015d45af 100644
--- a/docs/docs/intro.md
+++ b/docs/docs/intro.md
@@ -11,7 +11,7 @@

 **Continue is the open-source library for accelerating software development with language models**

-You define the scenarios where Large Language Models LLMs like GPT-4 and StarCoder should act as an autopilot, helping you complete software development tasks. You use recipes created by others to automate more steps in your workflows. If a recipe does not exist or work exactly like you want, you can use the Continue SDK to create custom steps and compose them into personalized recipes. Whether you are using a recipe created by yourself or someone else, you can review, reverse, and rerun steps with the Continue GUI, which helps you guide the work done by LLMs and learn when to use and trust them.
+You determine when Large Language Models (LLMs) like GPT-4 should act as an autopilot, helping you complete software development tasks. You use recipes created by others to automate more steps in your workflows. If a recipe does not exist or work exactly like you want, you can use the Continue SDK to create custom steps and compose them into personalized recipes. Whether you are using a recipe created by yourself or someone else, you can review, reverse, and rerun steps with the Continue GUI, which helps you guide the work done by LLMs and learn when to use and trust them.

 ## Why do developers use Continue?
-- 
cgit v1.2.3-70-g09d2

From d15688a3d010192caaa3d041c23725eacf26e535 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:43:52 -0700
Subject: changing ) to GUI

---
 docs/docs/intro.md     | 4 ++--
 extension/package.json | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/docs/intro.md b/docs/docs/intro.md
index 015d45af..6795797e 100644
--- a/docs/docs/intro.md
+++ b/docs/docs/intro.md
@@ -11,10 +11,10 @@

 **Continue is the open-source library for accelerating software development with language models**

-You determine when Large Language Models (LLMs) like GPT-4 should act as an autopilot, helping you complete software development tasks. You use recipes created by others to automate more steps in your workflows. If a recipe does not exist or work exactly like you want, you can use the Continue SDK to create custom steps and compose them into personalized recipes. Whether you are using a recipe created by yourself or someone else, you can review, reverse, and rerun steps with the Continue GUI, which helps you guide the work done by LLMs and learn when to use and trust them.
+You determine when Large Language Models (LLMs) like GPT-4 should act as an autopilot, helping you complete software development tasks. You open a file or highlight some code and then use slash commands like `/edit`, `/explain`, and `/comment` and natural language instructions to tell the language model what to do. If an error or exception occurs when you run Python or JavaScript code, Continue will automatically tell you in plain English what to try to address it. You can also review, reverse, and rerun steps with the Continue GUI, which helps you guide the work done by LLMs and learn when to use and trust them.

 ## Why do developers use Continue?

-Many developers have begun to use [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5) and [GPT-4](https://openai.com/research/gpt-4) through [ChatGPT](https://openai.com/blog/chatgpt) while coding; however, the experience is painful because of how much manual copying, pasting, and editing is required to provide the necessary context and incorporate the generated solutions into your codebase. Continue eliminates this pain by deeply integrating LLMs into your IDE amd workflows.
+Many developers have begun to use [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5) and [GPT-4](https://openai.com/research/gpt-4) through [ChatGPT](https://openai.com/blog/chatgpt) while coding; however, the experience is painful because of how much manual copying, pasting, and editing is required to provide the necessary context and incorporate the generated solutions and advice into your codebase. Continue eliminates this pain by enabling LLMs to natively act in your IDE as you complete your workflows.

 Continue accelerates how developers build, ship, and maintain software, while giving them the control to define when LLMs should take actions and the confidence to trust LLMs. In short, it enables developers to do what they have always done: work together to create better and better abstractions that make it easier and easier to automate the repetitive work that people want computers to do.
\ No newline at end of file
diff --git a/extension/package.json b/extension/package.json
index 8cf50d2a..ec348aa5 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -96,7 +96,7 @@
         {
           "type": "webview",
           "id": "continue.continueGUIView",
-          "name": ")",
+          "name": "GUI",
           "visibility": "visible"
         }
       ]
-- 
cgit v1.2.3-70-g09d2

From 9846f8f7e592fbcc110a0ec54bbabd934ecd080b Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:51:33 -0700
Subject: getting started and install

---
 docs/docs/getting-started.md           |   7 +++----
 docs/docs/install.md                   |   7 ++-----
 docs/static/img/codespaces-install.png | Bin 0 -> 499343 bytes
 docs/static/img/vscode-install.png     | Bin 0 -> 317438 bytes
 4 files changed, 5 insertions(+), 9 deletions(-)
 create mode 100644 docs/static/img/codespaces-install.png
 create mode 100644 docs/static/img/vscode-install.png

diff --git a/docs/docs/getting-started.md b/docs/docs/getting-started.md
index 9824aadd..b7401106 100644
--- a/docs/docs/getting-started.md
+++ b/docs/docs/getting-started.md
@@ -12,10 +12,9 @@ We don't want to waste your time with install and env setup before you try Conti

 2. Select the `Create new codespace` button and wait 30-90 seconds while it launches and installs the Continue extension. Once complete, it should look like this:

-**TODO: Nate to add a screenshot of what Codespaces + Continue looks like when it is ready**
+![codespaces-install](/img/codespaces-install.png)

 ## Next Steps

-Read the `Getting Started` section of the `README.md` file that has been opened in your codespace, which you can also find [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
-
-If you're ready to download for VS Code, you can do so [here](https://marketplace.visualstudio.com/items?itemName=Continue.continue).
+- Read the `Getting Started` section of the `README.md` file that has been opened in your codespace, which you can also find [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
+- If you're ready to download for VS Code, you can do so [here](https://marketplace.visualstudio.com/items?itemName=Continue.continue)
\ No newline at end of file
diff --git a/docs/docs/install.md b/docs/docs/install.md
index a042739f..ac0aef30 100644
--- a/docs/docs/install.md
+++ b/docs/docs/install.md
@@ -12,7 +12,7 @@ If you want to try Continue before installing locally, check out the [GitHub Cod

 3. Once you do this, you will see a message in the bottom right hand corner of VS Code that says `Setting up Continue extension...`. After 30-90 seconds, the Continue extension will then open up. It should look like this when it is complete:

-**TODO: Nate to add a screenshot of what Codespaces + Continue looks like when it is ready**
+![vscode-install](/img/vscode-install.png)

 You can also open the Continue GUI with `cmd+shift+p` on Mac / `ctrl+shift+p` on Windows and then selecting `Continue: Open GUI`
@@ -22,7 +22,4 @@ If you would like to install Continue from source, please [follow the instructio

 ## Next steps

-**TODO: Nate to update VS Code install to have the same getting started as Codespaces**
-
-Read the `Getting Started` section of the `README.md` file that has been opened in VS Code,
-which you can also find [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
\ No newline at end of file
+- Read the `Getting Started` section of the `README.md` [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
\ No newline at end of file
diff --git a/docs/static/img/codespaces-install.png b/docs/static/img/codespaces-install.png
new file mode 100644
index 00000000..e960eff1
Binary files /dev/null and b/docs/static/img/codespaces-install.png differ
diff --git a/docs/static/img/vscode-install.png b/docs/static/img/vscode-install.png
new file mode 100644
index 00000000..17ac547a
Binary files /dev/null and b/docs/static/img/vscode-install.png differ
-- 
cgit v1.2.3-70-g09d2
From bb2ed6c1ba491dfda0ab58ff35c6946043c8372f Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 14:59:28 -0700
Subject: updating landing page

---
 docs/src/components/HomepageFeatures/index.js | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/docs/src/components/HomepageFeatures/index.js b/docs/src/components/HomepageFeatures/index.js
index 31df2b27..a76ea22e 100644
--- a/docs/src/components/HomepageFeatures/index.js
+++ b/docs/src/components/HomepageFeatures/index.js
@@ -4,32 +4,29 @@ import styles from "./styles.module.css";

 const FeatureList = [
   {
-    title: "Tell LLMs when to step in",
+    title: "Understand and edit code",
     Svg: require("@site/static/img/undraw_docusaurus_mountain.svg").default,
     description: (
       <>
-        Seamlessly put your repetitive software development tasks
-        on autopilot by leveraging recipes created by others
+        Seamlessly ask language models to help you complete steps in your software development tasks
       </>
     ),
   },
   {
-    title: "Write your own recipes",
+    title: "Customizable for your team",
     Svg: require("@site/static/img/undraw_docusaurus_tree.svg").default,
     description: (
       <>
-        Use the Continue SDK to create custom steps and compose
-        them into recipes, guiding LLMs through common tasks
+        Define when and how LLMs should act to accelerate steps in your team-specific workflows
       </>
     ),
   },
   {
-    title: "Wield LLMs with confidence",
+    title: "Breakdown your work step-by-step",
     Svg: require("@site/static/img/undraw_docusaurus_react.svg").default,
     description: (
       <>
-        Use the Continue GUI to review, reverse, and rerun steps or even
-        entire recipes, allowing you to build trust in language models
+        Use the Continue GUI to review, reverse, and rerun steps that LLMs and you have taken together
       </>
     ),
   },
-- 
cgit v1.2.3-70-g09d2

From 9f756539fa80422942f220ebd81621dda302f036 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 15:14:46 -0700
Subject: verb instead of noun

---
 docs/src/components/HomepageFeatures/index.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/src/components/HomepageFeatures/index.js b/docs/src/components/HomepageFeatures/index.js
index a76ea22e..6348f80a 100644
--- a/docs/src/components/HomepageFeatures/index.js
+++ b/docs/src/components/HomepageFeatures/index.js
@@ -22,7 +22,7 @@ const FeatureList = [
     ),
   },
   {
-    title: "Breakdown your work step-by-step",
+    title: "Break down your work step-by-step",
     Svg: require("@site/static/img/undraw_docusaurus_react.svg").default,
     description: (
       <>
-- 
cgit v1.2.3-70-g09d2

From 334996b9b0d9ac3cce149524883816b401ef2d5b Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 15:48:09 -0700
Subject: addressing feedback

---
 continuedev/src/continuedev/core/sdk.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 3e3c3bc5..f8bbcc53 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -40,7 +40,7 @@ class Models:
             api_key = await self.sdk.get_user_secret(
                 'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
             if api_key == "":
-                return ProxyServer(self.sdk.ide.unique_id, "gpt-3.5-turbo")
+                return ProxyServer(self.ide.unique_id, "gpt-3.5-turbo")
             return OpenAI(api_key=api_key, default_model="gpt-3.5-turbo")
         return asyncio.get_event_loop().run_until_complete(load_gpt35())

@@ -50,7 +50,7 @@ class Models:
             api_key = await self.sdk.get_user_secret(
                 'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
             if api_key == "":
-                return ProxyServer(self.sdk.ide.unique_id, "gpt-3.5-turbo-16k")
+                return ProxyServer(self.ide.unique_id, "gpt-3.5-turbo-16k")
             return OpenAI(api_key=api_key, default_model="gpt-3.5-turbo-16k")
         return asyncio.get_event_loop().run_until_complete(load_gpt3516k())

@@ -60,7 +60,7 @@ class Models:
             api_key = await self.sdk.get_user_secret(
                 'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
             if api_key == "":
-                return ProxyServer(self.sdk.ide.unique_id, "gpt-4")
+                return ProxyServer(self.ide.unique_id, "gpt-4")
             return OpenAI(api_key=api_key, default_model="gpt-4")
         return asyncio.get_event_loop().run_until_complete(load_gpt4())
-- 
cgit v1.2.3-70-g09d2

From cfbc9e8faa678e503682b887863964b5c2e7b03c Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 15:51:16 -0700
Subject: more self.sdk.ide

---
 continuedev/src/continuedev/core/sdk.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index f8bbcc53..cbaaf21a 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -186,10 +186,10 @@ class ContinueSDK(AbstractContinueSDK):
         highlighted_code = await self.ide.getHighlightedCode()
         if len(highlighted_code) == 0:
             # Get the full contents of all open files
-            files = await self.sdk.ide.getOpenFiles()
+            files = await self.ide.getOpenFiles()
             contents = {}
             for file in files:
-                contents[file] = await self.sdk.ide.readFile(file)
+                contents[file] = await self.ide.readFile(file)

             highlighted_code = [RangeInFile.from_entire_file(
                 filepath, content) for filepath, content in contents.items()]
-- 
cgit v1.2.3-70-g09d2

From c980e01d2f9328d5c37df14bea02f84a4890bc6a Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 16 Jun 2023 15:54:15 -0700
Subject: reverting back to before

---
 continuedev/src/continuedev/core/sdk.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index cbaaf21a..9f31c4c2 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -40,7 +40,7 @@ class Models:
             api_key = await self.sdk.get_user_secret(
                 'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
             if api_key == "":
-                return ProxyServer(self.ide.unique_id, "gpt-3.5-turbo")
+                return ProxyServer(self.sdk.ide.unique_id, "gpt-3.5-turbo")
             return OpenAI(api_key=api_key, default_model="gpt-3.5-turbo")
         return asyncio.get_event_loop().run_until_complete(load_gpt35())

@@ -50,7 +50,7 @@ class Models:
             api_key = await self.sdk.get_user_secret(
                 'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
             if api_key == "":
-                return ProxyServer(self.ide.unique_id, "gpt-3.5-turbo-16k")
+                return ProxyServer(self.sdk.ide.unique_id, "gpt-3.5-turbo-16k")
             return OpenAI(api_key=api_key, default_model="gpt-3.5-turbo-16k")
         return asyncio.get_event_loop().run_until_complete(load_gpt3516k())

@@ -60,7 +60,7 @@ class Models:
             api_key = await self.sdk.get_user_secret(
                 'OPENAI_API_KEY', 'Enter your OpenAI API key, OR press enter to try for free')
             if api_key == "":
-                return ProxyServer(self.ide.unique_id, "gpt-4")
+                return ProxyServer(self.sdk.ide.unique_id, "gpt-4")
             return OpenAI(api_key=api_key, default_model="gpt-4")
         return asyncio.get_event_loop().run_until_complete(load_gpt4())
-- 
cgit v1.2.3-70-g09d2
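One thread running through the last three patches is worth making explicit: the lazy model loaders live on the Models helper, which holds a reference to its owning SDK as self.sdk, so self.sdk.ide is the right path there, while methods on ContinueSDK itself reach the IDE directly as self.ide; that asymmetry is what the final revert restores. A rough sketch of the ownership shape, using simplified stand-ins for the real classes (the string "client" below replaces the ProxyServer-or-OpenAI construction in the diffs):

from functools import cached_property

class Models:
    """Lazily constructed LLM clients; owned by a ContinueSDK."""

    def __init__(self, sdk):
        self.sdk = sdk  # Models reaches the IDE through its owning SDK

    @cached_property
    def gpt3516k(self):
        # built once on first access, then cached; stands in for
        # load_gpt3516k's ProxyServer-or-OpenAI choice above
        return f"gpt-3.5-turbo-16k client for IDE {self.sdk.ide}"

class ContinueSDK:
    def __init__(self, ide):
        self.ide = ide              # the SDK holds the IDE directly
        self.models = Models(self)  # Models gets the SDK, not the IDE

sdk = ContinueSDK(ide="ide-handle")
print(sdk.models.gpt3516k)
print(sdk.models.gpt3516k is sdk.models.gpt3516k)  # True: cached_property

With this shape in mind, the "addressing feedback" patch's switch to self.ide inside Models would look up an attribute Models never defines, and "reverting back to before" puts self.sdk.ide back; the one change that survives is cfbc9e8, which correctly uses self.ide inside ContinueSDK.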