author    Nate Sesti <sestinj@gmail.com>  2023-08-05 00:07:11 -0700
committer Nate Sesti <sestinj@gmail.com>  2023-08-05 00:07:11 -0700
commit    68233071dd0d97a353a66fe5627d69f97a389ca8 (patch)
tree      2e1904c10a7db0a21abc9f37cd28bad2a1934e7c
parent    9620ba0154bde0778a0fe453d9dab29ae9a8082d (diff)
fix: :bug: set api_keys in config.py, fix spawn error handling
-rw-r--r--  continuedev/src/continuedev/libs/constants/default_config.py    6
-rw-r--r--  continuedev/src/continuedev/libs/llm/anthropic.py                4
-rw-r--r--  continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py       8
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py                   4
-rw-r--r--  continuedev/src/continuedev/plugins/steps/chat.py               27
-rw-r--r--  docs/docs/customization.md                                      21
-rw-r--r--  extension/package.json                                           7
-rw-r--r--  extension/src/activation/activate.ts                             3
-rw-r--r--  extension/src/activation/environmentSetup.ts                    40
9 files changed, 78 insertions, 42 deletions
diff --git a/continuedev/src/continuedev/libs/constants/default_config.py b/continuedev/src/continuedev/libs/constants/default_config.py
index 73c8eeba..d3922091 100644
--- a/continuedev/src/continuedev/libs/constants/default_config.py
+++ b/continuedev/src/continuedev/libs/constants/default_config.py
@@ -51,8 +51,10 @@ config = ContinueConfig(
allow_anonymous_telemetry=True,
models=Models(
- default=MaybeProxyOpenAI(model="gpt-4"),
- medium=MaybeProxyOpenAI(model="gpt-3.5-turbo")
+ # You can try Continue with limited free usage. Please eventually replace with your own API key.
+ # Learn how to customize models here: https://continue.dev/docs/customization#change-the-default-llm
+ default=MaybeProxyOpenAI(api_key="", model="gpt-4"),
+ medium=MaybeProxyOpenAI(api_key="", model="gpt-3.5-turbo")
),
# Set a system message with information that the LLM should always keep in mind
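With `api_key=""`, requests route through Continue's proxy server for limited free usage; a real key switches to direct OpenAI calls (see `maybe_proxy_openai.py` below). A minimal sketch of an end-user `~/.continue/config.py` that pulls the key from an environment variable instead of hard-coding it; the `ContinueConfig`/`Models` import paths are assumptions, inferred by analogy with the docs examples later in this commit:

```python
import os

from continuedev.src.continuedev.core.config import ContinueConfig  # assumed path
from continuedev.src.continuedev.core.models import Models  # assumed path
from continuedev.src.continuedev.libs.llm.maybe_proxy_openai import MaybeProxyOpenAI

# An empty or missing key keeps you on the free proxy; a real key
# switches MaybeProxyOpenAI to calls billed against your own account.
API_KEY = os.getenv("OPENAI_API_KEY", "")

config = ContinueConfig(
    models=Models(
        default=MaybeProxyOpenAI(api_key=API_KEY, model="gpt-4"),
        medium=MaybeProxyOpenAI(api_key=API_KEY, model="gpt-3.5-turbo"),
    ),
)
```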
diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/continuedev/src/continuedev/libs/llm/anthropic.py
index ac5c56a4..9d7bc93f 100644
--- a/continuedev/src/continuedev/libs/llm/anthropic.py
+++ b/continuedev/src/continuedev/libs/llm/anthropic.py
@@ -8,9 +8,9 @@ from ..util.count_tokens import compile_chat_messages, DEFAULT_ARGS, count_token
class AnthropicLLM(LLM):
+ api_key: str
model: str = "claude-2"
- requires_api_key: str = "ANTHROPIC_API_KEY"
requires_write_log = True
_async_client: AsyncAnthropic = None
@@ -21,7 +21,7 @@ class AnthropicLLM(LLM):
async def start(self, *, api_key: Optional[str] = None, write_log: Callable[[str], None], **kwargs):
self.write_log = write_log
- self._async_client = AsyncAnthropic(api_key=api_key)
+ self._async_client = AsyncAnthropic(api_key=self.api_key)
async def stop(self):
pass
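The API key now travels with the `AnthropicLLM` instance rather than being read from the `ANTHROPIC_API_KEY` environment variable, and `start()` builds the client from `self.api_key` while ignoring its own `api_key` kwarg. A minimal usage sketch, assuming the import path shown in the docs diff below; `write_log=print` is just a stand-in logger:

```python
import asyncio

from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM

async def main() -> None:
    # The key is an instance field now, not an environment variable.
    llm = AnthropicLLM(api_key="<ANTHROPIC_API_KEY>", model="claude-2")
    await llm.start(write_log=print)  # client constructed from self.api_key

asyncio.run(main())
```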
diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
index edf58fd7..fbc2c43f 100644
--- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
+++ b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
@@ -8,8 +8,8 @@ from .openai import OpenAI
class MaybeProxyOpenAI(LLM):
model: str
+ api_key: Optional[str] = None
- requires_api_key: Optional[str] = "OPENAI_API_KEY"
requires_write_log: bool = True
requires_unique_id: bool = True
system_message: Union[str, None] = None
@@ -25,12 +25,12 @@ class MaybeProxyOpenAI(LLM):
return self.llm.context_length
async def start(self, *, api_key: Optional[str] = None, unique_id: str, write_log: Callable[[str], None]):
- if api_key is None or api_key.strip() == "":
+ if self.api_key is None or self.api_key.strip() == "":
self.llm = ProxyServer(model=self.model)
else:
- self.llm = OpenAI(model=self.model)
+ self.llm = OpenAI(api_key=self.api_key, model=self.model)
- await self.llm.start(api_key=api_key, write_log=write_log, unique_id=unique_id)
+ await self.llm.start(write_log=write_log, unique_id=unique_id)
async def stop(self):
await self.llm.stop()
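The routing rule in `start()` is the heart of the free-usage scheme: a blank or missing key falls back to Continue's `ProxyServer`, anything else goes to `OpenAI` directly, with the key now handed over at construction rather than through `start()`. A standalone sketch of the same decision, with illustrative names that are not part of the commit:

```python
from typing import Optional

def select_backend(api_key: Optional[str]) -> str:
    """Mirrors MaybeProxyOpenAI.start(): a blank key means the free proxy."""
    if api_key is None or api_key.strip() == "":
        return "proxy_server"  # limited free usage through Continue's proxy
    return "openai"            # direct calls billed to the user's own key

assert select_backend(None) == "proxy_server"
assert select_backend("   ") == "proxy_server"
assert select_backend("sk-...") == "openai"
```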
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index e5cd0428..93c13094 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -27,21 +27,19 @@ MAX_TOKENS_FOR_MODEL = {
class OpenAI(LLM):
+ api_key: str
model: str
openai_server_info: Optional[OpenAIServerInfo] = None
verify_ssl: bool = True
ca_bundle_path: Optional[str] = None
- requires_api_key = "OPENAI_API_KEY"
requires_write_log = True
system_message: Optional[str] = None
write_log: Optional[Callable[[str], None]] = None
- api_key: str = None
async def start(self, *, api_key: Optional[str] = None, write_log: Callable[[str], None], **kwargs):
self.write_log = write_log
- self.api_key = api_key
openai.api_key = self.api_key
if self.openai_server_info is not None:
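One consequence worth noting: in the pre-1.0 `openai` SDK, `openai.api_key` is a module-level global, so whichever `OpenAI` instance ran `start()` most recently sets the key for every request in the process. A tiny sketch of that pitfall (keys are placeholders):

```python
import openai

openai.api_key = "sk-first"   # set by one instance's start()
openai.api_key = "sk-second"  # a later start() overwrites it process-wide
```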
diff --git a/continuedev/src/continuedev/plugins/steps/chat.py b/continuedev/src/continuedev/plugins/steps/chat.py
index 455d5a13..8e494ad0 100644
--- a/continuedev/src/continuedev/plugins/steps/chat.py
+++ b/continuedev/src/continuedev/plugins/steps/chat.py
@@ -1,4 +1,5 @@
import json
+from textwrap import dedent
from typing import Any, Coroutine, List
from pydantic import Field
@@ -10,6 +11,7 @@ from ...core.main import FunctionCall, Models
from ...core.main import ChatMessage, Step, step_to_json_schema
from ...core.sdk import ContinueSDK
from ...libs.llm.openai import OpenAI
+from ...libs.llm.maybe_proxy_openai import MaybeProxyOpenAI
import openai
import os
from dotenv import load_dotenv
@@ -19,6 +21,8 @@ load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
+FREE_USAGE_STEP_NAME = "Please enter OpenAI API key"
+
class SimpleChatStep(Step):
name: str = "Generating Response..."
@@ -27,6 +31,29 @@ class SimpleChatStep(Step):
messages: List[ChatMessage] = None
async def run(self, sdk: ContinueSDK):
+ # Check whether the user is on free proxy-server usage (no API key configured)
+ if isinstance(sdk.models.default, MaybeProxyOpenAI) and (sdk.models.default.api_key is None or sdk.models.default.api_key.strip() == "") and len(list(filter(lambda x: not x.step.hide, sdk.history.timeline))) >= 10 and len(list(filter(lambda x: x.step.name == FREE_USAGE_STEP_NAME, sdk.history.timeline))) == 0:
+ await sdk.run_step(MessageStep(
+ name=FREE_USAGE_STEP_NAME,
+ message=dedent("""\
+ To make it easier to use Continue, you're getting limited free usage. When you have the chance, please enter your own OpenAI key in `~/.continue/config.py`. You can open the file by using the '/config' slash command in the text box below.
+
+ Here's an example of how to edit the file:
+ ```python
+ ...
+ config=ContinueConfig(
+ ...
+ models=Models(
+ default=MaybeProxyOpenAI(api_key="<API_KEY>", model="gpt-4"),
+ medium=MaybeProxyOpenAI(api_key="<API_KEY>", model="gpt-3.5-turbo")
+ )
+ )
+ ```
+
+ You can also learn more about customizations [here](https://continue.dev/docs/customization).
+ """),
+ ))
+
messages = self.messages or await sdk.get_chat_context()
generator = sdk.models.default.stream_chat(
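The one-line gate above is dense; unpacked, it says the nudge fires only for proxy users (no API key set) with at least ten visible steps in their history who have not already been shown the message. A readable sketch of the same predicate, with an illustrative helper name that is not in the commit:

```python
def should_show_free_usage_message(sdk) -> bool:
    default = sdk.models.default
    on_free_proxy = isinstance(default, MaybeProxyOpenAI) and (
        default.api_key is None or default.api_key.strip() == ""
    )
    visible_steps = [node for node in sdk.history.timeline if not node.step.hide]
    already_shown = any(
        node.step.name == FREE_USAGE_STEP_NAME for node in sdk.history.timeline
    )
    return on_free_proxy and len(visible_steps) >= 10 and not already_shown
```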
diff --git a/docs/docs/customization.md b/docs/docs/customization.md
index 8fe57fdf..cfee1e65 100644
--- a/docs/docs/customization.md
+++ b/docs/docs/customization.md
@@ -27,10 +27,19 @@ With the `MaybeProxyOpenAI` `LLM`, new users can try out Continue with GPT-4 usi
Once you are using Continue regularly though, you will need to add an OpenAI API key that has access to GPT-4 by following these steps:
1. Copy your API key from https://platform.openai.com/account/api-keys
-2. Use the cmd+, (Mac) / ctrl+, (Windows) to open your VS Code settings
-3. Type "Continue" in the search bar
-4. Click Edit in settings.json under Continue: OpenAI_API_KEY" section
-5. Paste your API key as the value for "continue.OPENAI_API_KEY" in settings.json
+2. Open `~/.continue/config.py`. You can do this by using the '/config' command in Continue
+3. Change the default LLMs to look like this:
+
+```python
+API_KEY = "<API_KEY>"
+config = ContinueConfig(
+ ...
+ models=Models(
+ default=MaybeProxyOpenAI(model="gpt-4", api_key=API_KEY),
+ medium=MaybeProxyOpenAI(model="gpt-3.5-turbo", api_key=API_KEY)
+ )
+)
+```
The `MaybeProxyOpenAI` class will automatically switch to using your API key instead of ours. If you'd like to explicitly use one or the other, you can use the `ProxyServer` or `OpenAI` classes instead.
@@ -46,7 +55,7 @@ from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM
config = ContinueConfig(
...
models=Models(
- default=AnthropicLLM(model="claude-2")
+ default=AnthropicLLM(api_key="<API_KEY>", model="claude-2")
)
)
```
@@ -101,7 +110,7 @@ from continuedev.src.continuedev.libs.llm.openai import OpenAI, OpenAIServerInfo
config = ContinueConfig(
...
models=Models(
- default=OpenAI(model="gpt-3.5-turbo", server_info=OpenAIServerInfo(
+ default=OpenAI(api_key="my-api-key", model="gpt-3.5-turbo", server_info=OpenAIServerInfo(
api_base="https://my-azure-openai-instance.openai.azure.com/",
engine="my-azure-openai-deployment",
api_version="2023-03-15-preview",
diff --git a/extension/package.json b/extension/package.json
index d4de7f2a..d4235d48 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -47,12 +47,7 @@
"continue.serverUrl": {
"type": "string",
"default": "http://localhost:65432",
- "description": "The URL of the Continue server. Only change this if you are running the server manually. If you want to use an LLM hosted at a custom URL, please see https://continue.dev/docs/customization#change-the-default-llm."
- },
- "continue.OPENAI_API_KEY": {
- "type": "string",
- "default": null,
- "description": "The OpenAI API key to use for code generation. Leave empty to get limited free usage of Continue."
+ "description": "The URL of the Continue server. Only change this if you are running the server manually. If you want to use an LLM hosted at a custom URL, please see https://continue.dev/docs/customization#change-the-default-llm. All other configuration is done in `~/.continue/config.py`, which you can access by using the '/config' slash command."
}
}
},
diff --git a/extension/src/activation/activate.ts b/extension/src/activation/activate.ts
index 560b970c..831d1160 100644
--- a/extension/src/activation/activate.ts
+++ b/extension/src/activation/activate.ts
@@ -54,9 +54,8 @@ export async function activateExtension(context: vscode.ExtensionContext) {
registerAllCommands(context);
registerQuickFixProvider();
- // Start the server and display loader if taking > 2 seconds
+ // Start the server
const sessionIdPromise = (async () => {
- // Start the server and set serverStarted to true when done
await startContinuePythonServer();
console.log("Continue server started");
diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts
index d41cb289..1ca32841 100644
--- a/extension/src/activation/environmentSetup.ts
+++ b/extension/src/activation/environmentSetup.ts
@@ -231,23 +231,29 @@ export async function startContinuePythonServer() {
let delay = 1000; // Delay between each attempt in milliseconds
const spawnChild = () => {
- const child = spawn(destination, {
- detached: true,
- stdio: "ignore",
- });
-
- child.on("error", (err: any) => {
- if (attempts < maxAttempts) {
- attempts++;
- console.log(
- `Error caught (likely EBUSY). Retrying attempt ${attempts}...`
- );
- setTimeout(spawnChild, delay);
- } else {
- console.error("Failed to start subprocess.", err);
- }
- });
- child.unref();
+ const retry = () => {
+ attempts++;
+ console.log(
+ `Error caught (likely EBUSY). Retrying attempt ${attempts}...`
+ );
+ setTimeout(spawnChild, delay);
+ };
+ try {
+ const child = spawn(destination, {
+ detached: true,
+ stdio: "ignore",
+ });
+ child.on("error", (err: any) => {
+ if (attempts < maxAttempts) {
+ retry();
+ } else {
+ console.error("Failed to start subprocess.", err);
+ }
+ });
+ child.unref();
+ } catch (e: any) {
+ retry();
+ }
};
spawnChild();
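The fix wraps `spawn()` in a try/catch so a synchronous launch failure funnels into the same bounded retry as the asynchronous `"error"` event. A Python sketch of that retry shape, assuming the EBUSY guess and constants from the TypeScript above; the actual fix is the code in this diff, and the names here are illustrative:

```python
import subprocess
import threading

MAX_ATTEMPTS = 5     # illustrative bound; the TS code has its own maxAttempts
DELAY_SECONDS = 1.0  # matches the 1000 ms delay above

def spawn_with_retry(destination: str, attempts: int = 0) -> None:
    try:
        # Detach the child so it outlives the caller, like detached/unref in TS.
        subprocess.Popen([destination], start_new_session=True)
    except OSError as err:
        if attempts < MAX_ATTEMPTS:
            # Likely EBUSY while the binary is still being written; retry soon.
            threading.Timer(
                DELAY_SECONDS, spawn_with_retry, args=(destination, attempts + 1)
            ).start()
        else:
            print(f"Failed to start subprocess: {err}")
```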