diff options
Diffstat (limited to 'continuedev')
5 files changed, 38 insertions, 11 deletions
diff --git a/continuedev/src/continuedev/libs/constants/default_config.py b/continuedev/src/continuedev/libs/constants/default_config.py index 73c8eeba..d3922091 100644 --- a/continuedev/src/continuedev/libs/constants/default_config.py +++ b/continuedev/src/continuedev/libs/constants/default_config.py @@ -51,8 +51,10 @@ config = ContinueConfig( allow_anonymous_telemetry=True, models=Models( - default=MaybeProxyOpenAI(model="gpt-4"), - medium=MaybeProxyOpenAI(model="gpt-3.5-turbo") + # You can try Continue with limited free usage. Please eventually replace with your own API key. + # Learn how to customize models here: https://continue.dev/docs/customization#change-the-default-llm + default=MaybeProxyOpenAI(api_key="", model="gpt-4"), + medium=MaybeProxyOpenAI(api_key="", model="gpt-3.5-turbo") ), # Set a system message with information that the LLM should always keep in mind diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/continuedev/src/continuedev/libs/llm/anthropic.py index ac5c56a4..9d7bc93f 100644 --- a/continuedev/src/continuedev/libs/llm/anthropic.py +++ b/continuedev/src/continuedev/libs/llm/anthropic.py @@ -8,9 +8,9 @@ from ..util.count_tokens import compile_chat_messages, DEFAULT_ARGS, count_token class AnthropicLLM(LLM): + api_key: str model: str = "claude-2" - requires_api_key: str = "ANTHROPIC_API_KEY" requires_write_log = True _async_client: AsyncAnthropic = None @@ -21,7 +21,7 @@ class AnthropicLLM(LLM): async def start(self, *, api_key: Optional[str] = None, write_log: Callable[[str], None], **kwargs): self.write_log = write_log - self._async_client = AsyncAnthropic(api_key=api_key) + self._async_client = AsyncAnthropic(api_key=self.api_key) async def stop(self): pass diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py index edf58fd7..fbc2c43f 100644 --- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py +++ 
b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py @@ -8,8 +8,8 @@ from .openai import OpenAI class MaybeProxyOpenAI(LLM): model: str + api_key: Optional[str] = None - requires_api_key: Optional[str] = "OPENAI_API_KEY" requires_write_log: bool = True requires_unique_id: bool = True system_message: Union[str, None] = None @@ -25,12 +25,12 @@ class MaybeProxyOpenAI(LLM): return self.llm.context_length async def start(self, *, api_key: Optional[str] = None, unique_id: str, write_log: Callable[[str], None]): - if api_key is None or api_key.strip() == "": + if self.api_key is None or self.api_key.strip() == "": self.llm = ProxyServer(model=self.model) else: - self.llm = OpenAI(model=self.model) + self.llm = OpenAI(api_key=self.api_key, model=self.model) - await self.llm.start(api_key=api_key, write_log=write_log, unique_id=unique_id) + await self.llm.start(write_log=write_log, unique_id=unique_id) async def stop(self): await self.llm.stop() diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py index e5cd0428..93c13094 100644 --- a/continuedev/src/continuedev/libs/llm/openai.py +++ b/continuedev/src/continuedev/libs/llm/openai.py @@ -27,21 +27,19 @@ MAX_TOKENS_FOR_MODEL = { class OpenAI(LLM): + api_key: str model: str openai_server_info: Optional[OpenAIServerInfo] = None verify_ssl: bool = True ca_bundle_path: Optional[str] = None - requires_api_key = "OPENAI_API_KEY" requires_write_log = True system_message: Optional[str] = None write_log: Optional[Callable[[str], None]] = None - api_key: str = None async def start(self, *, api_key: Optional[str] = None, write_log: Callable[[str], None], **kwargs): self.write_log = write_log - self.api_key = api_key openai.api_key = self.api_key if self.openai_server_info is not None: diff --git a/continuedev/src/continuedev/plugins/steps/chat.py b/continuedev/src/continuedev/plugins/steps/chat.py index 455d5a13..8e494ad0 100644 --- 
a/continuedev/src/continuedev/plugins/steps/chat.py +++ b/continuedev/src/continuedev/plugins/steps/chat.py @@ -1,4 +1,5 @@ import json +from textwrap import dedent from typing import Any, Coroutine, List from pydantic import Field @@ -10,6 +11,7 @@ from ...core.main import FunctionCall, Models from ...core.main import ChatMessage, Step, step_to_json_schema from ...core.sdk import ContinueSDK from ...libs.llm.openai import OpenAI +from ...libs.llm.maybe_proxy_openai import MaybeProxyOpenAI import openai import os from dotenv import load_dotenv @@ -19,6 +21,8 @@ load_dotenv() OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") openai.api_key = OPENAI_API_KEY +FREE_USAGE_STEP_NAME = "Please enter OpenAI API key" + class SimpleChatStep(Step): name: str = "Generating Response..." @@ -27,6 +31,29 @@ class SimpleChatStep(Step): messages: List[ChatMessage] = None async def run(self, sdk: ContinueSDK): + # Check if proxy server API key + if isinstance(sdk.models.default, MaybeProxyOpenAI) and (sdk.models.default.api_key is None or sdk.models.default.api_key.strip() == "") and len(list(filter(lambda x: not x.step.hide, sdk.history.timeline))) >= 10 and len(list(filter(lambda x: x.step.name == FREE_USAGE_STEP_NAME, sdk.history.timeline))) == 0: + await sdk.run_step(MessageStep( + name=FREE_USAGE_STEP_NAME, + message=dedent("""\ + To make it easier to use Continue, you're getting limited free usage. When you have the chance, please enter your own OpenAI key in `~/.continue/config.py`. You can open the file by using the '/config' slash command in the text box below. + + Here's an example of how to edit the file: + ```python + ... + config=ContinueConfig( + ... + models=Models( + default=MaybeProxyOpenAI(api_key="<API_KEY>", model="gpt-4"), + medium=MaybeProxyOpenAI(api_key="<API_KEY>", model="gpt-3.5-turbo") + ) + ) + ``` + + You can also learn more about customizations [here](https://continue.dev/docs/customization). 
+ """), + )) + messages = self.messages or await sdk.get_chat_context() generator = sdk.models.default.stream_chat(