-rw-r--r--  continuedev/src/continuedev/libs/constants/default_config.py |  3
-rw-r--r--  continuedev/src/continuedev/libs/llm/ggml.py                 | 13
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py               |  8
-rw-r--r--  extension/package.json                                       |  2
4 files changed, 17 insertions, 9 deletions
diff --git a/continuedev/src/continuedev/libs/constants/default_config.py b/continuedev/src/continuedev/libs/constants/default_config.py
index 238dc1da..8526df21 100644
--- a/continuedev/src/continuedev/libs/constants/default_config.py
+++ b/continuedev/src/continuedev/libs/constants/default_config.py
@@ -2,8 +2,7 @@ default_config = """\
 \"\"\"
 This is the Continue configuration file.
 
-If you aren't getting strong typing on these imports,
-be sure to select the Python interpreter in ~/.continue/server/env.
+See https://continue.dev/docs/customization to learn more.
 \"\"\"
 
 import subprocess
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index 7742e8c3..2f131354 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -8,12 +8,12 @@ from ...core.main import ChatMessage
 from ..llm import LLM
 from ..util.count_tokens import compile_chat_messages, DEFAULT_ARGS, count_tokens
 
-SERVER_URL = "http://localhost:8000"
-
 
 class GGML(LLM):
     # this is model-specific
     max_context_length: int = 2048
+    server_url: str = "http://localhost:8000"
+    verify_ssl: bool = True
 
     _client_session: aiohttp.ClientSession = None
 
@@ -21,7 +21,8 @@ class GGML(LLM):
         arbitrary_types_allowed = True
 
     async def start(self, **kwargs):
-        self._client_session = aiohttp.ClientSession()
+        self._client_session = aiohttp.ClientSession(
+            connector=aiohttp.TCPConnector(verify_ssl=self.verify_ssl))
 
     async def stop(self):
         await self._client_session.close()
@@ -50,7 +51,7 @@ class GGML(LLM):
 
         messages = compile_chat_messages(
             self.name, with_history, self.context_length, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message)
-        async with self._client_session.post(f"{SERVER_URL}/v1/completions", json={
+        async with self._client_session.post(f"{self.server_url}/v1/completions", json={
             "messages": messages,
             **args
         }) as resp:
@@ -67,7 +68,7 @@ class GGML(LLM):
             self.name, messages, self.context_length, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message)
         args["stream"] = True
 
-        async with self._client_session.post(f"{SERVER_URL}/v1/chat/completions", json={
+        async with self._client_session.post(f"{self.server_url}/v1/chat/completions", json={
             "messages": messages,
             **args
         }) as resp:
@@ -88,7 +89,7 @@ class GGML(LLM):
     async def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, str]:
         args = {**self.default_args, **kwargs}
 
-        async with self._client_session.post(f"{SERVER_URL}/v1/completions", json={
+        async with self._client_session.post(f"{self.server_url}/v1/completions", json={
            "messages": compile_chat_messages(args["model"], with_history, self.context_length, args["max_tokens"], prompt, functions=None, system_message=self.system_message),
             **args
         }) as resp:
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 30343331..e5cd0428 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -29,6 +29,8 @@ MAX_TOKENS_FOR_MODEL = {
 class OpenAI(LLM):
     model: str
     openai_server_info: Optional[OpenAIServerInfo] = None
+    verify_ssl: bool = True
+    ca_bundle_path: Optional[str] = None
 
     requires_api_key = "OPENAI_API_KEY"
     requires_write_log = True
 
@@ -49,6 +51,12 @@ class OpenAI(LLM):
             if self.openai_server_info.api_version is not None:
                 openai.api_version = self.openai_server_info.api_version
 
+        if self.verify_ssl == False:
+            openai.verify_ssl_certs = False
+
+        if self.ca_bundle_path is not None:
+            openai.ca_bundle_path = self.ca_bundle_path
+
     async def stop(self):
         pass
 
diff --git a/extension/package.json b/extension/package.json
index 18783391..fcaae2d6 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.249",
+  "version": "0.0.250",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
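
In practice these new fields would be set from a user's Continue config rather than by editing the modules above. Below is a minimal sketch of such usage; the hostnames, file paths, and the idea of constructing the classes directly with keyword arguments are assumptions based on this diff, not confirmed project usage.

    # Hypothetical config snippet exercising the options added in this diff.
    # GGML and OpenAI are the classes patched above; everything else here
    # (server URL, CA bundle path) is illustrative only.
    from continuedev.src.continuedev.libs.llm.ggml import GGML
    from continuedev.src.continuedev.libs.llm.openai import OpenAI

    # Point the GGML provider at a non-default server. verify_ssl=False is
    # passed through to aiohttp.TCPConnector(verify_ssl=False) in start(),
    # so self-signed certificates are accepted.
    ggml = GGML(
        server_url="https://llm.internal.example:8000",
        verify_ssl=False,
    )

    # For OpenAI, a corporate CA bundle can be trusted instead of disabling
    # verification outright; start() copies these settings onto the openai
    # module before any requests are made.
    gpt4 = OpenAI(
        model="gpt-4",
        ca_bundle_path="/etc/ssl/certs/corp-ca.pem",
    )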