summary refs log tree commit diff
path: root/continuedev
diff options
context:
space:
mode:
author	Nate Sesti <sestinj@gmail.com>	2023-09-14 10:54:08 -0700
committer	Nate Sesti <sestinj@gmail.com>	2023-09-14 10:54:08 -0700
commit	1018cd47306f95dde35e1a0cc6b2a830444af389 (patch)
tree	ce897ff72cb44456eae93037c6c915d5606b8734 /continuedev
parent	405e0946b2a1d17bd4c85754e237e91d3c055a0b (diff)
download	sncontinue-1018cd47306f95dde35e1a0cc6b2a830444af389.tar.gz
sncontinue-1018cd47306f95dde35e1a0cc6b2a830444af389.tar.bz2
sncontinue-1018cd47306f95dde35e1a0cc6b2a830444af389.zip
feat: :adhesive_bandage: ca_bundle_path for maybeproxyopenai
Diffstat (limited to 'continuedev')
-rw-r--r--continuedev/src/continuedev/libs/llm/__init__.py8
-rw-r--r--continuedev/src/continuedev/libs/llm/ggml.py4
-rw-r--r--continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py13
-rw-r--r--continuedev/src/continuedev/libs/llm/openai.py4
-rw-r--r--continuedev/src/continuedev/libs/llm/proxy_server.py7
-rw-r--r--continuedev/src/continuedev/libs/util/count_tokens.py2
6 files changed, 23 insertions, 15 deletions
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 8c61ba43..ac4742c7 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -75,6 +75,10 @@ class LLM(ContinueBaseModel):
verify_ssl: Optional[bool] = Field(
None, description="Whether to verify SSL certificates for requests."
)
+ ca_bundle_path: str = Field(
+ None,
+ description="Path to a custom CA bundle to use when making the HTTP request",
+ )
prompt_templates: dict = Field(
{},
description='A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.',
@@ -123,7 +127,9 @@ class LLM(ContinueBaseModel):
"description": "A function that is called upon every prompt and completion, by default to log to the file which can be viewed by clicking on the magnifying glass."
},
"api_key": {"description": "The API key for the LLM provider."},
- "verify_ssl": {"description": "Whether to verify SSL certificates for requests."}
+ "verify_ssl": {
+ "description": "Whether to verify SSL certificates for requests."
+ },
}
def dict(self, **kwargs):
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index f20d8b45..2fd123bd 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -42,10 +42,6 @@ class GGML(LLM):
None,
description="Proxy URL to use when making the HTTP request",
)
- ca_bundle_path: str = Field(
- None,
- description="Path to a custom CA bundle to use when making the HTTP request",
- )
model: str = Field(
"ggml", description="The name of the model to use (optional for the GGML class)"
)
diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
index 5814f2aa..3fdcb42e 100644
--- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
+++ b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
@@ -45,9 +45,18 @@ class MaybeProxyOpenAI(LLM):
):
await super().start(write_log=write_log, unique_id=unique_id)
if self.api_key is None or self.api_key.strip() == "":
- self.llm = ProxyServer(model=self.model, verify_ssl=self.verify_ssl)
+ self.llm = ProxyServer(
+ model=self.model,
+ verify_ssl=self.verify_ssl,
+ ca_bundle_path=self.ca_bundle_path,
+ )
else:
- self.llm = OpenAI(api_key=self.api_key, model=self.model, verify_ssl=self.verify_ssl)
+ self.llm = OpenAI(
+ api_key=self.api_key,
+ model=self.model,
+ verify_ssl=self.verify_ssl,
+ ca_bundle_path=self.ca_bundle_path,
+ )
await self.llm.start(write_log=write_log, unique_id=unique_id)
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index c7ef9d95..6afa4e77 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -60,10 +60,6 @@ class OpenAI(LLM):
description="OpenAI API key",
)
- ca_bundle_path: Optional[str] = Field(
- None, description="Path to CA bundle to use for requests."
- )
-
proxy: Optional[str] = Field(None, description="Proxy URL to use for requests.")
api_base: Optional[str] = Field(None, description="OpenAI API base URL.")
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 3efa805f..294c1713 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -10,9 +10,6 @@ from ...core.main import ChatMessage
from ..llm import LLM
from ..util.telemetry import posthog_logger
-ca_bundle_path = certifi.where()
-ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
-
# SERVER_URL = "http://127.0.0.1:8080"
SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
@@ -41,6 +38,10 @@ class ProxyServer(LLM):
timeout=aiohttp.ClientTimeout(total=self.timeout),
)
else:
+ ca_bundle_path = (
+ certifi.where() if self.ca_bundle_path is None else self.ca_bundle_path
+ )
+ ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
self._client_session = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl_context=ssl_context),
timeout=aiohttp.ClientTimeout(total=self.timeout),
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 1c1e020e..4def3198 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -29,7 +29,7 @@ def encoding_for_model(model_name: str):
try:
return tiktoken.encoding_for_model(aliases.get(model_name, model_name))
- except:
+ except Exception as _:
return tiktoken.encoding_for_model("gpt-3.5-turbo")
except Exception as e:
if not already_saw_import_err: