From 1018cd47306f95dde35e1a0cc6b2a830444af389 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 14 Sep 2023 10:54:08 -0700
Subject: feat: :adhesive_bandage: ca_bundle_path for maybeproxyopenai

---
 continuedev/src/continuedev/libs/llm/__init__.py           |  8 +++++++-
 continuedev/src/continuedev/libs/llm/ggml.py               |  4 ----
 continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py | 13 +++++++++++--
 continuedev/src/continuedev/libs/llm/openai.py             |  4 ----
 continuedev/src/continuedev/libs/llm/proxy_server.py       |  7 ++++---
 continuedev/src/continuedev/libs/util/count_tokens.py      |  2 +-
 docs/docs/reference/Models/anthropic.md                    |  2 +-
 docs/docs/reference/Models/ggml.md                         |  4 ++--
 docs/docs/reference/Models/hf_inference_api.md             |  2 +-
 docs/docs/reference/Models/hf_tgi.md                       |  2 +-
 docs/docs/reference/Models/llamacpp.md                     |  2 +-
 docs/docs/reference/Models/maybe_proxy_openai.md           |  2 +-
 docs/docs/reference/Models/ollama.md                       |  2 +-
 docs/docs/reference/Models/openai.md                       |  4 ++--
 docs/docs/reference/Models/queued.md                       |  2 +-
 docs/docs/reference/Models/replicate.md                    |  2 +-
 docs/docs/reference/Models/text_gen_interface.md           |  2 +-
 docs/docs/reference/Models/together.md                     |  2 +-
 docs/docs/reference/config.md                              |  2 +-
 extension/react-app/src/components/TextDialog.tsx          | 11 +++++------
 20 files changed, 43 insertions(+), 36 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 8c61ba43..ac4742c7 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -75,6 +75,10 @@ class LLM(ContinueBaseModel):
     verify_ssl: Optional[bool] = Field(
         None, description="Whether to verify SSL certificates for requests."
     )
+    ca_bundle_path: str = Field(
+        None,
+        description="Path to a custom CA bundle to use when making the HTTP request",
+    )
     prompt_templates: dict = Field(
         {},
         description='A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.',
     )
@@ -123,7 +127,9 @@ class LLM(ContinueBaseModel):
                 "description": "A function that is called upon every prompt and completion, by default to log to the file which can be viewed by clicking on the magnifying glass."
             },
             "api_key": {"description": "The API key for the LLM provider."},
-            "verify_ssl": {"description": "Whether to verify SSL certificates for requests."}
+            "verify_ssl": {
+                "description": "Whether to verify SSL certificates for requests."
+            },
         }

     def dict(self, **kwargs):
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index f20d8b45..2fd123bd 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -42,10 +42,6 @@ class GGML(LLM):
         None,
         description="Proxy URL to use when making the HTTP request",
     )
-    ca_bundle_path: str = Field(
-        None,
-        description="Path to a custom CA bundle to use when making the HTTP request",
-    )
     model: str = Field(
         "ggml", description="The name of the model to use (optional for the GGML class)"
     )
diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
index 5814f2aa..3fdcb42e 100644
--- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
+++ b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
@@ -45,9 +45,18 @@ class MaybeProxyOpenAI(LLM):
     ):
         await super().start(write_log=write_log, unique_id=unique_id)
         if self.api_key is None or self.api_key.strip() == "":
-            self.llm = ProxyServer(model=self.model, verify_ssl=self.verify_ssl)
+            self.llm = ProxyServer(
+                model=self.model,
+                verify_ssl=self.verify_ssl,
+                ca_bundle_path=self.ca_bundle_path,
+            )
         else:
-            self.llm = OpenAI(api_key=self.api_key, model=self.model, verify_ssl=self.verify_ssl)
+            self.llm = OpenAI(
+                api_key=self.api_key,
+                model=self.model,
+                verify_ssl=self.verify_ssl,
+                ca_bundle_path=self.ca_bundle_path,
+            )

         await self.llm.start(write_log=write_log, unique_id=unique_id)
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index c7ef9d95..6afa4e77 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -60,10 +60,6 @@ class OpenAI(LLM):
         description="OpenAI API key",
     )

-    ca_bundle_path: Optional[str] = Field(
-        None, description="Path to CA bundle to use for requests."
-    )
-
     proxy: Optional[str] = Field(None, description="Proxy URL to use for requests.")

     api_base: Optional[str] = Field(None, description="OpenAI API base URL.")
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 3efa805f..294c1713 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -10,9 +10,6 @@ from ...core.main import ChatMessage
 from ..llm import LLM
 from ..util.telemetry import posthog_logger

-ca_bundle_path = certifi.where()
-ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
-
 # SERVER_URL = "http://127.0.0.1:8080"
 SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
@@ -41,6 +38,10 @@
                 timeout=aiohttp.ClientTimeout(total=self.timeout),
             )
         else:
+            ca_bundle_path = (
+                certifi.where() if self.ca_bundle_path is None else self.ca_bundle_path
+            )
+            ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
             self._client_session = aiohttp.ClientSession(
                 connector=aiohttp.TCPConnector(ssl_context=ssl_context),
                 timeout=aiohttp.ClientTimeout(total=self.timeout),
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 1c1e020e..4def3198 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -29,7 +29,7 @@ def encoding_for_model(model_name: str):
         try:
             return tiktoken.encoding_for_model(aliases.get(model_name, model_name))
-        except:
+        except Exception as _:
             return tiktoken.encoding_for_model("gpt-3.5-turbo")
     except Exception as e:
         if not already_saw_import_err:
diff --git a/docs/docs/reference/Models/anthropic.md b/docs/docs/reference/Models/anthropic.md
index 25f258ae..8c3eea5c 100644
--- a/docs/docs/reference/Models/anthropic.md
+++ b/docs/docs/reference/Models/anthropic.md
@@ -25,4 +25,4 @@ Claude 2 is not yet publicly released. You can request early access [here](https

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/ggml.md b/docs/docs/reference/Models/ggml.md
index 239485c8..63abed85 100644
--- a/docs/docs/reference/Models/ggml.md
+++ b/docs/docs/reference/Models/ggml.md
@@ -23,8 +23,8 @@ config = ContinueConfig(

 ## Properties

-
+

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/hf_inference_api.md b/docs/docs/reference/Models/hf_inference_api.md
index e3f1ed7c..223d195a 100644
--- a/docs/docs/reference/Models/hf_inference_api.md
+++ b/docs/docs/reference/Models/hf_inference_api.md
@@ -26,4 +26,4 @@ config = ContinueConfig(

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/hf_tgi.md b/docs/docs/reference/Models/hf_tgi.md
index f44c9b24..0a18d1bb 100644
--- a/docs/docs/reference/Models/hf_tgi.md
+++ b/docs/docs/reference/Models/hf_tgi.md
@@ -12,4 +12,4 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/llamacpp.md b/docs/docs/reference/Models/llamacpp.md
index 82260035..e399a21c 100644
--- a/docs/docs/reference/Models/llamacpp.md
+++ b/docs/docs/reference/Models/llamacpp.md
@@ -31,4 +31,4 @@ config = ContinueConfig(

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/maybe_proxy_openai.md b/docs/docs/reference/Models/maybe_proxy_openai.md
index 712f1cba..2d5ee288 100644
--- a/docs/docs/reference/Models/maybe_proxy_openai.md
+++ b/docs/docs/reference/Models/maybe_proxy_openai.md
@@ -33,4 +33,4 @@ These classes support any models available through the OpenAI API, assuming your

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/ollama.md b/docs/docs/reference/Models/ollama.md
index 234db171..eebd1861 100644
--- a/docs/docs/reference/Models/ollama.md
+++ b/docs/docs/reference/Models/ollama.md
@@ -23,4 +23,4 @@ config = ContinueConfig(

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/openai.md b/docs/docs/reference/Models/openai.md
index ec5d2955..ff4df4c3 100644
--- a/docs/docs/reference/Models/openai.md
+++ b/docs/docs/reference/Models/openai.md
@@ -32,8 +32,8 @@ Options for serving models locally with an OpenAI-compatible server include:

 ## Properties

-
+

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/queued.md b/docs/docs/reference/Models/queued.md
index b38f78cc..91a60c67 100644
--- a/docs/docs/reference/Models/queued.md
+++ b/docs/docs/reference/Models/queued.md
@@ -25,4 +25,4 @@ config = ContinueConfig(

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/replicate.md b/docs/docs/reference/Models/replicate.md
index 05a804f0..9e2ee1d1 100644
--- a/docs/docs/reference/Models/replicate.md
+++ b/docs/docs/reference/Models/replicate.md
@@ -28,4 +28,4 @@ If you don't specify the `model` parameter, it will default to `replicate/llama-

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/text_gen_interface.md b/docs/docs/reference/Models/text_gen_interface.md
index 2d0dd8b5..49ebd81c 100644
--- a/docs/docs/reference/Models/text_gen_interface.md
+++ b/docs/docs/reference/Models/text_gen_interface.md
@@ -25,4 +25,4 @@ config = ContinueConfig(

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/together.md b/docs/docs/reference/Models/together.md
index dcb9e76c..9024899c 100644
--- a/docs/docs/reference/Models/together.md
+++ b/docs/docs/reference/Models/together.md
@@ -27,4 +27,4 @@ config = ContinueConfig(

 ### Inherited Properties

-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/docs/reference/config.md b/docs/docs/reference/config.md
index 54664a7e..ba872059 100644
--- a/docs/docs/reference/config.md
+++ b/docs/docs/reference/config.md
@@ -8,7 +8,7 @@ Continue can be deeply customized by editing the `ContinueConfig` object in `~/.

 ## Properties

-
+

 ### Inherited Properties

diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx
index 835b62f3..acf9675a 100644
--- a/extension/react-app/src/components/TextDialog.tsx
+++ b/extension/react-app/src/components/TextDialog.tsx
@@ -84,12 +84,11 @@ const TextDialog = (props: {
         }}
       >
-        {typeof props.message === "string" &&
-          (typeof props.message === "string" ? (
-            {props.message || ""}
-          ) : (
-            props.message
-          ))}
+        {typeof props.message === "string" ? (
+          {props.message || ""}
+        ) : (
+          props.message
+        )}
-- 
cgit v1.2.3-70-g09d2
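
Note on the core change: the fallback in ProxyServer.start. A user-supplied
ca_bundle_path now takes precedence when building the aiohttp SSL context, and
certifi's bundled CAs are used only when the option is unset (previously the
certifi-based context was built unconditionally at module import time). A
minimal standalone sketch of that logic; the make_client_session helper name
and the 60-second timeout are illustrative, not part of the commit:

    import ssl

    import aiohttp
    import certifi

    async def make_client_session(ca_bundle_path: str = None) -> aiohttp.ClientSession:
        # Prefer the user-supplied CA bundle; otherwise fall back to certifi's,
        # mirroring what ProxyServer.start does when ca_bundle_path is None.
        cafile = certifi.where() if ca_bundle_path is None else ca_bundle_path
        ssl_context = ssl.create_default_context(cafile=cafile)
        return aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(ssl_context=ssl_context),
            timeout=aiohttp.ClientTimeout(total=60),
        )

Since MaybeProxyOpenAI now forwards the field to both ProxyServer and OpenAI,
a model config in ~/.continue/config.py should be able to point either code
path at a corporate CA bundle, e.g.
MaybeProxyOpenAI(model="gpt-4", ca_bundle_path="/etc/ssl/certs/corp-ca.pem"),
where the bundle path is illustrative.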