From e0522b92cfa80491718de07928ce6a31850dab70 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Wed, 13 Sep 2023 23:03:58 -0700
Subject: feat: :sparkles: verify_ssl option for all LLMs

---
 continuedev/src/continuedev/libs/llm/__init__.py    |  4 ++
 continuedev/src/continuedev/libs/llm/ggml.py        |  4 --
 continuedev/src/continuedev/libs/llm/hf_tgi.py      |  4 --
 continuedev/src/continuedev/libs/llm/llamacpp.py    |  4 --
 .../src/continuedev/libs/llm/maybe_proxy_openai.py  |  4 +-
 continuedev/src/continuedev/libs/llm/openai.py      |  4 --
 .../src/continuedev/libs/llm/proxy_server.py        |  2 +-
 .../src/continuedev/libs/llm/text_gen_interface.py  |  3 --
 continuedev/src/continuedev/libs/llm/together.py    |  4 --
 docs/docs/reference/Models/anthropic.md             |  2 +-
 docs/docs/reference/Models/ggml.md                  |  4 +-
 docs/docs/reference/Models/hf_inference_api.md      |  2 +-
 docs/docs/reference/Models/hf_tgi.md                |  4 +-
 docs/docs/reference/Models/llamacpp.md              |  4 +-
 docs/docs/reference/Models/maybe_proxy_openai.md    |  2 +-
 docs/docs/reference/Models/ollama.md                |  2 +-
 docs/docs/reference/Models/openai.md                |  4 +-
 docs/docs/reference/Models/queued.md                |  2 +-
 docs/docs/reference/Models/replicate.md             |  2 +-
 docs/docs/reference/Models/text_gen_interface.md    |  4 +-
 docs/docs/reference/Models/together.md              |  4 +-
 docs/docs/reference/config.md                       |  2 +-
 extension/react-app/src/components/ProgressBar.tsx  |  2 +-
 extension/react-app/src/components/TextDialog.tsx   | 43 +++++-------
 extension/react-app/src/pages/gui.tsx               |  2 +-
 25 files changed, 33 insertions(+), 85 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 11d81b3f..8c61ba43 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -72,6 +72,9 @@ class LLM(ContinueBaseModel):
         300,
         description="Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.",
     )
+    verify_ssl: Optional[bool] = Field(
+        None, description="Whether to verify SSL certificates for requests."
+    )
     prompt_templates: dict = Field(
         {},
         description='A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.',
     )
@@ -120,6 +123,7 @@ class LLM(ContinueBaseModel):
                 "description": "A function that is called upon every prompt and completion, by default to log to the file which can be viewed by clicking on the magnifying glass."
             },
             "api_key": {"description": "The API key for the LLM provider."},
+            "verify_ssl": {"description": "Whether to verify SSL certificates for requests."}
         }
 
     def dict(self, **kwargs):
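Since verify_ssl now lives on the base LLM class, every provider subclass inherits it. A minimal sketch of setting it from a user's ~/.continue/config.py follows; the import paths mirror this repo's layout, while the Models wrapper and the gpt-4 model name are illustrative assumptions rather than something this patch prescribes:

    from continuedev.src.continuedev.core.config import ContinueConfig
    from continuedev.src.continuedev.core.models import Models
    from continuedev.src.continuedev.libs.llm.openai import OpenAI

    config = ContinueConfig(
        models=Models(
            default=OpenAI(
                api_key="<API_KEY>",
                model="gpt-4",
                # Inherited from the base LLM class: None keeps the default
                # (verify certificates), False disables verification.
                verify_ssl=False,
            )
        )
    )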
}, "api_key": {"description": "The API key for the LLM provider."}, + "verify_ssl": {"description": "Whether to verify SSL certificates for requests."} } def dict(self, **kwargs): diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py index 0ab43703..f20d8b45 100644 --- a/continuedev/src/continuedev/libs/llm/ggml.py +++ b/continuedev/src/continuedev/libs/llm/ggml.py @@ -38,10 +38,6 @@ class GGML(LLM): "http://localhost:8000", description="URL of the OpenAI-compatible server where the model is being served", ) - verify_ssl: Optional[bool] = Field( - None, - description="Whether SSL certificates should be verified when making the HTTP request", - ) proxy: Optional[str] = Field( None, description="Proxy URL to use when making the HTTP request", diff --git a/continuedev/src/continuedev/libs/llm/hf_tgi.py b/continuedev/src/continuedev/libs/llm/hf_tgi.py index 6b7f21e7..168ef025 100644 --- a/continuedev/src/continuedev/libs/llm/hf_tgi.py +++ b/continuedev/src/continuedev/libs/llm/hf_tgi.py @@ -15,10 +15,6 @@ class HuggingFaceTGI(LLM): server_url: str = Field( "http://localhost:8080", description="URL of your TGI server" ) - verify_ssl: Optional[bool] = Field( - None, - description="Whether SSL certificates should be verified when making the HTTP request", - ) template_messages: Callable[[List[ChatMessage]], str] = code_llama_template_messages diff --git a/continuedev/src/continuedev/libs/llm/llamacpp.py b/continuedev/src/continuedev/libs/llm/llamacpp.py index 10dfcad8..60d9961f 100644 --- a/continuedev/src/continuedev/libs/llm/llamacpp.py +++ b/continuedev/src/continuedev/libs/llm/llamacpp.py @@ -35,10 +35,6 @@ class LlamaCpp(LLM): model: str = "llamacpp" server_url: str = Field("http://localhost:8080", description="URL of the server") - verify_ssl: Optional[bool] = Field( - None, - description="Whether SSL certificates should be verified when making the HTTP request", - ) llama_cpp_args: Dict[str, Any] = Field( {"stop": ["[INST]"]}, diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py index 69def48e..5814f2aa 100644 --- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py +++ b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py @@ -45,9 +45,9 @@ class MaybeProxyOpenAI(LLM): ): await super().start(write_log=write_log, unique_id=unique_id) if self.api_key is None or self.api_key.strip() == "": - self.llm = ProxyServer(model=self.model) + self.llm = ProxyServer(model=self.model, verify_ssl=self.verify_ssl) else: - self.llm = OpenAI(api_key=self.api_key, model=self.model) + self.llm = OpenAI(api_key=self.api_key, model=self.model, verify_ssl=self.verify_ssl) await self.llm.start(write_log=write_log, unique_id=unique_id) diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py index 744c07d2..c7ef9d95 100644 --- a/continuedev/src/continuedev/libs/llm/openai.py +++ b/continuedev/src/continuedev/libs/llm/openai.py @@ -60,10 +60,6 @@ class OpenAI(LLM): description="OpenAI API key", ) - verify_ssl: Optional[bool] = Field( - None, description="Whether to verify SSL certificates for requests." - ) - ca_bundle_path: Optional[str] = Field( None, description="Path to CA bundle to use for requests." 
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index d62fafa7..032464be 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -36,7 +36,7 @@ class ProxyServer(LLM):
     ):
         await super().start(**kwargs)
         self._client_session = aiohttp.ClientSession(
-            connector=aiohttp.TCPConnector(ssl_context=ssl_context),
+            connector=aiohttp.TCPConnector(ssl_context=ssl_context, verify_ssl=self.verify_ssl),
             timeout=aiohttp.ClientTimeout(total=self.timeout),
         )
         self.context_length = MAX_TOKENS_FOR_MODEL[self.model]
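One caveat on the hunk above: in aiohttp 3.x, ssl_context and verify_ssl are both legacy TCPConnector parameters that the library merges into its single ssl argument, and it raises ValueError when the two are combined with non-default values (for example verify_ssl=False alongside a custom ssl_context). A sketch of equivalent wiring through the ssl argument alone, under that reading of aiohttp's API:

    import aiohttp

    def make_connector(ssl_context, verify_ssl):
        # Collapse both knobs into aiohttp's single `ssl` argument.
        if verify_ssl is False:
            ssl_arg = False        # skip certificate verification entirely
        elif ssl_context is not None:
            ssl_arg = ssl_context  # verify using the custom context
        else:
            ssl_arg = None         # library default: verify with system CAs
        return aiohttp.TCPConnector(ssl=ssl_arg)

    def make_session(ssl_context, verify_ssl, timeout):
        # Mirrors the ClientSession construction in the hunk above.
        return aiohttp.ClientSession(
            connector=make_connector(ssl_context, verify_ssl),
            timeout=aiohttp.ClientTimeout(total=timeout),
        )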
diff --git a/continuedev/src/continuedev/libs/llm/text_gen_interface.py b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
index 1090d7dd..28b2bfae 100644
--- a/continuedev/src/continuedev/libs/llm/text_gen_interface.py
+++ b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
@@ -35,9 +35,6 @@ class TextGenUI(LLM):
         "http://localhost:5005",
         description="URL of your TextGenUI streaming server (separate from main server URL)",
     )
-    verify_ssl: Optional[bool] = Field(
-        None, description="Whether to verify SSL certificates for requests."
-    )
 
     prompt_templates = {
         "edit": simplest_edit_prompt,
diff --git a/continuedev/src/continuedev/libs/llm/together.py b/continuedev/src/continuedev/libs/llm/together.py
index a381abab..257f9a8f 100644
--- a/continuedev/src/continuedev/libs/llm/together.py
+++ b/continuedev/src/continuedev/libs/llm/together.py
@@ -38,10 +38,6 @@ class TogetherLLM(LLM):
         "https://api.together.xyz",
         description="The base URL for your Together API instance",
     )
-    verify_ssl: Optional[bool] = Field(
-        None,
-        description="Whether SSL certificates should be verified when making the HTTP request",
-    )
 
     _client_session: aiohttp.ClientSession = None
 
diff --git a/docs/docs/reference/Models/anthropic.md b/docs/docs/reference/Models/anthropic.md
index 8fec179a..25f258ae 100644
--- a/docs/docs/reference/Models/anthropic.md
+++ b/docs/docs/reference/Models/anthropic.md
@@ -25,4 +25,4 @@ Claude 2 is not yet publicly released. You can request early access [here](https
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/ggml.md b/docs/docs/reference/Models/ggml.md
index fbaf12d0..239485c8 100644
--- a/docs/docs/reference/Models/ggml.md
+++ b/docs/docs/reference/Models/ggml.md
@@ -23,8 +23,8 @@ config = ContinueConfig(
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/hf_inference_api.md b/docs/docs/reference/Models/hf_inference_api.md
index 605813be..e3f1ed7c 100644
--- a/docs/docs/reference/Models/hf_inference_api.md
+++ b/docs/docs/reference/Models/hf_inference_api.md
@@ -26,4 +26,4 @@ config = ContinueConfig(
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/hf_tgi.md b/docs/docs/reference/Models/hf_tgi.md
index b6eb61d7..f44c9b24 100644
--- a/docs/docs/reference/Models/hf_tgi.md
+++ b/docs/docs/reference/Models/hf_tgi.md
@@ -8,8 +8,8 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/llamacpp.md b/docs/docs/reference/Models/llamacpp.md
index 0bb06e74..82260035 100644
--- a/docs/docs/reference/Models/llamacpp.md
+++ b/docs/docs/reference/Models/llamacpp.md
@@ -27,8 +27,8 @@ config = ContinueConfig(
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/maybe_proxy_openai.md b/docs/docs/reference/Models/maybe_proxy_openai.md
index 22ac2382..712f1cba 100644
--- a/docs/docs/reference/Models/maybe_proxy_openai.md
+++ b/docs/docs/reference/Models/maybe_proxy_openai.md
@@ -33,4 +33,4 @@ These classes support any models available through the OpenAI API, assuming your
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/ollama.md b/docs/docs/reference/Models/ollama.md
index 9792ee52..234db171 100644
--- a/docs/docs/reference/Models/ollama.md
+++ b/docs/docs/reference/Models/ollama.md
@@ -23,4 +23,4 @@ config = ContinueConfig(
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/openai.md b/docs/docs/reference/Models/openai.md
index 0ade1a8f..ec5d2955 100644
--- a/docs/docs/reference/Models/openai.md
+++ b/docs/docs/reference/Models/openai.md
@@ -32,8 +32,8 @@ Options for serving models locally with an OpenAI-compatible server include:
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/queued.md b/docs/docs/reference/Models/queued.md
index e253da09..b38f78cc 100644
--- a/docs/docs/reference/Models/queued.md
+++ b/docs/docs/reference/Models/queued.md
@@ -25,4 +25,4 @@ config = ContinueConfig(
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/replicate.md b/docs/docs/reference/Models/replicate.md
index 0c93a758..05a804f0 100644
--- a/docs/docs/reference/Models/replicate.md
+++ b/docs/docs/reference/Models/replicate.md
@@ -28,4 +28,4 @@ If you don't specify the `model` parameter, it will default to `replicate/llama-
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/text_gen_interface.md b/docs/docs/reference/Models/text_gen_interface.md
index 21404960..2d0dd8b5 100644
--- a/docs/docs/reference/Models/text_gen_interface.md
+++ b/docs/docs/reference/Models/text_gen_interface.md
@@ -21,8 +21,8 @@ config = ContinueConfig(
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/Models/together.md b/docs/docs/reference/Models/together.md
index ec1ebb9c..dcb9e76c 100644
--- a/docs/docs/reference/Models/together.md
+++ b/docs/docs/reference/Models/together.md
@@ -23,8 +23,8 @@ config = ContinueConfig(
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
-<ClassPropertyRef ... />
\ No newline at end of file
+<ClassPropertyRef ... />
\ No newline at end of file
diff --git a/docs/docs/reference/config.md b/docs/docs/reference/config.md
index b26d8cde..54664a7e 100644
--- a/docs/docs/reference/config.md
+++ b/docs/docs/reference/config.md
@@ -8,7 +8,7 @@ Continue can be deeply customized by editing the `ContinueConfig` object in `~/.
 
 ## Properties
 
-<ClassPropertyRef ... />
+<ClassPropertyRef ... />
 
 ### Inherited Properties
 
diff --git a/extension/react-app/src/components/ProgressBar.tsx b/extension/react-app/src/components/ProgressBar.tsx
index b4a2efc9..4efee776 100644
--- a/extension/react-app/src/components/ProgressBar.tsx
+++ b/extension/react-app/src/components/ProgressBar.tsx
@@ -44,7 +44,7 @@ const ProgressBar = ({ completed, total }: ProgressBarProps) => {
   return (
     <>
diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx
index a9fcbb8f..835b62f3 100644
--- a/extension/react-app/src/components/TextDialog.tsx
+++ b/extension/react-app/src/components/TextDialog.tsx
@@ -85,44 +85,11 @@
         >
           {typeof props.message === "string" &&
-          props.message.includes("Continue uses GPT-4") ? (
-            <div>
-              <p>
-                Continue uses GPT-4 by default, but works with any model. If
-                you'd like to keep your code completely private, there are few
-                options:
-              </p>
-
-              <p>
-                Run a local model with ggml:{" "}
-                <a ...>
-                  5 minute quickstart
-                </a>
-              </p>
-
-              <p>
-                Use Azure OpenAI service, which is GDPR and HIPAA compliant:
-                <a ...>
-                  Tutorial
-                </a>
-              </p>
-
-              <p>
-                If you already have an LLM deployed on your own infrastructure,
-                or would like to do so, please contact us at hi@continue.dev.
-              </p>
-            </div>
-          ) : typeof props.message === "string" ? (
-            {props.message || ""}
-          ) : (
-            props.message
-          )}
+          (typeof props.message === "string" ? (
+            {props.message || ""}
+          ) : (
+            props.message
+          ))}
diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx
index a52e1ffc..35531f8c 100644
--- a/extension/react-app/src/pages/gui.tsx
+++ b/extension/react-app/src/pages/gui.tsx
@@ -265,7 +265,7 @@ function GUI(props: GUIProps) {
             Continue's OpenAI API key. To keep using Continue, you can either use your
             own API key, or use a local LLM. To read more about the options, see our{" "}
-            <a ...>
+            <a ...>
              documentation
            </a>
            . If you're just looking for fastest way to keep going, type
-- 
cgit v1.2.3-70-g09d2
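For reference, a sketch of how the MaybeProxyOpenAI forwarding earlier in this patch plays out at runtime; the write_log callable and unique_id value are stand-ins for whatever the IDE session normally supplies, and the keyword usage follows the pydantic-style fields shown in the hunks above:

    import asyncio

    from continuedev.src.continuedev.libs.llm.maybe_proxy_openai import (
        MaybeProxyOpenAI,
    )

    async def main():
        # With no api_key, start() builds ProxyServer(model=..., verify_ssl=False);
        # with an api_key set, it builds OpenAI(..., verify_ssl=False) instead.
        llm = MaybeProxyOpenAI(model="gpt-4", verify_ssl=False)
        await llm.start(write_log=print, unique_id="dev-session")

    asyncio.run(main())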