Diffstat (limited to 'continuedev')
-rw-r--r--  continuedev/src/continuedev/libs/llm/__init__.py            |  8
-rw-r--r--  continuedev/src/continuedev/libs/llm/ggml.py                |  4
-rw-r--r--  continuedev/src/continuedev/libs/llm/llamacpp.py            |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py  | 13
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py              |  4
-rw-r--r--  continuedev/src/continuedev/libs/llm/prompts/edit.py        | 10
-rw-r--r--  continuedev/src/continuedev/libs/llm/proxy_server.py        |  7
-rw-r--r--  continuedev/src/continuedev/libs/util/count_tokens.py       |  2
-rw-r--r--  continuedev/src/continuedev/models/reference/test.py        |  2
-rw-r--r--  continuedev/src/continuedev/plugins/steps/core/core.py      |  1
10 files changed, 29 insertions(+), 24 deletions(-)
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 8c61ba43..ac4742c7 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -75,6 +75,10 @@ class LLM(ContinueBaseModel):
verify_ssl: Optional[bool] = Field(
None, description="Whether to verify SSL certificates for requests."
)
+ ca_bundle_path: Optional[str] = Field(
+ None,
+ description="Path to a custom CA bundle to use when making the HTTP request",
+ )
prompt_templates: dict = Field(
{},
description='A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.',
@@ -123,7 +127,9 @@ class LLM(ContinueBaseModel):
"description": "A function that is called upon every prompt and completion, by default to log to the file which can be viewed by clicking on the magnifying glass."
},
"api_key": {"description": "The API key for the LLM provider."},
- "verify_ssl": {"description": "Whether to verify SSL certificates for requests."}
+ "verify_ssl": {
+ "description": "Whether to verify SSL certificates for requests."
+ },
}
def dict(self, **kwargs):
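This hunk hoists ca_bundle_path into the shared LLM base model, alongside verify_ssl, so the field no longer has to be redeclared by each provider. Any subclass can now take it directly; a minimal sketch, with placeholder key and path values:

    from continuedev.src.continuedev.libs.llm.openai import OpenAI

    # ca_bundle_path is inherited from the LLM base class rather than
    # redefined per provider; the key and bundle path are placeholders.
    llm = OpenAI(
        api_key="sk-...",
        model="gpt-4",
        ca_bundle_path="/etc/ssl/certs/corporate-root.pem",
    )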
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index f20d8b45..2fd123bd 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -42,10 +42,6 @@ class GGML(LLM):
None,
description="Proxy URL to use when making the HTTP request",
)
- ca_bundle_path: str = Field(
- None,
- description="Path to a custom CA bundle to use when making the HTTP request",
- )
model: str = Field(
"ggml", description="The name of the model to use (optional for the GGML class)"
)
diff --git a/continuedev/src/continuedev/libs/llm/llamacpp.py b/continuedev/src/continuedev/libs/llm/llamacpp.py
index 60d9961f..c795bd15 100644
--- a/continuedev/src/continuedev/libs/llm/llamacpp.py
+++ b/continuedev/src/continuedev/libs/llm/llamacpp.py
@@ -1,5 +1,5 @@
import json
-from typing import Any, Callable, Dict, Optional
+from typing import Any, Callable, Dict
import aiohttp
from pydantic import Field
diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
index 5814f2aa..3fdcb42e 100644
--- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
+++ b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
@@ -45,9 +45,18 @@ class MaybeProxyOpenAI(LLM):
):
await super().start(write_log=write_log, unique_id=unique_id)
if self.api_key is None or self.api_key.strip() == "":
- self.llm = ProxyServer(model=self.model, verify_ssl=self.verify_ssl)
+ self.llm = ProxyServer(
+ model=self.model,
+ verify_ssl=self.verify_ssl,
+ ca_bundle_path=self.ca_bundle_path,
+ )
else:
- self.llm = OpenAI(api_key=self.api_key, model=self.model, verify_ssl=self.verify_ssl)
+ self.llm = OpenAI(
+ api_key=self.api_key,
+ model=self.model,
+ verify_ssl=self.verify_ssl,
+ ca_bundle_path=self.ca_bundle_path,
+ )
await self.llm.start(write_log=write_log, unique_id=unique_id)
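The routing rule is unchanged here: a missing or blank api_key selects the hosted ProxyServer, anything else selects OpenAI directly; the new part is only that ca_bundle_path now rides along on both paths. Condensed as a sketch:

    # Equivalent selection logic (sketch, not part of the diff):
    use_proxy = self.api_key is None or self.api_key.strip() == ""
    backend_cls = ProxyServer if use_proxy else OpenAI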
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index c7ef9d95..6afa4e77 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -60,10 +60,6 @@ class OpenAI(LLM):
description="OpenAI API key",
)
- ca_bundle_path: Optional[str] = Field(
- None, description="Path to CA bundle to use for requests."
- )
-
proxy: Optional[str] = Field(None, description="Proxy URL to use for requests.")
api_base: Optional[str] = Field(None, description="OpenAI API base URL.")
diff --git a/continuedev/src/continuedev/libs/llm/prompts/edit.py b/continuedev/src/continuedev/libs/llm/prompts/edit.py
index b4892669..7da5a192 100644
--- a/continuedev/src/continuedev/libs/llm/prompts/edit.py
+++ b/continuedev/src/continuedev/libs/llm/prompts/edit.py
@@ -2,19 +2,18 @@ from textwrap import dedent
simplified_edit_prompt = dedent(
"""\
- [INST] Consider the following code:
+ Consider the following code:
```
{{code_to_edit}}
```
Edit the code to perfectly satisfy the following user request:
{{user_input}}
- Output nothing except for the code. No code block, no English explanation, no start/end tags.
- [/INST]"""
+ Output nothing except for the code. No code block, no English explanation, no start/end tags."""
)
simplest_edit_prompt = dedent(
"""\
- [INST] Here is the code before editing:
+ Here is the code before editing:
```
{{code_to_edit}}
```
@@ -22,8 +21,7 @@ simplest_edit_prompt = dedent(
Here is the edit requested:
"{{user_input}}"
- Here is the code after editing:
- [/INST]"""
+ Here is the code after editing:"""
)
codellama_infill_edit_prompt = "{{file_prefix}}<FILL>{{file_suffix}}"
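[INST] ... [/INST] are Llama 2 chat delimiters; dropping them keeps these Mustache templates model-agnostic, presumably leaving any model-specific wrapping to the prompt-template layer. The {{...}} variables are filled at runtime; a rendering sketch using chevron, a Python Mustache library (Continue's actual template engine may differ):

    import chevron  # assumption: any Mustache renderer behaves the same

    prompt = chevron.render(
        simplified_edit_prompt,
        {
            "code_to_edit": "def add(a, b):\n    return a - b",
            "user_input": "fix the subtraction bug",
        },
    )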
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 3efa805f..294c1713 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -10,9 +10,6 @@ from ...core.main import ChatMessage
from ..llm import LLM
from ..util.telemetry import posthog_logger
-ca_bundle_path = certifi.where()
-ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
-
# SERVER_URL = "http://127.0.0.1:8080"
SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
@@ -41,6 +38,10 @@ class ProxyServer(LLM):
timeout=aiohttp.ClientTimeout(total=self.timeout),
)
else:
+ ca_bundle_path = (
+ certifi.where() if self.ca_bundle_path is None else self.ca_bundle_path
+ )
+ ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
self._client_session = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl_context=ssl_context),
timeout=aiohttp.ClientTimeout(total=self.timeout),
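Previously this module built a single ssl_context at import time from certifi's bundle; now the context is created per session so a user-supplied ca_bundle_path can take precedence. The fallback logic, isolated as a sketch (the helper name is illustrative):

    import ssl

    import certifi

    def build_ssl_context(ca_bundle_path=None):
        # Prefer the caller's CA bundle; otherwise fall back to the CA
        # roots packaged with certifi, matching the diff above.
        cafile = certifi.where() if ca_bundle_path is None else ca_bundle_path
        return ssl.create_default_context(cafile=cafile)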
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 1c1e020e..4def3198 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -29,7 +29,7 @@ def encoding_for_model(model_name: str):
try:
return tiktoken.encoding_for_model(aliases.get(model_name, model_name))
- except:
+ except Exception as _:
return tiktoken.encoding_for_model("gpt-3.5-turbo")
except Exception as e:
if not already_saw_import_err:
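Narrowing the bare except to except Exception keeps the tiktoken fallback while letting KeyboardInterrupt and SystemExit, which derive from BaseException rather than Exception, propagate. Illustrative use, with a deliberately bogus model name:

    import tiktoken

    try:
        enc = tiktoken.encoding_for_model("not-a-real-model")
    except Exception:
        # The unknown-model lookup error is still caught, but Ctrl-C
        # during the lookup is no longer swallowed.
        enc = tiktoken.encoding_for_model("gpt-3.5-turbo")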
diff --git a/continuedev/src/continuedev/models/reference/test.py b/continuedev/src/continuedev/models/reference/test.py
index 1c7608ec..1cebfc36 100644
--- a/continuedev/src/continuedev/models/reference/test.py
+++ b/continuedev/src/continuedev/models/reference/test.py
@@ -54,7 +54,7 @@ def docs_from_schema(schema, filepath, ignore_properties=[], inherited=[]):
if only_required != required or prop in ignore_properties:
return ""
required = "true" if required else "false"
- return f"""<ClassPropertyRef name='{prop}' details='{html.escape(json.dumps(details))}' required={{{required}}} default="{html.escape(str(details.get("default", "")))}"/>"""
+ return f"""<ClassPropertyRef name='{prop}' details='{html.escape(json.dumps(details))}' required={{{required}}} default="{html.escape(str(details.get("default", "")))}"/>\n"""
for prop, details in schema["properties"].items():
property = add_property(prop, details, True)
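The added trailing \n puts each generated <ClassPropertyRef .../> tag on its own line in the emitted reference docs instead of running them together. In miniature (property names illustrative):

    # Joined output now becomes one tag per line:
    tags = [f"<ClassPropertyRef name='{p}'/>\n" for p in ("api_key", "verify_ssl")]
    print("".join(tags), end="")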
diff --git a/continuedev/src/continuedev/plugins/steps/core/core.py b/continuedev/src/continuedev/plugins/steps/core/core.py
index 5e1f1cd4..61de6578 100644
--- a/continuedev/src/continuedev/plugins/steps/core/core.py
+++ b/continuedev/src/continuedev/plugins/steps/core/core.py
@@ -624,7 +624,6 @@ Please output the code to be inserted at the cursor in order to fulfill the user
generator = model_to_use.stream_complete(
rendered,
- raw=True,
temperature=sdk.config.temperature,
max_tokens=min(max_tokens, model_to_use.context_length // 2),
)