path: root/continuedev
author     Ty Dunn <ty@tydunn.com>  2023-06-20 18:55:48 -0700
committer  GitHub <noreply@github.com>  2023-06-20 18:55:48 -0700
commit     728ab16a96fe9733d89282841a40114ecb1a12ab (patch)
tree       4a9fb02059020c07b4c55ef13a40aa6dc5fb10df /continuedev
parent     560949871ba49c67b5379b615c86a82df08ee45d (diff)
parent     61b278510cf8ab05976d77e1c6a32d0ac6068a0f (diff)
Merge pull request #134 from continuedev/large-error
fixing hardcoded stream chat
Diffstat (limited to 'continuedev')
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py  6
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py  4
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 5d65eb22..095cbe51 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -38,9 +38,7 @@ class OpenAI(LLM):
     @cached_property
     def __encoding_for_model(self):
-        aliases = {
-            "gpt-3.5-turbo": "gpt3"
-        }
+        aliases = {}
         return tiktoken.encoding_for_model(self.default_model)
 
     def count_tokens(self, text: str):
@@ -88,7 +86,7 @@ class OpenAI(LLM):
         args = {"max_tokens": DEFAULT_MAX_TOKENS, "temperature": 0.5, "top_p": 1,
                 "frequency_penalty": 0, "presence_penalty": 0} | kwargs
         args["stream"] = True
-        args["model"] = "gpt-3.5-turbo"
+        args["model"] = self.default_model
 
         async for chunk in await openai.ChatCompletion.acreate(
             messages=self.compile_chat_messages(with_history, prompt),
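The alias table removed in the first hunk was never read: the method returns tiktoken.encoding_for_model(self.default_model) directly, and tiktoken resolves chat-model names on its own. The second hunk is the "hardcoded stream chat" fix itself: the streamed request now honors self.default_model instead of always sending "gpt-3.5-turbo". A minimal sketch of the token-counting side, illustrative rather than taken from this commit (the count_tokens helper name is hypothetical):

    import tiktoken

    def count_tokens(model: str, text: str) -> int:
        # tiktoken maps chat-model names such as "gpt-3.5-turbo" and "gpt-4"
        # straight to their encoding (cl100k_base), so no alias table is needed.
        encoding = tiktoken.encoding_for_model(model)
        return len(encoding.encode(text))

    print(count_tokens("gpt-3.5-turbo", "hello world"))  # token count under cl100k_base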
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 09f9facc..71a5b5b2 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -213,7 +213,7 @@ class DefaultModelEditCodeStep(Step):
         if model_to_use.name == "gpt-4":
-            total_tokens = model_to_use.count_tokens(full_file_contents)
+            total_tokens = model_to_use.count_tokens(full_file_contents + self._prompt)
             cur_start_line, cur_end_line = cut_context(
                 model_to_use, total_tokens, cur_start_line, cur_end_line)
@@ -223,7 +223,7 @@ class DefaultModelEditCodeStep(Step):
             model_to_use = sdk.models.gpt3516k
             total_tokens = model_to_use.count_tokens(
-                full_file_contents)
+                full_file_contents + self._prompt)
             cur_start_line, cur_end_line = cut_context(
                 model_to_use, total_tokens, cur_start_line, cur_end_line)
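Both core.py hunks make the token budget include the prompt: counting only full_file_contents under-reports the request by the prompt's length, so cut_context (which, going by its name and arguments, trims the file range to fit the window) would be handed a too-small total and keep too many lines. A hedged sketch of the arithmetic, with an assumed context-window size and a hypothetical helper name:

    import tiktoken

    CONTEXT_WINDOW = 8192  # assumed limit for illustration; the real value is model-specific

    def tokens_over_budget(model: str, file_contents: str, prompt: str) -> int:
        enc = tiktoken.encoding_for_model(model)
        # Count the file AND the prompt together, as the fix does; the file
        # alone under-reports the request by len(enc.encode(prompt)) tokens.
        total = len(enc.encode(file_contents + prompt))
        return max(0, total - CONTEXT_WINDOW)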