author    Nate Sesti <sestinj@gmail.com>  2023-07-02 15:47:14 -0700
committer Nate Sesti <sestinj@gmail.com>  2023-07-02 15:47:14 -0700
commit    738a150a2c5503721e3bc75bd3fdc25590513883 (patch)
tree      aecf17927b3ba4eb0652795ed4791d784e9343a0 /continuedev
parent    bad7fc4bd0280a7ec0586eed955911a10c16eb01 (diff)
no more gpt-3.5-turbo
Diffstat (limited to 'continuedev')
-rw-r--r--  continuedev/src/continuedev/steps/chat.py      | 2
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py | 2
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 9bb75ab4..2efef37d 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -36,7 +36,7 @@ class SimpleChatStep(Step):
             summary=self.user_input
         ))

-        async for chunk in sdk.models.default.stream_chat(messages):
+        async for chunk in sdk.models.gpt4.stream_chat(messages):
             if sdk.current_step_was_deleted():
                 return
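
Note: this hunk pins the chat step to the GPT-4 handle instead of the user-configurable default model. Below is a minimal, self-contained sketch of the same streaming pattern; the classes and names are illustrative stand-ins, not the project's real SDK objects, kept only to show how chunks are consumed and how the loop bails out when the step has been deleted.

# Minimal sketch of the streaming pattern above; FakeGPT4 and run_chat are
# illustrative stand-ins, not the project's actual sdk.models.gpt4 object.
import asyncio
from typing import AsyncIterator, Dict, List


class FakeGPT4:
    """Stand-in model handle that yields completion chunks."""

    async def stream_chat(self, messages: List[Dict[str, str]]) -> AsyncIterator[str]:
        for chunk in ["Hello", ", ", "world", "!"]:
            await asyncio.sleep(0)  # pretend each chunk arrives over the network
            yield chunk


async def run_chat(model: FakeGPT4,
                   messages: List[Dict[str, str]],
                   step_deleted=lambda: False) -> str:
    # Same shape as the diff: stream chunks, stop early if the step was deleted.
    description = ""
    async for chunk in model.stream_chat(messages):
        if step_deleted():
            return description
        description += chunk
    return description


if __name__ == "__main__":
    print(asyncio.run(run_chat(FakeGPT4(), [{"role": "user", "content": "hi"}])))
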
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index a84263cc..345d99bc 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -172,7 +172,7 @@ class DefaultModelEditCodeStep(Step):
         # We don't know here all of the functions being passed in.
         # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
-        model_to_use = sdk.models.default
+        model_to_use = sdk.models.gpt4
         BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
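
The comment block in this hunk is about token budgeting: the prompt must leave room for function definitions of unknown size, and it is better to trim code lines at this point than to let prune_chat_messages in count_tokens.py drop the whole message later. The sketch below illustrates that idea under stated assumptions; count_tokens here is a crude whitespace proxy rather than the project's real tokenizer, and CONTEXT_LIMIT and trim_code_to_fit are hypothetical names.

# Illustrative sketch of the budgeting idea; CONTEXT_LIMIT, count_tokens and
# trim_code_to_fit are assumptions, not the project's actual implementation.
from typing import List

CONTEXT_LIMIT = 8192        # assumed context window for the model in use
BUFFER_FOR_FUNCTIONS = 400  # reserve room for function definitions of unknown size


def count_tokens(text: str) -> int:
    return len(text.split())  # stand-in tokenizer (a real one is model-specific)


def trim_code_to_fit(code_lines: List[str], prompt_overhead: str) -> List[str]:
    """Keep only as many code lines as fit the remaining token budget.

    This is the behavior the comment argues for: trim lines here, so the
    downstream pruner never has to cut the entire message.
    """
    budget = CONTEXT_LIMIT - BUFFER_FOR_FUNCTIONS - count_tokens(prompt_overhead)
    kept: List[str] = []
    used = 0
    for line in code_lines:
        cost = count_tokens(line)
        if used + cost > budget:
            break
        kept.append(line)
        used += cost
    return kept
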