From 738a150a2c5503721e3bc75bd3fdc25590513883 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Sun, 2 Jul 2023 15:47:14 -0700
Subject: no more gpt-3.5-turbo

---
 continuedev/src/continuedev/steps/chat.py      | 2 +-
 continuedev/src/continuedev/steps/core/core.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'continuedev')

diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 9bb75ab4..2efef37d 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -36,7 +36,7 @@ class SimpleChatStep(Step):
             summary=self.user_input
         ))
 
-        async for chunk in sdk.models.default.stream_chat(messages):
+        async for chunk in sdk.models.gpt4.stream_chat(messages):
             if sdk.current_step_was_deleted():
                 return
 
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index a84263cc..345d99bc 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -172,7 +172,7 @@ class DefaultModelEditCodeStep(Step):
         # We don't know here all of the functions being passed in.
         # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
-        model_to_use = sdk.models.default
+        model_to_use = sdk.models.gpt4
 
         BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
--
cgit v1.2.3-70-g09d2
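
Below the patch, a minimal runnable sketch of the two call sites it touches, assuming only
what the diff shows: a model object exposing an async stream_chat(messages) generator and a
count_tokens(text) method. FakeGPT4, the prompt text, and the message/chunk shapes are
hypothetical stand-ins for illustration, not the continuedev SDK's real API.

# Sketch only: FakeGPT4 is a hypothetical stand-in for sdk.models.gpt4,
# exposing the two methods the patch relies on. The real classes live in
# the continuedev SDK and are not reproduced here.
import asyncio


class FakeGPT4:
    async def stream_chat(self, messages):
        # Yield completion chunks one at a time, like stream_chat in chat.py.
        for word in ["Hello ", "from ", "the ", "sketch."]:
            yield {"content": word}

    def count_tokens(self, text):
        # Crude placeholder for the tokenizer-backed count in count_tokens.py.
        return len(text.split())


async def main():
    gpt4 = FakeGPT4()

    # Mirrors DefaultModelEditCodeStep in core.py: reserve headroom for
    # function definitions so prompt pruning never drops the whole message.
    BUFFER_FOR_FUNCTIONS = 400
    prompt = "Edit the selected code to add error handling."  # hypothetical input
    total_tokens = gpt4.count_tokens(prompt) + BUFFER_FOR_FUNCTIONS
    print(f"budgeted tokens: {total_tokens}")

    # Mirrors SimpleChatStep in chat.py: consume the chat stream chunk by chunk.
    async for chunk in gpt4.stream_chat([{"role": "user", "content": prompt}]):
        print(chunk["content"], end="")
    print()


asyncio.run(main())

Note the design consequence of the change itself: pinning sdk.models.gpt4 rather than
sdk.models.default makes both steps ignore whatever default model the user configured,
which matches the commit subject "no more gpt-3.5-turbo".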