Diffstat (limited to 'continuedev/src')
 -rw-r--r--  continuedev/src/continuedev/steps/chat.py       | 2 +-
 -rw-r--r--  continuedev/src/continuedev/steps/core/core.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 9bb75ab4..2efef37d 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -36,7 +36,7 @@ class SimpleChatStep(Step):
             summary=self.user_input
         ))
-        async for chunk in sdk.models.default.stream_chat(messages):
+        async for chunk in sdk.models.gpt4.stream_chat(messages):
             if sdk.current_step_was_deleted():
                 return
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index a84263cc..345d99bc 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -172,7 +172,7 @@ class DefaultModelEditCodeStep(Step):
         # We don't know here all of the functions being passed in.
         # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
-        model_to_use = sdk.models.default
+        model_to_use = sdk.models.gpt4
         BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
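The comment in the core.py hunk describes a token-budget concern: if the edit prompt itself exceeds the context limit, prune_chat_messages in count_tokens.py drops the whole oversized message rather than trimming only as many lines as needed. Below is a minimal, self-contained sketch of that kind of oldest-first pruning, assuming hypothetical ChatMessage and count_tokens stand-ins and an assumed 8192-token window for the pinned gpt-4 model; it does not reproduce the real prune_chat_messages implementation.

# Sketch of the token-budget pruning described in the diff comment.
# ChatMessage, count_tokens, and MAX_TOKENS are hypothetical stand-ins;
# only BUFFER_FOR_FUNCTIONS = 400 comes from the diff itself.

from dataclasses import dataclass
from typing import List

MAX_TOKENS = 8192           # assumed context window for gpt-4
BUFFER_FOR_FUNCTIONS = 400  # reserve room for function definitions, as in the diff


@dataclass
class ChatMessage:
    role: str
    content: str


def count_tokens(text: str) -> int:
    # Crude stand-in: roughly 4 characters per token. A real implementation
    # would use the model's tokenizer.
    return max(1, len(text) // 4)


def prune_chat_messages(messages: List[ChatMessage], budget: int) -> List[ChatMessage]:
    # Drop the oldest messages until the remainder fits the budget. Note the
    # behavior the comment warns about: a single message that is itself over
    # budget gets cut entirely, not trimmed line by line.
    pruned = list(messages)
    while pruned and sum(count_tokens(m.content) for m in pruned) > budget:
        pruned.pop(0)
    return pruned


if __name__ == "__main__":
    history = [ChatMessage("user", "x" * 40_000), ChatMessage("user", "short question")]
    budget = MAX_TOKENS - BUFFER_FOR_FUNCTIONS
    kept = prune_chat_messages(history, budget)
    print(f"kept {len(kept)} of {len(history)} messages within a {budget}-token budget")

Run as-is, the oversized first message is dropped whole and only the short question survives, which is exactly the failure mode the diff comment says the BUFFER_FOR_FUNCTIONS reserve is meant to avoid triggering.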
