From 5cb80a064fe49ae64f5c15e1e4d130d7925a61f8 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Wed, 28 Jun 2023 23:10:44 -0700
Subject: 1000 -> max tokens

---
 continuedev/src/continuedev/libs/util/count_tokens.py | 2 +-
 continuedev/src/continuedev/steps/core/core.py        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 047a47e4..8b06fef9 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -83,7 +83,7 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], prompt: Union[str
         prompt_tokens += count_tokens(model, json.dumps(function))
 
     msgs = prune_chat_history(model,
-                              msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + 1000 + count_tokens(model, system_message))
+                              msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + DEFAULT_MAX_TOKENS + count_tokens(model, system_message))
     history = []
     if system_message:
         history.append({
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index ad72212d..a712c12f 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -165,7 +165,7 @@ class DefaultModelEditCodeStep(Step):
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
 
         model_to_use = sdk.models.default
 
-        BUFFER_FOR_FUNCTIONS = 200
+        BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
             full_file_contents + self._prompt + self.user_input) + BUFFER_FOR_FUNCTIONS + DEFAULT_MAX_TOKENS
--
cgit v1.2.3-70-g09d2
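
For context on what the changed line computes: compile_chat_messages reserves
room in the model's context window for the compiled prompt, the system
message, and the completion, then prunes chat history until the remainder
fits. The patch swaps the hardcoded 1000-token completion reservation for the
shared DEFAULT_MAX_TOKENS constant, so the pruner and the edit step's
total_tokens estimate agree on one reservation. The sketch below shows that
budgeting arithmetic in isolation; the constant values and function bodies are
illustrative stand-ins, not the project's real implementations (the real
count_tokens would use a proper tokenizer such as tiktoken).

    from typing import Dict, List

    # Assumed values, for illustration only.
    MAX_TOKENS_FOR_MODEL: Dict[str, int] = {"gpt-3.5-turbo": 4096}
    DEFAULT_MAX_TOKENS = 1024

    def count_tokens(model: str, text: str) -> int:
        # Crude stand-in: the real implementation would use the model's tokenizer.
        return len(text.split())

    def prune_chat_history(model: str, msgs: List[dict],
                           context_window: int, tokens_reserved: int) -> List[dict]:
        # Drop the oldest messages until the rest fits in the context window
        # minus what is reserved for the prompt, system message, and completion.
        budget = context_window - tokens_reserved
        while msgs and sum(count_tokens(model, m["content"]) for m in msgs) > budget:
            msgs = msgs[1:]
        return msgs

    # Mirrors the patched call site: reserve prompt + completion + system message.
    model = "gpt-3.5-turbo"
    system_message = "You are a helpful assistant."
    prompt_tokens = 200  # assumed size of the compiled prompt and function defs
    msgs = [{"role": "user", "content": "word " * 1500} for _ in range(4)]
    pruned = prune_chat_history(
        model, msgs, MAX_TOKENS_FOR_MODEL[model],
        prompt_tokens + DEFAULT_MAX_TOKENS + count_tokens(model, system_message))

Because DEFAULT_MAX_TOKENS now appears both in the pruning reservation and in
the edit step's total_tokens sum, raising the completion budget in one place
keeps the two calculations consistent instead of silently diverging from the
old literal 1000.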