author     Ty Dunn <ty@tydunn.com>  2023-06-28 23:10:44 -0700
committer  Ty Dunn <ty@tydunn.com>  2023-06-28 23:10:44 -0700
commit     5cb80a064fe49ae64f5c15e1e4d130d7925a61f8 (patch)
tree       ed3e4184c95c81810ee395d39f7966b6d67128e0
parent     3b99cd4358328cd7d309b27f9cf2d6162cd523e0 (diff)
1000 -> DEFAULT_MAX_TOKENS; BUFFER_FOR_FUNCTIONS 200 -> 400
-rw-r--r--  continuedev/src/continuedev/libs/util/count_tokens.py  2
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py         2
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 047a47e4..8b06fef9 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -83,7 +83,7 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], prompt: Union[str
 prompt_tokens += count_tokens(model, json.dumps(function))
 msgs = prune_chat_history(model,
-                          msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + 1000 + count_tokens(model, system_message))
+                          msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + DEFAULT_MAX_TOKENS + count_tokens(model, system_message))
 history = []
 if system_message:
     history.append({
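
The hunk above swaps a hard-coded 1000-token reserve for the DEFAULT_MAX_TOKENS constant when deciding how much of the context window prune_chat_history may leave to the chat history. Below is a minimal sketch of that budgeting, assuming prune_chat_history simply drops the oldest messages until the history fits; the function body, the count_tokens stub, and the constant values are illustrative, and only the call shape mirrors the diff.

# Illustrative sketch only -- not the repo's implementation.
from typing import Dict, List

DEFAULT_MAX_TOKENS = 2048                # assumed value of the constant in count_tokens.py
MAX_TOKENS_FOR_MODEL = {"gpt-4": 8192}   # assumed context-window table

def count_tokens(model: str, text: str) -> int:
    # Stand-in tokenizer; the real helper would use a model-specific encoding (e.g. tiktoken).
    return len((text or "").split())

def prune_chat_history(model: str, msgs: List[Dict], context_length: int,
                       tokens_for_completion: int) -> List[Dict]:
    # Tokens left for history after reserving prompt, system message, and completion.
    budget = context_length - tokens_for_completion
    while msgs and sum(count_tokens(model, m["content"]) for m in msgs) > budget:
        msgs = msgs[1:]  # drop the oldest message first
    return msgs

# Mirroring the call in compile_chat_messages: the reserve is now
# prompt_tokens + DEFAULT_MAX_TOKENS + system-message tokens instead of prompt_tokens + 1000.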
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index ad72212d..a712c12f 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -165,7 +165,7 @@ class DefaultModelEditCodeStep(Step):
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
         model_to_use = sdk.models.default
-        BUFFER_FOR_FUNCTIONS = 200
+        BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
             full_file_contents + self._prompt + self.user_input) + BUFFER_FOR_FUNCTIONS + DEFAULT_MAX_TOKENS
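
The second hunk doubles the slack reserved for function/tool definitions when estimating the total tokens an edit step will consume. The arithmetic it produces is sketched below, assuming a one-argument count_tokens callable (like model_to_use.count_tokens in the diff); the helper name and the DEFAULT_MAX_TOKENS value are illustrative, while 400 is the buffer introduced by this commit.

# Illustrative arithmetic only -- shows what the edited lines add up to.
BUFFER_FOR_FUNCTIONS = 400   # headroom for function schemas (raised from 200 by this commit)
DEFAULT_MAX_TOKENS = 2048    # assumed reserve for the model's completion

def estimate_edit_step_tokens(count_tokens, full_file_contents: str,
                              prompt: str, user_input: str) -> int:
    # Tokens for the file being edited plus the instructions sent to the model...
    prompt_side = count_tokens(full_file_contents + prompt + user_input)
    # ...plus headroom for function definitions and the completion itself.
    return prompt_side + BUFFER_FOR_FUNCTIONS + DEFAULT_MAX_TOKENS

A larger buffer makes this estimate more conservative, so overflow is less likely and pruning can trim only as many history lines as needed rather than discarding the whole block, which is the failure mode the surrounding comment warns about.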