author    Nate Sesti <sestinj@gmail.com>  2023-06-29 09:51:55 -0700
committer Nate Sesti <sestinj@gmail.com>  2023-06-29 09:51:55 -0700
commit    481aa6dc53422746ce87259c4a22b4b1f6bef7ea (patch)
tree      92e5e1a8c1713feef36dbf6a5aaac9e47b7fe804 /continuedev/src
parent    43dc3459997d79d112a2776a8e95546580112460 (diff)
parent    70051c10f7a4afca5224799d86036591cb937543 (diff)
Merge branch 'main' of https://github.com/continuedev/continue
Diffstat (limited to 'continuedev/src')
-rw-r--r--  continuedev/src/continuedev/libs/util/count_tokens.py  2
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py         2
2 files changed, 2 insertions, 2 deletions
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 047a47e4..8b06fef9 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -83,7 +83,7 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], prompt: Union[str
         prompt_tokens += count_tokens(model, json.dumps(function))
     msgs = prune_chat_history(model,
-                              msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + 1000 + count_tokens(model, system_message))
+                              msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + DEFAULT_MAX_TOKENS + count_tokens(model, system_message))
     history = []
     if system_message:
         history.append({
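
What changed here: the number of tokens reserved for the model's reply was a hardcoded 1000; it now uses the shared DEFAULT_MAX_TOKENS constant, so the pruning budget cannot drift out of sync with the actual completion limit. Below is a minimal sketch of the budgeting involved, assuming a toy tokenizer and pruning loop — only the names prune_chat_history, MAX_TOKENS_FOR_MODEL, DEFAULT_MAX_TOKENS, and count_tokens come from the diff; the bodies are illustrative assumptions, not the repository's implementation.

from typing import Dict, List

DEFAULT_MAX_TOKENS = 1000  # assumed value; the hunk swaps this constant in for a hardcoded 1000
MAX_TOKENS_FOR_MODEL: Dict[str, int] = {"gpt-3.5-turbo": 4096, "gpt-4": 8192}  # assumed table

def count_tokens(model: str, text: str) -> int:
    # Stand-in tokenizer; the real module counts with the model's encoding.
    return len((text or "").split())

def prune_chat_history(model: str, msgs: List[dict], context_window: int,
                       tokens_for_completion: int) -> List[dict]:
    # Drop the oldest messages until the remainder fits in whatever budget
    # is left after reserving tokens_for_completion for the reply.
    budget = context_window - tokens_for_completion
    while msgs and sum(count_tokens(model, m["content"]) for m in msgs) > budget:
        msgs = msgs[1:]
    return msgs

With this shape, the call in the hunk reserves prompt_tokens + DEFAULT_MAX_TOKENS + the system-message tokens, so raising or lowering DEFAULT_MAX_TOKENS automatically adjusts how aggressively history is pruned.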
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 1cbf3816..c8acd7c5 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -166,7 +166,7 @@ class DefaultModelEditCodeStep(Step):
         # Overflow won't happen, but prune_chat_history in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
         model_to_use = sdk.models.default
-        BUFFER_FOR_FUNCTIONS = 200
+        BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
             full_file_contents + self._prompt + self.user_input) + BUFFER_FOR_FUNCTIONS + DEFAULT_MAX_TOKENS
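
What changed here: the fixed allowance for function-definition tokens doubles from 200 to 400, giving the total-token estimate more headroom before prune_chat_history would discard the whole edit prompt. A small worked sketch of that arithmetic with stand-in values — only BUFFER_FOR_FUNCTIONS, DEFAULT_MAX_TOKENS, and the shape of the total_tokens expression come from the diff; the tokenizer and sample inputs are assumptions.

BUFFER_FOR_FUNCTIONS = 400  # was 200; fixed allowance for function-call schema tokens
DEFAULT_MAX_TOKENS = 1000   # assumed completion reservation, matching the other hunk

def count_tokens(text: str) -> int:
    return len(text.split())  # stand-in; the real step asks the model's tokenizer

full_file_contents = "def add(a, b):\n    return a + b\n"  # assumed sample
prompt = "Rewrite the code above."                         # assumed sample
user_input = "Add type hints."                             # assumed sample

total_tokens = (count_tokens(full_file_contents + prompt + user_input)
                + BUFFER_FOR_FUNCTIONS + DEFAULT_MAX_TOKENS)
print(total_tokens)  # compare against the model's context window before sending

Because the estimate feeds the comparison against the model's context window, underestimating the function-schema overhead (the old 200) could let the compiled messages exceed the window and trigger the wholesale pruning the comment above warns about.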