summaryrefslogtreecommitdiff
path: root/continuedev/src/continuedev/steps
diff options
context:
space:
mode:
author: Nate Sesti <sestinj@gmail.com> 2023-06-25 22:35:37 -0700
committer: Nate Sesti <sestinj@gmail.com> 2023-06-25 22:35:37 -0700
commit: 0f4dc5f28c567e8ae5f2e9e5feedb72ac35265ea (patch)
tree: 962d4c4ba37a3da04706b328a30d2fa43529cd50 /continuedev/src/continuedev/steps
parent: 2caff9f639611a66d9e88ec6d84b36efe8327b91 (diff)
download: sncontinue-0f4dc5f28c567e8ae5f2e9e5feedb72ac35265ea.tar.gz
sncontinue-0f4dc5f28c567e8ae5f2e9e5feedb72ac35265ea.tar.bz2
sncontinue-0f4dc5f28c567e8ae5f2e9e5feedb72ac35265ea.zip
context overflow fix
Diffstat (limited to 'continuedev/src/continuedev/steps')
-rw-r--r--continuedev/src/continuedev/steps/core/core.py22
1 file changed, 6 insertions, 16 deletions
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index f146c94a..24f00d36 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -211,26 +211,16 @@ class DefaultModelEditCodeStep(Step):
return cur_start_line, cur_end_line
- if model_to_use.name == "gpt-4":
-
- total_tokens = model_to_use.count_tokens(
- full_file_contents + self._prompt)
- cur_start_line, cur_end_line = cut_context(
- model_to_use, total_tokens, cur_start_line, cur_end_line)
-
- elif model_to_use.name == "gpt-3.5-turbo" or model_to_use.name == "gpt-3.5-turbo-16k":
-
+ model_to_use = sdk.models.default
+ if model_to_use.name == "gpt-3.5-turbo":
if sdk.models.gpt35.count_tokens(full_file_contents) > MAX_TOKENS_FOR_MODEL["gpt-3.5-turbo"]:
-
model_to_use = sdk.models.gpt3516k
- total_tokens = model_to_use.count_tokens(
- full_file_contents + self._prompt)
- cur_start_line, cur_end_line = cut_context(
- model_to_use, total_tokens, cur_start_line, cur_end_line)
- else:
+ total_tokens = model_to_use.count_tokens(
+ full_file_contents + self._prompt + self.user_input)
- raise Exception("Unknown default model")
+ cur_start_line, cur_end_line = cut_context(
+ model_to_use, total_tokens, cur_start_line, cur_end_line)
code_before = "\n".join(
full_file_contents_lst[cur_start_line:max_start_line])