author     Nate Sesti <sestinj@gmail.com>    2023-07-17 12:48:10 -0700
committer  Nate Sesti <sestinj@gmail.com>    2023-07-17 12:48:10 -0700
commit     36a2b72db549e2dde5a28d06c87df036a4e3afa0 (patch)
tree       26e9459f5add34d9c8b77eaae672bd30e52c42bd /continuedev
parent     436225436ef8379687a80e0b9595ddd4b488d946 (diff)
float -> int hot fix
Diffstat (limited to 'continuedev')
 continuedev/src/continuedev/steps/core/core.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index ea09f475..2b049ecc 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -162,7 +162,8 @@ class DefaultModelEditCodeStep(Step):
         if self._previous_contents.strip() == self._new_contents.strip():
             description = "No edits were made"
         else:
-            changes = '\n'.join(difflib.ndiff(self._previous_contents.splitlines(), self._new_contents.splitlines()))
+            changes = '\n'.join(difflib.ndiff(
+                self._previous_contents.splitlines(), self._new_contents.splitlines()))
             description = await models.gpt3516k.complete(dedent(f"""\
                 Diff summary: "{self.user_input}"

@@ -181,8 +182,8 @@ class DefaultModelEditCodeStep(Step):
         # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
         model_to_use = sdk.models.default
-        max_tokens = MAX_TOKENS_FOR_MODEL.get(
-            model_to_use.name, DEFAULT_MAX_TOKENS) / 2
+        max_tokens = int(MAX_TOKENS_FOR_MODEL.get(
+            model_to_use.name, DEFAULT_MAX_TOKENS) / 2)
         TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200

         if model_to_use.count_tokens(rif.contents) > TOKENS_TO_BE_CONSIDERED_LARGE_RANGE:
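For reference, the first hunk only rewraps a call to the standard library's difflib.ndiff; behavior is unchanged. The joined string it builds is what the step hands to models.gpt3516k to produce a diff summary. A minimal sketch of the output format, using invented sample strings rather than anything from this repository:

import difflib

# Hypothetical before/after contents standing in for
# self._previous_contents and self._new_contents.
previous = "def add(a, b):\n    return a + b\n"
new = "def add(a, b):\n    # sum two values\n    return a + b\n"

# ndiff yields one marker-prefixed string per line:
# "  " unchanged, "+ " added, "- " removed.
changes = '\n'.join(difflib.ndiff(previous.splitlines(), new.splitlines()))
print(changes)
#   def add(a, b):
# +     # sum two values
#       return a + b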
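The actual hot fix is in the second hunk. In Python 3, / is true division and returns a float even when both operands are ints, so halving the model's token limit made max_tokens a float, which code that expects an integer token count will reject. A minimal sketch of the failure and the fix, with illustrative stand-in values for MAX_TOKENS_FOR_MODEL and DEFAULT_MAX_TOKENS (the real mapping lives elsewhere in the codebase):

# Stand-in values for illustration only.
MAX_TOKENS_FOR_MODEL = {"gpt-3.5-turbo": 4096, "gpt-4": 8192}
DEFAULT_MAX_TOKENS = 2048

model_name = "gpt-3.5-turbo"

# Before the fix: true division always yields a float in Python 3 ...
max_tokens = MAX_TOKENS_FOR_MODEL.get(model_name, DEFAULT_MAX_TOKENS) / 2
print(type(max_tokens), max_tokens)  # <class 'float'> 2048.0

# ... which breaks any consumer that treats max_tokens as an int, e.g.:
# range(max_tokens)  # TypeError: 'float' object cannot be interpreted as an integer

# After the fix: cast the result back to int (int() truncates toward zero).
max_tokens = int(MAX_TOKENS_FOR_MODEL.get(model_name, DEFAULT_MAX_TOKENS) / 2)
print(type(max_tokens), max_tokens)  # <class 'int'> 2048

# An equivalent, arguably more idiomatic form is floor division:
max_tokens = MAX_TOKENS_FOR_MODEL.get(model_name, DEFAULT_MAX_TOKENS) // 2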