| field | value | date |
|---|---|---|
| author | Nate Sesti <sestinj@gmail.com> | 2023-08-14 12:45:29 -0700 |
| committer | Nate Sesti <sestinj@gmail.com> | 2023-08-14 12:45:29 -0700 |
| commit | 1c288f7749747c6b1908ae16c977f80e5597d2ca (patch) | |
| tree | d2047553c11854f67eab1d6e4e42089b2e7d19a4 /continuedev/src | |
| parent | 30befdb263a5f2f794869e82666c9edff4b10cd6 (diff) | |
fix: :bug: MAX_TOKENS_FOR_MODEL bug fix, more testing
Diffstat (limited to 'continuedev/src')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | continuedev/src/continuedev/libs/llm/openai.py | 7 |

1 file changed, 4 insertions(+), 3 deletions(-)
```diff
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 5f1ef7fa..7eb516a3 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -22,11 +22,12 @@ CHAT_MODELS = {
 MAX_TOKENS_FOR_MODEL = {
     "gpt-3.5-turbo": 4096,
     "gpt-3.5-turbo-0613": 4096,
-    "gpt-3.5-turbo-16k": 16384,
+    "gpt-3.5-turbo-16k": 16_384,
     "gpt-4": 8192,
-    "gpt-35-turbo-16k": 16384,
+    "gpt-35-turbo-16k": 16_384,
     "gpt-35-turbo-0613": 4096,
     "gpt-35-turbo": 4096,
+    "gpt-4-32k": 32_768
 }
@@ -67,7 +68,7 @@ class OpenAI(LLM):
     @property
     def context_length(self):
-        return MAX_TOKENS_FOR_MODEL[self.model]
+        return MAX_TOKENS_FOR_MODEL.get(self.model, 4096)
 
     @property
     def default_args(self):
```
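For context, the fix replaces a direct dictionary lookup with `dict.get` and a default, so a model name missing from `MAX_TOKENS_FOR_MODEL` no longer raises a `KeyError` in `context_length` and instead falls back to a 4096-token context. Below is a minimal sketch of that before/after behavior; `ContextLengthExample` is a hypothetical stand-in for the project's `OpenAI(LLM)` class, and the table is a trimmed copy of the one in `openai.py`, not the full implementation.

```python
# Trimmed copy of the MAX_TOKENS_FOR_MODEL table from openai.py (illustrative only).
MAX_TOKENS_FOR_MODEL = {
    "gpt-3.5-turbo": 4096,
    "gpt-3.5-turbo-16k": 16_384,
    "gpt-4": 8192,
    "gpt-4-32k": 32_768,
}


class ContextLengthExample:
    """Hypothetical stand-in for the OpenAI LLM class, used only to contrast the lookups."""

    def __init__(self, model: str):
        self.model = model

    @property
    def context_length_old(self) -> int:
        # Pre-fix behavior: direct indexing raises KeyError for unknown models.
        return MAX_TOKENS_FOR_MODEL[self.model]

    @property
    def context_length_new(self) -> int:
        # Post-fix behavior: unknown models fall back to a 4096-token context.
        return MAX_TOKENS_FOR_MODEL.get(self.model, 4096)


if __name__ == "__main__":
    known = ContextLengthExample("gpt-4-32k")
    print(known.context_length_new)  # 32768

    unknown = ContextLengthExample("gpt-4-0613")  # model not in the table
    print(unknown.context_length_new)  # 4096, no exception
    try:
        unknown.context_length_old
    except KeyError as exc:
        print(f"old lookup raised KeyError: {exc}")
```

The 4096 default is a conservative floor: underestimating the context window only wastes some prompt budget, whereas the old `KeyError` broke requests for any model not yet listed in the table.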