From a947148bb726cda4bff68605661680e6041b0094 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 15 Jun 2023 11:12:27 -0700
Subject: gpt-4 is a chat model!

---
 continuedev/src/continuedev/libs/llm/openai.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 6f620ba0..8fec2046 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -14,6 +14,9 @@ MAX_TOKENS_FOR_MODEL = {
     "gpt-4": 4097,
 }
 DEFAULT_MAX_TOKENS = 2048
+CHAT_MODELS = {
+    "gpt-3.5-turbo", "gpt-4"
+}
 
 
 class OpenAI(LLM):
@@ -87,7 +90,7 @@ class OpenAI(LLM):
                 "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "suffix": None} | kwargs
         args["stream"] = True
 
-        if args["model"] == "gpt-3.5-turbo":
+        if args["model"] in CHAT_MODELS:
             generator = openai.ChatCompletion.create(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
@@ -109,7 +112,7 @@ class OpenAI(LLM):
         args = {"model": self.default_model, "max_tokens": DEFAULT_MAX_TOKENS, "temperature": 0.5, "top_p": 1,
                 "frequency_penalty": 0, "presence_penalty": 0, "stream": False} | kwargs
 
-        if args["model"] == "gpt-3.5-turbo":
+        if args["model"] in CHAT_MODELS:
             resp = openai.ChatCompletion.create(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
-- 
cgit v1.2.3-70-g09d2
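
For context, a minimal standalone sketch of the routing idea this patch generalizes: requests go to the chat endpoint whenever the model name is in CHAT_MODELS, rather than only for "gpt-3.5-turbo". The sketch assumes the legacy openai 0.x Python SDK (separate ChatCompletion and Completion endpoints); the complete() helper and its prompt handling are illustrative and not part of the repository.

# Illustrative sketch only, not code from the patch. Assumes the openai 0.x SDK.
import openai

CHAT_MODELS = {"gpt-3.5-turbo", "gpt-4"}

def complete(model: str, prompt: str, **kwargs) -> str:
    if model in CHAT_MODELS:
        # Chat models take a list of role-tagged messages, not a raw prompt.
        resp = openai.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )
        return resp.choices[0].message.content
    # Older completion-style models take the prompt string directly.
    resp = openai.Completion.create(model=model, prompt=prompt, **kwargs)
    return resp.choices[0].text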