author     Nate Sesti <sestinj@gmail.com>  2023-06-15 11:12:27 -0700
committer  Nate Sesti <sestinj@gmail.com>  2023-06-15 11:12:27 -0700
commit     6de892f12959a43c74372f1eba40ec2f53b8c537 (patch)
tree       a8fbc3339a3b88d2b3e9dc2ad1fbc65cfe4829ec
parent     066491e72cd73742125dce27ca367c2482aa105b (diff)
gpt-4 is a chat model!
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 6f620ba0..8fec2046 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -14,6 +14,9 @@ MAX_TOKENS_FOR_MODEL = {
     "gpt-4": 4097,
 }
 DEFAULT_MAX_TOKENS = 2048
+CHAT_MODELS = {
+    "gpt-3.5-turbo", "gpt-4"
+}
 
 
 class OpenAI(LLM):
@@ -87,7 +90,7 @@ class OpenAI(LLM):
                 "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "suffix": None} | kwargs
         args["stream"] = True
 
-        if args["model"] == "gpt-3.5-turbo":
+        if args["model"] in CHAT_MODELS:
             generator = openai.ChatCompletion.create(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
@@ -109,7 +112,7 @@
         args = {"model": self.default_model, "max_tokens": DEFAULT_MAX_TOKENS, "temperature": 0.5, "top_p": 1,
                 "frequency_penalty": 0, "presence_penalty": 0, "stream": False} | kwargs
 
-        if args["model"] == "gpt-3.5-turbo":
+        if args["model"] in CHAT_MODELS:
             resp = openai.ChatCompletion.create(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
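
For context, a minimal sketch of the routing this commit changes. It is not part of the commit itself, and it assumes the pre-1.0 openai Python SDK used by this file: models listed in CHAT_MODELS must go through the messages-based openai.ChatCompletion endpoint, while other models keep using the prompt-based openai.Completion endpoint.

# Illustrative sketch only (not from this commit); assumes the pre-1.0
# openai Python SDK with openai.ChatCompletion / openai.Completion.
import openai

CHAT_MODELS = {"gpt-3.5-turbo", "gpt-4"}

def complete(model: str, prompt: str, **kwargs) -> str:
    if model in CHAT_MODELS:
        # Chat models (gpt-3.5-turbo, gpt-4) only accept the messages-based endpoint.
        resp = openai.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )
        return resp.choices[0].message.content
    # Non-chat models still use the legacy prompt-based completions endpoint.
    resp = openai.Completion.create(model=model, prompt=prompt, **kwargs)
    return resp.choices[0].text

Before this change, a request with model "gpt-4" fell through to openai.Completion.create, which rejects chat-only models; adding gpt-4 to CHAT_MODELS routes it to the chat endpoint, which is the point of the commit message.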