Diffstat (limited to 'continuedev/src')
 continuedev/src/continuedev/libs/llm/openai.py | 6 ++----
 continuedev/src/continuedev/steps/core/core.py | 4 ++--
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 5d65eb22..095cbe51 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -38,9 +38,7 @@ class OpenAI(LLM):
 
     @cached_property
     def __encoding_for_model(self):
-        aliases = {
-            "gpt-3.5-turbo": "gpt3"
-        }
+        aliases = {}
         return tiktoken.encoding_for_model(self.default_model)
 
     def count_tokens(self, text: str):
@@ -88,7 +86,7 @@ class OpenAI(LLM):
         args = {"max_tokens": DEFAULT_MAX_TOKENS, "temperature": 0.5, "top_p": 1,
                 "frequency_penalty": 0, "presence_penalty": 0} | kwargs
         args["stream"] = True
-        args["model"] = "gpt-3.5-turbo"
+        args["model"] = self.default_model
 
         async for chunk in await openai.ChatCompletion.acreate(
             messages=self.compile_chat_messages(with_history, prompt),
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 09f9facc..71a5b5b2 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -213,7 +213,7 @@ class DefaultModelEditCodeStep(Step):
 
             if model_to_use.name == "gpt-4":
 
-                total_tokens = model_to_use.count_tokens(full_file_contents)
+                total_tokens = model_to_use.count_tokens(full_file_contents + self._prompt)
                 cur_start_line, cur_end_line = cut_context(
                     model_to_use, total_tokens, cur_start_line, cur_end_line)
 
@@ -223,7 +223,7 @@ class DefaultModelEditCodeStep(Step):
 
                     model_to_use = sdk.models.gpt3516k
 
                     total_tokens = model_to_use.count_tokens(
-                        full_file_contents)
+                        full_file_contents + self._prompt)
                     cur_start_line, cur_end_line = cut_context(
                         model_to_use, total_tokens, cur_start_line, cur_end_line)
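A note on the pattern this commit settles on: the openai.py change works because tiktoken resolves known model names such as "gpt-3.5-turbo" to the right encoding on its own, so the manual alias dict was dead weight; the core.py change budgets the prompt's tokens alongside the file contents before cutting context. A minimal, self-contained sketch of that counting pattern, assuming only that tiktoken is installed; the helper name and sample inputs are illustrative, not from the repo:

import tiktoken

def count_tokens(model_name: str, text: str) -> int:
    # tiktoken maps known model names (e.g. "gpt-3.5-turbo") directly
    # to an encoding, so no alias table is needed.
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(text))

# As in the core.py hunks above, measure the file contents *plus* the
# prompt, since both must fit within the model's context window.
full_file_contents = "def add(a, b):\n    return a + b\n"  # stand-in file
prompt = "Rewrite the selected code."                      # stand-in prompt
total_tokens = count_tokens("gpt-3.5-turbo", full_file_contents + prompt)
print(total_tokens)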
