From ff2a3978a1e2c95a4e288b56411bf0c32b86757b Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Fri, 29 Sep 2023 12:12:17 -0700
Subject: feat: :sparkles: add max_tokens option to LLM class

---
 continuedev/src/continuedev/libs/llm/__init__.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 28f614c7..e6a90ef7 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -71,6 +71,10 @@ class LLM(ContinueBaseModel):
         ..., description="The name of the model to be used (e.g. gpt-4, codellama)"
     )
 
+    max_tokens: int = Field(
+        DEFAULT_MAX_TOKENS, description="The maximum number of tokens to generate."
+    )
+
     stop_tokens: Optional[List[str]] = Field(
         None, description="Tokens that will stop the completion."
     )
@@ -237,7 +241,7 @@ class LLM(ContinueBaseModel):
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             stop=stop or self.stop_tokens,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens or self.max_tokens,
             functions=functions,
         )
 
@@ -288,7 +292,7 @@ class LLM(ContinueBaseModel):
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             stop=stop or self.stop_tokens,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens or self.max_tokens,
             functions=functions,
         )
 
@@ -337,7 +341,7 @@ class LLM(ContinueBaseModel):
             presence_penalty=presence_penalty,
             frequency_penalty=frequency_penalty,
             stop=stop or self.stop_tokens,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens or self.max_tokens,
             functions=functions,
         )
 
-- 
cgit v1.2.3-70-g09d2