summaryrefslogtreecommitdiff
path: root/continuedev
diff options
context:
space:
mode:
Diffstat (limited to 'continuedev')
-rw-r--r--continuedev/src/continuedev/libs/llm/__init__.py10
1 file changed, 7 insertions, 3 deletions
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index baeb9d1a..b2eecab6 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -68,6 +68,10 @@ class LLM(ContinueBaseModel):
..., description="The name of the model to be used (e.g. gpt-4, codellama)"
)
+ stop_tokens: Optional[List[str]] = Field(
+ None, description="Tokens that will stop the completion."
+ )
+
timeout: Optional[int] = Field(
300,
description="Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.",
@@ -204,7 +208,7 @@ class LLM(ContinueBaseModel):
top_k=top_k,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
- stop=stop,
+ stop=stop or self.stop_tokens,
max_tokens=max_tokens,
functions=functions,
)
@@ -251,7 +255,7 @@ class LLM(ContinueBaseModel):
top_k=top_k,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
- stop=stop,
+ stop=stop or self.stop_tokens,
max_tokens=max_tokens,
functions=functions,
)
@@ -296,7 +300,7 @@ class LLM(ContinueBaseModel):
top_k=top_k,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
- stop=stop,
+ stop=stop or self.stop_tokens,
max_tokens=max_tokens,
functions=functions,
)