author    Nate Sesti <33237525+sestinj@users.noreply.github.com>    2023-10-09 18:37:27 -0700
committer GitHub <noreply@github.com>    2023-10-09 18:37:27 -0700
commit    f09150617ed2454f3074bcf93f53aae5ae637d40 (patch)
tree      5cfe614a64d921dfe58b049f426d67a8b832c71f /server/continuedev/libs/llm/queued.py
parent    985304a213f620cdff3f8f65f74ed7e3b79be29d (diff)
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action <action@github.com>
Diffstat (limited to 'server/continuedev/libs/llm/queued.py')
-rw-r--r--  server/continuedev/libs/llm/queued.py  |  77
1 file changed, 77 insertions, 0 deletions
diff --git a/server/continuedev/libs/llm/queued.py b/server/continuedev/libs/llm/queued.py
new file mode 100644
index 00000000..2db749eb
--- /dev/null
+++ b/server/continuedev/libs/llm/queued.py
@@ -0,0 +1,77 @@
+import asyncio
+from typing import Any, List, Union
+
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM, CompletionOptions
+
+
+class QueuedLLM(LLM):
+ """
+ QueuedLLM exists to make up for LLM servers that cannot handle multiple requests at once. It uses a lock to ensure that only one request is being processed at a time.
+
+ If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.queued import QueuedLLM
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=QueuedLLM(llm=<OTHER_LLM_CLASS>)
+ )
+ )
+ ```
+ """
+
+    llm: LLM = Field(..., description="The LLM to wrap with a lock")
+    _lock: asyncio.Lock
+
+    model: str = "queued"
+
+    def dict(self, **kwargs):
+        return self.llm.dict(**kwargs)
+
+    async def start(self, *args, **kwargs):
+        await super().start(*args, **kwargs)
+        await self.llm.start(*args, **kwargs)
+        self._lock = asyncio.Lock()
+        self.model = self.llm.model
+        self.template_messages = self.llm.template_messages
+        self.prompt_templates = self.llm.prompt_templates
+        self.context_length = self.llm.context_length
+
+    async def stop(self):
+        await self.llm.stop()
+
+    def collect_args(self, options: CompletionOptions):
+        return self.llm.collect_args(options)
+
+    def compile_chat_messages(
+        self,
+        options: CompletionOptions,
+        msgs: List[ChatMessage],
+        functions: Union[List[Any], None] = None,
+    ):
+        return self.llm.compile_chat_messages(options, msgs, functions)
+
+    def template_prompt_like_messages(self, prompt: str) -> str:
+        return self.llm.template_prompt_like_messages(prompt)
+
+    async def _complete(self, prompt: str, options: CompletionOptions):
+        async with self._lock:
+            resp = await self.llm._complete(prompt, options)
+            return resp
+
+    async def _stream_complete(self, prompt: str, options: CompletionOptions):
+        async with self._lock:
+            async for chunk in self.llm._stream_complete(prompt, options):
+                yield chunk
+
+    async def _stream_chat(
+        self, messages: List[ChatMessage], options: CompletionOptions
+    ):
+        async with self._lock:
+            async for chunk in self.llm._stream_chat(messages, options):
+                yield chunk
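The docstring above explains the core idea: a single asyncio.Lock serializes every request to the wrapped LLM. Below is a minimal, self-contained sketch of the same pattern, separate from the diff; `SlowBackend` and `QueuedBackend` are made-up stand-ins for illustration, not part of continuedev.

```python
# Minimal sketch (not from the diff): the "queue with a lock" pattern,
# using a made-up SlowBackend stand-in for an LLM server that can only
# serve one request at a time.
import asyncio


class SlowBackend:
    async def complete(self, prompt: str) -> str:
        await asyncio.sleep(0.5)  # pretend the server is busy generating
        return f"response to {prompt!r}"


class QueuedBackend:
    """Wraps a backend so that concurrent callers are processed one at a time."""

    def __init__(self, backend: SlowBackend):
        self.backend = backend
        self._lock = asyncio.Lock()

    async def complete(self, prompt: str) -> str:
        # Only one coroutine may hold the lock, so overlapping calls queue up
        # instead of hitting the backend concurrently.
        async with self._lock:
            return await self.backend.complete(prompt)


async def main():
    queued = QueuedBackend(SlowBackend())
    # All three calls are issued at once, but they reach the backend serially.
    results = await asyncio.gather(*(queued.complete(f"prompt {i}") for i in range(3)))
    print(results)


asyncio.run(main())
```

Note that in QueuedLLM the streaming methods hold the lock for the entire stream (the `async with` wraps the `async for`), so a long streaming response keeps later requests queued until it finishes.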