-rw-r--r--   continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py    2
-rw-r--r--   continuedev/src/continuedev/libs/llm/queued.py               55
2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
index 084c57fd..07e27349 100644
--- a/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
+++ b/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py
@@ -18,7 +18,7 @@ class MaybeProxyOpenAI(LLM):
     async def start(
         self, write_log: Callable[[str], None] = None, unique_id: Optional[str] = None
     ):
-        await super().start(write_log=lambda *args, **kwargs: None, unique_id=unique_id)
+        await super().start(write_log=write_log, unique_id=unique_id)
         if self.api_key is None or self.api_key.strip() == "":
             self.llm = ProxyServer(model=self.model)
         else:
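
The practical effect of this one-line change: previously, MaybeProxyOpenAI.start() replaced the caller's write_log with a no-op lambda, so prompt/completion logging was silently dropped; now the callback is forwarded to the base class. A hedged sketch of how this surfaces to a caller (the import path mirrors the file path above and may differ in an installed package; the model name, callback, and unique_id are illustrative, not part of the commit):

import asyncio

from continuedev.src.continuedev.libs.llm.maybe_proxy_openai import MaybeProxyOpenAI


async def demo() -> None:
    logged = []
    llm = MaybeProxyOpenAI(model="gpt-4")                      # model name is illustrative
    await llm.start(write_log=logged.append, unique_id="demo")
    # With the fix, anything the wrapped LLM writes via write_log is appended
    # to `logged`; before it, the no-op lambda discarded every message.


asyncio.run(demo())
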
diff --git a/continuedev/src/continuedev/libs/llm/queued.py b/continuedev/src/continuedev/libs/llm/queued.py
new file mode 100644
index 00000000..9e6e0180
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/queued.py
@@ -0,0 +1,55 @@
+import asyncio
+from typing import Any, List
+
+from ...core.main import ChatMessage
+from . import LLM, CompletionOptions
+
+
+class QueuedLLM(LLM):
+    llm: LLM
+    _lock: asyncio.Lock
+
+    model: str = "queued"
+
+    def dict(self, **kwargs):
+        return self.llm.dict(**kwargs)
+
+    async def start(self, *args, **kwargs):
+        await super().start(*args, **kwargs)
+        await self.llm.start(*args, **kwargs)
+        self._lock = asyncio.Lock()
+        self.model = self.llm.model
+
+    async def stop(self):
+        await self.llm.stop()
+
+    def collect_args(self, options: CompletionOptions):
+        return self.llm.collect_args(options)
+
+    def compile_chat_messages(
+        self,
+        options: CompletionOptions,
+        msgs: List[ChatMessage],
+        functions: List[Any] | None = None,
+    ):
+        return self.llm.compile_chat_messages(options, msgs, functions)
+
+    def template_prompt_like_messages(self, prompt: str) -> str:
+        return self.llm.template_prompt_like_messages(prompt)
+
+    async def _complete(self, prompt: str, options: CompletionOptions):
+        async with self._lock:
+            resp = await self.llm._complete(prompt, options)
+            return resp
+
+    async def _stream_complete(self, prompt: str, options: CompletionOptions):
+        async with self._lock:
+            async for chunk in self.llm._stream_complete(prompt, options):
+                yield chunk
+
+    async def _stream_chat(
+        self, messages: List[ChatMessage], options: CompletionOptions
+    ):
+        async with self._lock:
+            async for chunk in self.llm._stream_chat(messages, options):
+                yield chunk
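
For readers skimming the new file: QueuedLLM is a thin wrapper that funnels every completion and chat request through a single asyncio.Lock, so concurrent callers take turns against the wrapped LLM instead of hitting it simultaneously. Below is a hedged, self-contained sketch of that pattern with toy names (not the project's real API) that runs on its own:

import asyncio


class FakeBackend:
    async def complete(self, prompt: str) -> str:
        await asyncio.sleep(0.1)              # stand-in for a network round-trip
        return f"echo: {prompt}"


class Queued:
    def __init__(self, backend: FakeBackend) -> None:
        self.backend = backend
        self._lock = asyncio.Lock()

    async def complete(self, prompt: str) -> str:
        async with self._lock:                # requests are serialized here
            return await self.backend.complete(prompt)


async def main() -> None:
    llm = Queued(FakeBackend())
    # gather() starts both calls concurrently, but the lock makes them
    # execute strictly one after the other against the backend.
    print(await asyncio.gather(llm.complete("a"), llm.complete("b")))


asyncio.run(main())
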