author Nate Sesti <33237525+sestinj@users.noreply.github.com> 2023-10-09 18:37:27 -0700
committer GitHub <noreply@github.com> 2023-10-09 18:37:27 -0700
commit f09150617ed2454f3074bcf93f53aae5ae637d40 (patch)
tree 5cfe614a64d921dfe58b049f426d67a8b832c71f /server/continuedev/libs/llm/proxy_server.py
parent 985304a213f620cdff3f8f65f74ed7e3b79be29d (diff)
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main
---------
Co-authored-by: GitHub Action <action@github.com>
Diffstat (limited to 'server/continuedev/libs/llm/proxy_server.py')
-rw-r--r-- server/continuedev/libs/llm/proxy_server.py | 120
1 file changed, 120 insertions, 0 deletions
diff --git a/server/continuedev/libs/llm/proxy_server.py b/server/continuedev/libs/llm/proxy_server.py
new file mode 100644
index 00000000..7c3462eb
--- /dev/null
+++ b/server/continuedev/libs/llm/proxy_server.py
@@ -0,0 +1,120 @@
+import codecs
+import json
+import traceback
+from typing import List
+
+import aiohttp
+
+from ...core.main import ChatMessage
+from ..util.telemetry import posthog_logger
+from .base import LLM
+
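+# SERVER_URL points at the hosted proxy; the commented-out value is for local development.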
+# SERVER_URL = "http://127.0.0.1:8080"
+SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
+
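+# Context-window sizes (in tokens) for the supported OpenAI models.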
+MAX_TOKENS_FOR_MODEL = {
+ "gpt-3.5-turbo": 4096,
+ "gpt-3.5-turbo-0613": 4096,
+ "gpt-3.5-turbo-16k": 16384,
+ "gpt-4": 8192,
+}
+
+
+class ProxyServer(LLM):
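+    # A single aiohttp session is reused for all requests; it is created in
+    # start() and closed in stop().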
+ _client_session: aiohttp.ClientSession
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def start(
+ self,
+ **kwargs,
+ ):
+ await super().start(**kwargs)
+ self._client_session = self.create_client_session()
+
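+        # Raises KeyError if self.model is not listed in MAX_TOKENS_FOR_MODEL.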
+ self.context_length = MAX_TOKENS_FOR_MODEL[self.model]
+
+ async def stop(self):
+ await self._client_session.close()
+
+ def get_headers(self):
+ return {"unique_id": self.unique_id}
+
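+    # Non-streaming completion; the proxy's response body is returned verbatim.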
+ async def _complete(self, prompt: str, options):
+ args = self.collect_args(options)
+
+ async with self._client_session.post(
+ f"{SERVER_URL}/complete",
+ json={"messages": [{"role": "user", "content": prompt}], **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ resp_text = await resp.text()
+ if resp.status != 200:
+ raise Exception(resp_text)
+
+ return resp_text
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ args = self.collect_args(options)
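+        # aiohttp's json= argument requires JSON-serializable data, so the
+        # messages are assumed to be plain OpenAI-style dicts at this point.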
+ async with self._client_session.post(
+ f"{SERVER_URL}/stream_chat",
+ json={"messages": messages, **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(await resp.text())
+
+            buffer = b""
+            async for data, end_of_http_chunk in resp.content.iter_chunks():
+                # Accumulate data until the HTTP chunk is complete instead of
+                # dropping partial chunks mid-stream.
+                buffer += data
+                if not end_of_http_chunk:
+                    continue
+                complete_chunk, buffer = buffer, b""
+                try:
+                    json_chunk = complete_chunk.decode("utf-8")
+                    # Each complete chunk carries newline-delimited JSON objects.
+                    for chunk in json_chunk.split("\n"):
+                        if chunk.strip() != "":
+                            yield json.loads(chunk)
+                except Exception as e:
+                    posthog_logger.capture_event(
+                        "proxy_server_parse_error",
+                        {
+                            "error_title": "Proxy server stream_chat parsing failed",
+                            "error_message": "\n".join(
+                                traceback.format_exception(type(e), e, e.__traceback__)
+                            ),
+                        },
+                    )
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ async with self._client_session.post(
+ f"{SERVER_URL}/stream_complete",
+ json={"messages": [{"role": "user", "content": prompt}], **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(await resp.text())
+
+            # Decode incrementally so a multi-byte UTF-8 character split across
+            # two chunks doesn't raise during decoding.
+            decoder = codecs.getincrementaldecoder("utf-8")()
+            async for line in resp.content.iter_any():
+                if line:
+                    yield decoder.decode(line)
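
For orientation, here is a minimal usage sketch (not part of the commit). It assumes that server/ is on the import path, that the pydantic LLM base class accepts model and unique_id as fields (as their use above suggests), and that it supplies create_client_session(), collect_args(), and proxy as used in the class; the message is given as an OpenAI-style dict, since aiohttp must JSON-serialize the request body.

import asyncio

from continuedev.libs.llm.proxy_server import ProxyServer

async def main():
    # Hypothetical field values; the real constructor fields come from the
    # pydantic LLM base class.
    llm = ProxyServer(model="gpt-3.5-turbo", unique_id="example-client")
    await llm.start()
    try:
        # Placeholder: collect_args() defines the real shape of `options`.
        options = None
        # Stream a chat completion through the proxy, printing each parsed chunk.
        async for chunk in llm._stream_chat(
            [{"role": "user", "content": "Hello!"}], options
        ):
            print(chunk)
    finally:
        await llm.stop()

asyncio.run(main())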