path: root/server/continuedev/libs/llm/text_gen_interface.py
author    Nate Sesti <33237525+sestinj@users.noreply.github.com>    2023-10-09 18:37:27 -0700
committer GitHub <noreply@github.com>    2023-10-09 18:37:27 -0700
commit    f09150617ed2454f3074bcf93f53aae5ae637d40 (patch)
tree      5cfe614a64d921dfe58b049f426d67a8b832c71f /server/continuedev/libs/llm/text_gen_interface.py
parent    985304a213f620cdff3f8f65f74ed7e3b79be29d (diff)
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main
---------
Co-authored-by: GitHub Action <action@github.com>
Diffstat (limited to 'server/continuedev/libs/llm/text_gen_interface.py')
-rw-r--r--    server/continuedev/libs/llm/text_gen_interface.py    114
1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/server/continuedev/libs/llm/text_gen_interface.py b/server/continuedev/libs/llm/text_gen_interface.py
new file mode 100644
index 00000000..225fd3b6
--- /dev/null
+++ b/server/continuedev/libs/llm/text_gen_interface.py
@@ -0,0 +1,114 @@
+import json
+from typing import Any, Callable, Dict, List, Union
+
+import websockets
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplest_edit_prompt
+
+
+class TextGenUI(LLM):
+ """
+ TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.text_gen_interface import TextGenUI
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=TextGenUI(
+ model="<MODEL_NAME>",
+ )
+ )
+ )
+ ```
+ """
+
+    model: str = "text-gen-ui"
+    server_url: str = Field(
+        "http://localhost:5000", description="URL of your TextGenUI server"
+    )
+    streaming_url: str = Field(
+        "http://localhost:5005",
+        description="URL of your TextGenUI streaming server (separate from main server URL)",
+    )
+
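+    # Only the edit prompt is overridden here; chat-style prompts are built by
+    # template_messages below.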
+    prompt_templates = {
+        "edit": simplest_edit_prompt,
+    }
+
+    template_messages: Union[
+        Callable[[List[Dict[str, str]]], str], None
+    ] = llama2_template_messages
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    def collect_args(self, options) -> Any:
+        args = super().collect_args(options)
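+        # text-generation-webui takes "max_new_tokens", so translate the
+        # OpenAI-style "max_tokens" before sending the request.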
+        args = {**args, "max_new_tokens": options.max_tokens}
+        args.pop("max_tokens", None)
+        return args
+
+    async def _stream_complete(self, prompt, options):
+        args = self.collect_args(options)
+
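+        # The streaming API is served over a websocket, so swap the URL scheme.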
+        ws_url = self.streaming_url.replace("http://", "ws://").replace("https://", "wss://")
+        payload = json.dumps({"prompt": prompt, "stream": True, **args})
+        async with websockets.connect(
+            f"{ws_url}/api/v1/stream", ping_interval=None
+        ) as websocket:
+            await websocket.send(payload)
+
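+            # The server emits JSON events: "text_stream" carries a newly
+            # generated chunk of text, "stream_end" signals generation is done.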
+            while True:
+                incoming_data = await websocket.recv()
+                incoming_data = json.loads(incoming_data)
+
+                match incoming_data["event"]:
+                    case "text_stream":
+                        yield incoming_data["text"]
+                    case "stream_end":
+                        break
+
+    async def _stream_chat(self, messages: List[ChatMessage], options):
+        args = self.collect_args(options)
+
+        async def generator():
+            ws_url = self.streaming_url.replace("http://", "ws://").replace("https://", "wss://")
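+            # text-generation-webui keeps two copies of the chat history, the raw
+            # "internal" log and the "visible" one rendered in its UI; send both.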
+            history = [msg["content"] for msg in messages]
+            payload = json.dumps(
+                {
+                    "user_input": messages[-1]["content"],
+                    "history": {"internal": [history], "visible": [history]},
+                    "stream": True,
+                    **args,
+                }
+            )
+            async with websockets.connect(
+                f"{ws_url}/api/v1/chat-stream", ping_interval=None
+            ) as websocket:
+                await websocket.send(payload)
+
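+                # Each "text_stream" event re-sends the whole visible reply so
+                # far; track what was already yielded and emit only the new suffix.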
+                prev = ""
+                while True:
+                    incoming_data = await websocket.recv()
+                    incoming_data = json.loads(incoming_data)
+
+                    match incoming_data["event"]:
+                        case "text_stream":
+                            visible = incoming_data["history"]["visible"][-1]
+                            if len(visible) > 0:
+                                yield {
+                                    "role": "assistant",
+                                    "content": visible[-1][len(prev):],
+                                }
+                                prev = visible[-1]
+                        case "stream_end":
+                            break
+
+        async for chunk in generator():
+            yield chunk