author     Nate Sesti <33237525+sestinj@users.noreply.github.com>   2023-10-09 18:37:27 -0700
committer  GitHub <noreply@github.com>                              2023-10-09 18:37:27 -0700
commit     f09150617ed2454f3074bcf93f53aae5ae637d40 (patch)
tree       5cfe614a64d921dfe58b049f426d67a8b832c71f /server/continuedev/libs/llm/llamacpp.py
parent     985304a213f620cdff3f8f65f74ed7e3b79be29d (diff)
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action <action@github.com>
Diffstat (limited to 'server/continuedev/libs/llm/llamacpp.py')
-rw-r--r--   server/continuedev/libs/llm/llamacpp.py   86
1 file changed, 86 insertions(+), 0 deletions(-)
diff --git a/server/continuedev/libs/llm/llamacpp.py b/server/continuedev/libs/llm/llamacpp.py
new file mode 100644
index 00000000..bc856a52
--- /dev/null
+++ b/server/continuedev/libs/llm/llamacpp.py
@@ -0,0 +1,86 @@
+import json
+from typing import Any, Callable, Dict
+
+from pydantic import Field
+
+from .base import LLM
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class LlamaCpp(LLM):
+ """
+ Run the llama.cpp server binary to start the API server. If running on a remote server, be sure to set host to 0.0.0.0:
+
+    ```shell
+    .\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf
+    ```
+
+    After it's up and running, change `~/.continue/config.py` to look like this:
+
+    ```python title="~/.continue/config.py"
+    from continuedev.libs.llm.llamacpp import LlamaCpp
+
+    config = ContinueConfig(
+        ...
+        models=Models(
+            default=LlamaCpp(
+                max_context_length=4096,
+                server_url="http://localhost:8080")
+        )
+    )
+    ```
+    """
+
+    model: str = "llamacpp"
+    server_url: str = Field("http://localhost:8080", description="URL of the server")
+
+    llama_cpp_args: Dict[str, Any] = Field(
+        {"stop": ["[INST]"]},
+        description="A dictionary of additional arguments to pass to llama.cpp. See [here](https://github.com/ggerganov/llama.cpp/tree/master/examples/server#api-endpoints) for the complete catalog of options.",
+    )
+
+    template_messages: Callable = llama2_template_messages
+    prompt_templates = {
+        "edit": simplified_edit_prompt,
+    }
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    def collect_args(self, options) -> Any:
+        args = super().collect_args(options)
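+        # the llama.cpp server expects "n_predict" rather than "max_tokens", and the
+        # OpenAI-style frequency/presence penalties are not forwarded to it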
+ if "max_tokens" in args:
+ args["n_predict"] = args["max_tokens"]
+ del args["max_tokens"]
+ if "frequency_penalty" in args:
+ del args["frequency_penalty"]
+ if "presence_penalty" in args:
+ del args["presence_penalty"]
+
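+        # merge user-supplied llama_cpp_args without overriding values already set above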
+        for k, v in self.llama_cpp_args.items():
+            if k not in args:
+                args[k] = v
+
+        return args
+
+    async def _stream_complete(self, prompt, options):
+        args = self.collect_args(options)
+        headers = {"Content-Type": "application/json"}
+
+        async def server_generator():
+            async with self.create_client_session() as client_session:
+                async with client_session.post(
+                    f"{self.server_url}/completion",
+                    json={"prompt": prompt, "stream": True, **args},
+                    headers=headers,
+                    proxy=self.proxy,
+                ) as resp:
+                    async for line in resp.content:
+                        content = line.decode("utf-8")
+                        if content.strip() == "":
+                            continue
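+                        # each event line is prefixed with "data: "; strip it and parse the JSON payload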
+                        yield json.loads(content[6:])["content"]
+
+        async for chunk in server_generator():
+            yield chunk
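For reference, here is a minimal standalone sketch (not part of the commit) of what `_stream_complete` is doing: with `"stream": true`, the llama.cpp `/completion` endpoint returns server-sent events, one JSON payload per `data: `-prefixed line, which is why the code above strips the first six characters before calling `json.loads`. The prompt text and `n_predict` value are illustrative, and the server is assumed to be running at the class's default `http://localhost:8080`.

```python
import asyncio
import json

import aiohttp


async def stream_completion(prompt: str, server_url: str = "http://localhost:8080") -> None:
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{server_url}/completion",
            json={"prompt": prompt, "stream": True, "n_predict": 128},
            headers={"Content-Type": "application/json"},
        ) as resp:
            async for line in resp.content:
                text = line.decode("utf-8")
                if text.strip() == "":
                    continue
                # each event looks like: data: {"content": "...", "stop": false, ...}
                event = json.loads(text[len("data: "):])
                print(event["content"], end="", flush=True)


if __name__ == "__main__":
    asyncio.run(stream_completion("[INST] Write a haiku about code. [/INST]"))
```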