From f09150617ed2454f3074bcf93f53aae5ae637d40 Mon Sep 17 00:00:00 2001
From: Nate Sesti <33237525+sestinj@users.noreply.github.com>
Date: Mon, 9 Oct 2023 18:37:27 -0700
Subject: Preview (#541)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Strong typing (#533)

* refactor: :recycle: get rid of continuedev.src.continuedev structure

* refactor: :recycle: switching back to server folder

* feat: :sparkles: make config.py imports shorter

* feat: :bookmark: publish as pre-release vscode extension

* refactor: :recycle: refactor and add more completion params to ui

* build: :building_construction: download from preview S3

* fix: :bug: fix paths

* fix: :green_heart: package:pre-release

* ci: :green_heart: more time for tests

* fix: :green_heart: fix build scripts

* fix: :bug: fix import in run.py

* fix: :bookmark: update version to try again

* ci: 💚 Update package.json version [skip ci]

* refactor: :fire: don't check for old extensions version

* fix: :bug: small bug fixes

* fix: :bug: fix config.py import paths

* ci: 💚 Update package.json version [skip ci]

* ci: :green_heart: platform-specific builds test #1

* feat: :green_heart: ship with binary

* fix: :green_heart: fix copy statement to include .exe for windows

* fix: :green_heart: cd extension before packaging

* chore: :loud_sound: count tokens generated

* fix: :green_heart: remove npm_config_arch

* fix: :green_heart: publish as pre-release!

* chore: :bookmark: update version

* perf: :green_heart: hardcode distro paths

* fix: :bug: fix yaml syntax error

* chore: :bookmark: update version

* fix: :green_heart: update permissions and version

* feat: :bug: kill old server if needed

* feat: :lipstick: update marketplace icon for pre-release

* ci: 💚 Update package.json version [skip ci]

* feat: :sparkles: auto-reload for config.py

* feat: :wrench: update default config.py imports

* feat: :sparkles: codelens in config.py

* feat: :sparkles: select model param count from UI

* ci: 💚 Update package.json version [skip ci]

* feat: :sparkles: more model options, ollama error handling

* perf: :zap: don't show server loading immediately

* fix: :bug: fixing small UI details

* ci: 💚 Update package.json version [skip ci]

* feat: :rocket: headers param on LLM class

* fix: :bug: fix headers for openai.py

* feat: :sparkles: highlight code on cmd+shift+L

* ci: 💚 Update package.json version [skip ci]

* feat: :lipstick: sticky top bar in gui.tsx

* fix: :loud_sound: websocket logging and horizontal scrollbar

* ci: 💚 Update package.json version [skip ci]

* feat: :sparkles: allow AzureOpenAI Service through GGML

* ci: 💚 Update package.json version [skip ci]

* fix: :bug: fix automigration

* ci: 💚 Update package.json version [skip ci]

* ci: :green_heart: upload binaries in ci, download apple silicon

* chore: :fire: remove notes

* fix: :green_heart: use curl to download binary

* fix: :green_heart: set permissions on apple silicon binary

* fix: :green_heart: testing

* fix: :green_heart: cleanup file

* fix: :green_heart: fix preview.yaml

* fix: :green_heart: only upload once per binary

* fix: :green_heart: install rosetta

* ci: :green_heart: download binary after tests

* ci: 💚 Update package.json version [skip ci]

* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action
---
 server/continuedev/libs/llm/hf_inference_api.py | 79 +++++++++++++++++++++++++
 1 file changed, 79 insertions(+)
 create mode 100644 server/continuedev/libs/llm/hf_inference_api.py
diff --git a/server/continuedev/libs/llm/hf_inference_api.py b/server/continuedev/libs/llm/hf_inference_api.py
new file mode 100644
index 00000000..990ec7c8
--- /dev/null
+++ b/server/continuedev/libs/llm/hf_inference_api.py
@@ -0,0 +1,79 @@
+from typing import Callable, Dict, List, Union
+
+from huggingface_hub import InferenceClient
+from pydantic import Field
+
+from .base import LLM, CompletionOptions
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class HuggingFaceInferenceAPI(LLM):
+    """
+    Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing), access the Inference Endpoints [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, and fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and then deploy your model by clicking “Create Endpoint”. Change `~/.continue/config.py` to look like this:
+
+    ```python title="~/.continue/config.py"
+    from continuedev.core.models import Models
+    from continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI
+
+    config = ContinueConfig(
+        ...
+        models=Models(
+            default=HuggingFaceInferenceAPI(
+                endpoint_url="",
+                hf_token="",
+            )
+        )
+    )
+    ```
+    """
+
+    model: str = Field(
+        "Hugging Face Inference API",
+        description="The name of the model to use (optional for the HuggingFaceInferenceAPI class)",
+    )
+    hf_token: str = Field(..., description="Your Hugging Face API token")
+    endpoint_url: str = Field(
+        None, description="Your Hugging Face Inference API endpoint URL"
+    )
+
+    template_messages: Union[
+        Callable[[List[Dict[str, str]]], str], None
+    ] = llama2_template_messages
+
+    prompt_templates = {
+        "edit": simplified_edit_prompt,
+    }
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    def collect_args(self, options: CompletionOptions):
+        options.stop = None
+        args = super().collect_args(options)
+
+        if "max_tokens" in args:
+            args["max_new_tokens"] = args["max_tokens"]
+            del args["max_tokens"]
+        if "stop" in args:
+            args["stop_sequences"] = args["stop"]
+            del args["stop"]
+
+        return args
+
+    async def _stream_complete(self, prompt, options):
+        args = self.collect_args(options)
+
+        client = InferenceClient(self.endpoint_url, token=self.hf_token)
+
+        stream = client.text_generation(prompt, stream=True, details=True, **args)
+
+        for r in stream:
+            # skip special tokens
+            if r.token.special:
+                continue
+            # stop if we encounter a stop sequence
+            if options.stop is not None:
+                if r.token.text in options.stop:
+                    break
+            yield r.token.text
-- 
cgit v1.2.3-70-g09d2
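For readers unfamiliar with the streaming API this patch builds on, here is a minimal standalone sketch of the `huggingface_hub` token-streaming pattern that `_stream_complete` wraps. The endpoint URL, token, prompt, and stop list are illustrative placeholders, not values from the patch:

```python
# Sketch only: mirrors the token-streaming loop in _stream_complete above.
# <ENDPOINT_URL> and <HF_TOKEN> are placeholders for a deployed Inference
# Endpoint URL and a real Hugging Face API token.
from huggingface_hub import InferenceClient

client = InferenceClient("<ENDPOINT_URL>", token="<HF_TOKEN>")
stop = ["</s>"]  # hypothetical stop sequences, normally taken from CompletionOptions

# details=True attaches token metadata (text, special) to each streamed chunk.
for r in client.text_generation(
    "def fibonacci(n):", stream=True, details=True, max_new_tokens=64
):
    if r.token.special:
        continue  # skip special tokens such as EOS
    if r.token.text in stop:
        break  # client-side stop-sequence check, as in the class above
    print(r.token.text, end="", flush=True)
```

Checking stop sequences client-side, as both the sketch and the patched class do, keeps generation bounded even when a deployed endpoint ignores `stop_sequences`.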