summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTy Dunn <ty@tydunn.com>2023-09-30 12:35:46 -0700
committerGitHub <noreply@github.com>2023-09-30 12:35:46 -0700
commitcc396503cb0322dda2cf97a13737bb346ff8d8d7 (patch)
treef01c74b33ef831a022906abf9393353ff9fa871e
parent87e4a6289c3cbb54d4c227b90640ae77aa7cebc1 (diff)
downloadsncontinue-cc396503cb0322dda2cf97a13737bb346ff8d8d7.tar.gz
sncontinue-cc396503cb0322dda2cf97a13737bb346ff8d8d7.tar.bz2
sncontinue-cc396503cb0322dda2cf97a13737bb346ff8d8d7.zip
adding support for the google palm api (#524)
* adding support for palm api * docs: :bug: update modelData to new format --------- Co-authored-by: Nate Sesti <sestinj@gmail.com>
-rw-r--r--continuedev/src/continuedev/core/models.py3
-rw-r--r--continuedev/src/continuedev/libs/llm/google_palm_api.py48
-rw-r--r--continuedev/src/continuedev/plugins/steps/setup_model.py1
-rw-r--r--extension/react-app/public/logos/google-palm.pngbin0 -> 2608 bytes
-rw-r--r--extension/react-app/src/components/ModelSelect.tsx8
-rw-r--r--extension/react-app/src/util/modelData.ts30
6 files changed, 90 insertions, 0 deletions
diff --git a/continuedev/src/continuedev/core/models.py b/continuedev/src/continuedev/core/models.py
index 2396a0db..9b8d26d5 100644
--- a/continuedev/src/continuedev/core/models.py
+++ b/continuedev/src/continuedev/core/models.py
@@ -5,6 +5,7 @@ from pydantic import BaseModel
from ..libs.llm import LLM
from ..libs.llm.anthropic import AnthropicLLM
from ..libs.llm.ggml import GGML
+from ..libs.llm.google_palm_api import GooglePaLMAPI
from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI
from ..libs.llm.hf_tgi import HuggingFaceTGI
from ..libs.llm.llamacpp import LlamaCpp
@@ -39,6 +40,7 @@ MODEL_CLASSES = {
LlamaCpp,
HuggingFaceInferenceAPI,
HuggingFaceTGI,
+ GooglePaLMAPI,
]
}
@@ -53,6 +55,7 @@ MODEL_MODULE_NAMES = {
"LlamaCpp": "llamacpp",
"HuggingFaceInferenceAPI": "hf_inference_api",
"HuggingFaceTGI": "hf_tgi",
+ "GooglePaLMAPI": "google_palm_api",
}
diff --git a/continuedev/src/continuedev/libs/llm/google_palm_api.py b/continuedev/src/continuedev/libs/llm/google_palm_api.py
new file mode 100644
index 00000000..e369caf6
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/google_palm_api.py
@@ -0,0 +1,48 @@
+from typing import Callable, Dict, List, Union
+
+from ...core.main import ChatMessage
+from ..llm import LLM
+from pydantic import Field
+import requests
+
+
class GooglePaLMAPI(LLM):
    """
    The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:

    ```python
    from continuedev.src.continuedev.core.models import Models
    from continuedev.src.continuedev.libs.llm.google_palm_api import GooglePaLMAPI

    config = ContinueConfig(
        ...
        models=Models(
            default=GooglePaLMAPI(
                model="chat-bison-001",
                api_key="<MAKERSUITE_API_KEY>",
            )
        )
    )
    ```
    """

    # MakerSuite API key; sent as a query parameter on every request.
    api_key: str = Field(..., description="Google PaLM API key")

    # PaLM model to query. chat-bison-001 is the public-preview chat model.
    model: str = "chat-bison-001"

    def _request_url(self) -> str:
        """Build the generateMessage endpoint URL for the configured model and key."""
        return (
            "https://generativelanguage.googleapis.com/v1beta2/models/"
            f"{self.model}:generateMessage?key={self.api_key}"
        )

    async def _stream_complete(self, prompt, options):
        """Complete `prompt` with a single PaLM call.

        PaLM's generateMessage endpoint does not stream, so the full answer is
        yielded in one chunk.

        NOTE(review): requests.post is a blocking call inside an async method and
        will stall the event loop for the duration of the request — consider an
        async HTTP client or run_in_executor; left as-is to avoid a new dependency.
        """
        body = {"prompt": {"messages": [{"content": prompt}]}}
        response = requests.post(self._request_url(), json=body)
        # Fail loudly on HTTP errors instead of a confusing KeyError on "candidates".
        response.raise_for_status()
        yield response.json()["candidates"][0]["content"]

    async def _stream_chat(self, messages: List[ChatMessage], options):
        """Send a chat transcript and yield the single assistant reply (no streaming).

        Only message contents are forwarded; PaLM's message schema does not take
        the ChatMessage role field here.
        """
        msg_lst = [{"content": message["content"]} for message in messages]
        body = {"prompt": {"messages": msg_lst}}
        response = requests.post(self._request_url(), json=body)
        response.raise_for_status()
        yield {"content": response.json()["candidates"][0]["content"], "role": "assistant"}
diff --git a/continuedev/src/continuedev/plugins/steps/setup_model.py b/continuedev/src/continuedev/plugins/steps/setup_model.py
index f29bd51e..87e52f1b 100644
--- a/continuedev/src/continuedev/plugins/steps/setup_model.py
+++ b/continuedev/src/continuedev/plugins/steps/setup_model.py
@@ -14,6 +14,7 @@ MODEL_CLASS_TO_MESSAGE = {
"TogetherLLM": "To get started using models from Together, first obtain your Together API key from [here](https://together.ai). Paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then, on their models page, press 'start' on the model of your choice and make sure the `model=` parameter in the config file for the `TogetherLLM` class reflects the name of this model. Finally, reload the VS Code window for changes to take effect.",
"LlamaCpp": "To get started with this model, clone the [`llama.cpp` repo](https://github.com/ggerganov/llama.cpp) and follow the instructions to set up the server [here](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#build). Any of the parameters described in the README can be passed to the `llama_cpp_args` field in the `LlamaCpp` class in `config.py`.",
"HuggingFaceInferenceAPI": "To get started with the HuggingFace Inference API, first deploy a model and obtain your API key from [here](https://huggingface.co/inference-api). Paste it into the `hf_token` field at config.models.default.hf_token in `config.py`. Finally, reload the VS Code window for changes to take effect.",
+ "GooglePaLMAPI": "To get started with the Google PaLM API, create an API key in Makersuite [here](https://makersuite.google.com/u/2/app/apikey), then paste it into the `api_key` field at config.models.default.api_key in `config.py`.",
}
diff --git a/extension/react-app/public/logos/google-palm.png b/extension/react-app/public/logos/google-palm.png
new file mode 100644
index 00000000..0f9dc345
--- /dev/null
+++ b/extension/react-app/public/logos/google-palm.png
Binary files differ
diff --git a/extension/react-app/src/components/ModelSelect.tsx b/extension/react-app/src/components/ModelSelect.tsx
index 9dd3f489..6856a2cf 100644
--- a/extension/react-app/src/components/ModelSelect.tsx
+++ b/extension/react-app/src/components/ModelSelect.tsx
@@ -69,6 +69,14 @@ const MODEL_INFO: { title: string; class: string; args: any }[] = [
},
},
{
+ title: "Google PaLM API",
+ class: "GooglePaLMAPI",
+ args: {
+ model: "chat-bison-001",
+ api_key: "<MAKERSUITE_API_KEY>",
+ },
+ },
+ {
title: "LM Studio",
class: "GGML",
args: {
diff --git a/extension/react-app/src/util/modelData.ts b/extension/react-app/src/util/modelData.ts
index 615cbb79..91259446 100644
--- a/extension/react-app/src/util/modelData.ts
+++ b/extension/react-app/src/util/modelData.ts
@@ -387,6 +387,36 @@ After it's up and running, you can start using Continue.`,
packages: [llama2FamilyPackage],
collectInputFor: [contextLengthInput],
},
+ palm: {
+ title: "Google PaLM API",
+ class: "GooglePaLMAPI",
+ description:
+ "Try out the Google PaLM API, which is currently in public preview, using an API key from Google Makersuite",
+ longDescription: `To get started with Google Makersuite, obtain your API key from [here](https://developers.generativeai.google/products/makersuite) and paste it below.
+> Note: Google's PaLM language models do not support streaming, so the response will appear all at once after a few seconds.`,
+ icon: "google-palm.png",
+ tags: [ModelProviderTag["Requires API Key"]],
+ collectInputFor: [
+ {
+ inputType: CollectInputType.text,
+ key: "api_key",
+ label: "API Key",
+        placeholder: "Enter your MakerSuite API key",
+ required: true,
+ },
+ ],
+ packages: [
+ {
+ title: "chat-bison-001",
+ description:
+ "Google PaLM's chat-bison-001 model, fine-tuned for chatting about code",
+ params: {
+ model: "chat-bison-001",
+ context_length: 8000,
+ },
+ },
+ ],
+ },
hftgi: {
title: "HuggingFace TGI",
class: "HuggingFaceTGI",