Diffstat (limited to 'continuedev/src')
-rw-r--r--  continuedev/src/continuedev/core/models.py                3
-rw-r--r--  continuedev/src/continuedev/libs/llm/google_palm_api.py  48
-rw-r--r--  continuedev/src/continuedev/plugins/steps/setup_model.py  1
3 files changed, 52 insertions, 0 deletions
diff --git a/continuedev/src/continuedev/core/models.py b/continuedev/src/continuedev/core/models.py
index 2396a0db..9b8d26d5 100644
--- a/continuedev/src/continuedev/core/models.py
+++ b/continuedev/src/continuedev/core/models.py
@@ -5,6 +5,7 @@ from pydantic import BaseModel
from ..libs.llm import LLM
from ..libs.llm.anthropic import AnthropicLLM
from ..libs.llm.ggml import GGML
+from ..libs.llm.google_palm_api import GooglePaLMAPI
from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI
from ..libs.llm.hf_tgi import HuggingFaceTGI
from ..libs.llm.llamacpp import LlamaCpp
@@ -39,6 +40,7 @@ MODEL_CLASSES = {
LlamaCpp,
HuggingFaceInferenceAPI,
HuggingFaceTGI,
+ GooglePaLMAPI,
]
}
@@ -53,6 +55,7 @@ MODEL_MODULE_NAMES = {
"LlamaCpp": "llamacpp",
"HuggingFaceInferenceAPI": "hf_inference_api",
"HuggingFaceTGI": "hf_tgi",
+ "GooglePaLMAPI": "google_palm_api",
}
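The models.py hunks above register the new class in two lookup structures: MODEL_CLASSES and MODEL_MODULE_NAMES, which maps each class name to the module file it lives in. As a minimal sketch of why such a map is useful (an assumed pattern; `load_llm_class` is a hypothetical name, not the repo's actual loader):

```python
# Hypothetical sketch: how a class-name -> module-name map like
# MODEL_MODULE_NAMES can drive a lazy import. `load_llm_class` is an
# illustrative name and is not a function from the repo.
import importlib

MODEL_MODULE_NAMES = {
    "GooglePaLMAPI": "google_palm_api",
}

def load_llm_class(class_name: str):
    # Resolve the module name, import it on demand, then pull the class out.
    module_name = MODEL_MODULE_NAMES[class_name]
    module = importlib.import_module(
        f"continuedev.src.continuedev.libs.llm.{module_name}"
    )
    return getattr(module, class_name)
```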
diff --git a/continuedev/src/continuedev/libs/llm/google_palm_api.py b/continuedev/src/continuedev/libs/llm/google_palm_api.py
new file mode 100644
index 00000000..e369caf6
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/google_palm_api.py
@@ -0,0 +1,48 @@
+from typing import List
+
+import requests
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from ..llm import LLM
+
+
+class GooglePaLMAPI(LLM):
+ """
+ The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:
+
+ ```python
+ from continuedev.src.continuedev.core.models import Models
+ from continuedev.src.continuedev.libs.llm.hf_inference_api import GooglePaLMAPI
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=GooglePaLMAPI(
+ model="chat-bison-001"
+ api_key="<MAKERSUITE_API_KEY>",
+ )
+ )
+ ```
+ """
+
+ api_key: str = Field(..., description="Google PaLM API key")
+
+ model: str = "chat-bison-001"
+
+    async def _stream_complete(self, prompt, options):
+        # Single (non-streaming) request: the whole completion is fetched
+        # at once and yielded as one chunk.
+        api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+        body = {"prompt": {"messages": [{"content": prompt}]}}
+        response = requests.post(api_url, json=body)
+        yield response.json()["candidates"][0]["content"]
+
+    async def _stream_chat(self, messages: List[ChatMessage], options):
+        # generateMessage takes {"prompt": {"messages": [...]}}, where each
+        # message only needs a "content" field.
+        msg_lst = [{"content": message["content"]} for message in messages]
+
+        api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+        body = {"prompt": {"messages": msg_lst}}
+        response = requests.post(api_url, json=body)
+        yield {"content": response.json()["candidates"][0]["content"], "role": "assistant"}
\ No newline at end of file
diff --git a/continuedev/src/continuedev/plugins/steps/setup_model.py b/continuedev/src/continuedev/plugins/steps/setup_model.py
index f29bd51e..87e52f1b 100644
--- a/continuedev/src/continuedev/plugins/steps/setup_model.py
+++ b/continuedev/src/continuedev/plugins/steps/setup_model.py
@@ -14,6 +14,7 @@ MODEL_CLASS_TO_MESSAGE = {
"TogetherLLM": "To get started using models from Together, first obtain your Together API key from [here](https://together.ai). Paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then, on their models page, press 'start' on the model of your choice and make sure the `model=` parameter in the config file for the `TogetherLLM` class reflects the name of this model. Finally, reload the VS Code window for changes to take effect.",
"LlamaCpp": "To get started with this model, clone the [`llama.cpp` repo](https://github.com/ggerganov/llama.cpp) and follow the instructions to set up the server [here](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#build). Any of the parameters described in the README can be passed to the `llama_cpp_args` field in the `LlamaCpp` class in `config.py`.",
"HuggingFaceInferenceAPI": "To get started with the HuggingFace Inference API, first deploy a model and obtain your API key from [here](https://huggingface.co/inference-api). Paste it into the `hf_token` field at config.models.default.hf_token in `config.py`. Finally, reload the VS Code window for changes to take effect.",
+ "GooglePaLMAPI": "To get started with the Google PaLM API, create an API key in Makersuite [here](https://makersuite.google.com/u/2/app/apikey), then paste it into the `api_key` field at config.models.default.api_key in `config.py`.",
}