summaryrefslogtreecommitdiff
path: root/docs
diff options
context:
space:
mode:
authorTy Dunn <ty@tydunn.com>2023-10-02 12:18:47 -0700
committerTy Dunn <ty@tydunn.com>2023-10-02 12:18:47 -0700
commit54950690e8f9a34c33e58a788938df61781b870e (patch)
tree41044ba5b1ee67c3ae98b325fac7e04f9d3ef3b0 /docs
parent82bafc9ffa0eabd2b96b90bf3d375f22d62dc16a (diff)
downloadsncontinue-54950690e8f9a34c33e58a788938df61781b870e.tar.gz
sncontinue-54950690e8f9a34c33e58a788938df61781b870e.tar.bz2
sncontinue-54950690e8f9a34c33e58a788938df61781b870e.zip
palm api docs
Diffstat (limited to 'docs')
-rw-r--r--docs/docs/customization/models.md1
-rw-r--r--docs/docs/reference/Models/googlepalmapi.md41
2 files changed, 42 insertions, 0 deletions
diff --git a/docs/docs/customization/models.md b/docs/docs/customization/models.md
index 7c5caee7..9d882fcb 100644
--- a/docs/docs/customization/models.md
+++ b/docs/docs/customization/models.md
@@ -7,6 +7,7 @@ Commercial Models
- [OpenAIFreeTrial](../reference/Models/openaifreetrial.md) (default) - Use gpt-4 or gpt-3.5-turbo free with our API key, or with your API key. gpt-4 is probably the most capable model of all options.
- [OpenAI](../reference/Models/openai.md) - Use any OpenAI model with your own key. Can also change the base URL if you have a server that uses the OpenAI API format, including using the Azure OpenAI service, LocalAI, etc.
- [AnthropicLLM](../reference/Models/anthropicllm.md) - Use claude-2 with your Anthropic API key. Claude 2 is also highly capable, and has a 100,000 token context window.
+- [GooglePaLMAPI](../reference/Models/googlepalmapi.md) - Try out the `chat-bison-001` model, which is currently in public preview, after creating an API key in [Google MakerSuite](https://makersuite.google.com/u/2/app/apikey)
Local Models
diff --git a/docs/docs/reference/Models/googlepalmapi.md b/docs/docs/reference/Models/googlepalmapi.md
new file mode 100644
index 00000000..74bec3f3
--- /dev/null
+++ b/docs/docs/reference/Models/googlepalmapi.md
@@ -0,0 +1,41 @@
+import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
+
+# GooglePaLMAPI
+
+The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.core.models import Models
+from continuedev.src.continuedev.libs.llm.google_palm_api import GooglePaLMAPI
+
+config = ContinueConfig(
+ ...
+ models=Models(
+ default=GooglePaLMAPI(
+            model="chat-bison-001",
+ api_key="<MAKERSUITE_API_KEY>",
+        )
+    )
+)
+```
+
+[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/google_palm_api.py)
+
+## Properties
+
+
+
+### Inherited Properties
+
+<ClassPropertyRef name='api_key' details='{&quot;title&quot;: &quot;Api Key&quot;, &quot;description&quot;: &quot;Google PaLM API key&quot;, &quot;type&quot;: &quot;string&quot;}' required={true} default=""/>
+<ClassPropertyRef name='title' details='{&quot;title&quot;: &quot;Title&quot;, &quot;description&quot;: &quot;A title that will identify this model in the model selection dropdown&quot;, &quot;type&quot;: &quot;string&quot;}' required={false} default=""/>
+<ClassPropertyRef name='system_message' details='{&quot;title&quot;: &quot;System Message&quot;, &quot;description&quot;: &quot;A system message that will always be followed by the LLM&quot;, &quot;type&quot;: &quot;string&quot;}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{&quot;title&quot;: &quot;Context Length&quot;, &quot;description&quot;: &quot;The maximum context length of the LLM in tokens, as counted by count_tokens.&quot;, &quot;default&quot;: 2048, &quot;type&quot;: &quot;integer&quot;}' required={false} default="2048"/>
+<ClassPropertyRef name='unique_id' details='{&quot;title&quot;: &quot;Unique Id&quot;, &quot;description&quot;: &quot;The unique ID of the user.&quot;, &quot;type&quot;: &quot;string&quot;}' required={false} default=""/>
+<ClassPropertyRef name='model' details='{&quot;title&quot;: &quot;Model&quot;, &quot;description&quot;: &quot;The name of the model to be used (e.g. gpt-4, codellama)&quot;, &quot;default&quot;: &quot;chat-bison-001&quot;, &quot;type&quot;: &quot;string&quot;}' required={false} default="chat-bison-001"/>
+<ClassPropertyRef name='max_tokens' details='{&quot;title&quot;: &quot;Max Tokens&quot;, &quot;description&quot;: &quot;The maximum number of tokens to generate.&quot;, &quot;default&quot;: 1024, &quot;type&quot;: &quot;integer&quot;}' required={false} default="1024"/>
+<ClassPropertyRef name='stop_tokens' details='{&quot;title&quot;: &quot;Stop Tokens&quot;, &quot;description&quot;: &quot;Tokens that will stop the completion.&quot;, &quot;type&quot;: &quot;array&quot;, &quot;items&quot;: {&quot;type&quot;: &quot;string&quot;}}' required={false} default=""/>
+<ClassPropertyRef name='timeout' details='{&quot;title&quot;: &quot;Timeout&quot;, &quot;description&quot;: &quot;Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.&quot;, &quot;default&quot;: 300, &quot;type&quot;: &quot;integer&quot;}' required={false} default="300"/>
+<ClassPropertyRef name='verify_ssl' details='{&quot;title&quot;: &quot;Verify Ssl&quot;, &quot;description&quot;: &quot;Whether to verify SSL certificates for requests.&quot;, &quot;type&quot;: &quot;boolean&quot;}' required={false} default=""/>
+<ClassPropertyRef name='ca_bundle_path' details='{&quot;title&quot;: &quot;Ca Bundle Path&quot;, &quot;description&quot;: &quot;Path to a custom CA bundle to use when making the HTTP request&quot;, &quot;type&quot;: &quot;string&quot;}' required={false} default=""/>
+<ClassPropertyRef name='proxy' details='{&quot;title&quot;: &quot;Proxy&quot;, &quot;description&quot;: &quot;Proxy URL to use when making the HTTP request&quot;, &quot;type&quot;: &quot;string&quot;}' required={false} default=""/>
+<ClassPropertyRef name='prompt_templates' details='{&quot;title&quot;: &quot;Prompt Templates&quot;, &quot;description&quot;: &quot;A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \&quot;edit\&quot; key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.&quot;, &quot;default&quot;: {}, &quot;type&quot;: &quot;object&quot;}' required={false} default="{}"/>