Diffstat (limited to 'continuedev')
-rw-r--r--  continuedev/src/continuedev/libs/llm/anthropic.py            |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/ggml.py                 |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/google_palm_api.py      | 30
-rw-r--r--  continuedev/src/continuedev/libs/llm/hf_inference_api.py     |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/llamacpp.py             |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/ollama.py               |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py               |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai_free_trial.py    |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/queued.py               |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/replicate.py            |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/text_gen_interface.py   |  2
-rw-r--r--  continuedev/src/continuedev/models/reference/generate.py (renamed from continuedev/src/continuedev/models/reference/test.py) |  0
13 files changed, 27 insertions, 25 deletions
diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/continuedev/src/continuedev/libs/llm/anthropic.py
index d3b773e4..2430e786 100644
--- a/continuedev/src/continuedev/libs/llm/anthropic.py
+++ b/continuedev/src/continuedev/libs/llm/anthropic.py
@@ -10,7 +10,7 @@ class AnthropicLLM(LLM):
"""
Import the `AnthropicLLM` class and set it as the default model:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM
config = ContinueConfig(
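
Note: the hunk above truncates the docstring's config example. A minimal sketch of how it plausibly continues, following the `ContinueConfig`/`Models` pattern used throughout these docstrings (the `claude-2` model name and API key placeholder are illustrative assumptions):

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM

config = ContinueConfig(
    ...
    # model name and key placeholder are illustrative, not taken from this diff
    models=Models(default=AnthropicLLM(api_key="<API_KEY>", model="claude-2")),
)
```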
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index 27a55dfe..ae185b28 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -18,7 +18,7 @@ class GGML(LLM):
Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.ggml import GGML
config = ContinueConfig(
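
For reference, the truncated GGML example likely continues along these lines; the `server_url` parameter is an assumption matching the localhost:8000 server the docstring mentions:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.ggml import GGML

config = ContinueConfig(
    ...
    models=Models(
        # points at the OpenAI-compatible server started on localhost:8000
        default=GGML(server_url="http://localhost:8000")
    ),
)
```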
diff --git a/continuedev/src/continuedev/libs/llm/google_palm_api.py b/continuedev/src/continuedev/libs/llm/google_palm_api.py
index e369caf6..8c0b30d1 100644
--- a/continuedev/src/continuedev/libs/llm/google_palm_api.py
+++ b/continuedev/src/continuedev/libs/llm/google_palm_api.py
@@ -1,16 +1,17 @@
-from typing import Callable, Dict, List, Union
+from typing import List
+
+import requests
+from pydantic import Field
from ...core.main import ChatMessage
from ..llm import LLM
-from pydantic import Field
-import requests
class GooglePaLMAPI(LLM):
"""
The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.hf_inference_api import GooglePaLMAPI
@@ -28,21 +29,22 @@ class GooglePaLMAPI(LLM):
api_key: str = Field(..., description="Google PaLM API key")
model: str = "chat-bison-001"
-
- async def _stream_complete(self, prompt, options):
- api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
- body = { "prompt": { "messages": [{ "content": prompt}] } }
+ async def _stream_complete(self, prompt, options):
+ api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+ body = {"prompt": {"messages": [{"content": prompt}]}}
response = requests.post(api_url, json=body)
yield response.json()["candidates"][0]["content"]
-
- async def _stream_chat(self, messages: List[ChatMessage], options):
+ async def _stream_chat(self, messages: List[ChatMessage], options):
msg_lst = []
for message in messages:
- msg_lst.append({ "content": message["content"] })
+ msg_lst.append({"content": message["content"]})
- api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
- body = { "prompt": { "messages": msg_lst } }
+ api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+ body = {"prompt": {"messages": msg_lst}}
response = requests.post(api_url, json=body)
- yield {"content": response.json()["candidates"][0]["content"], "role": "assistant"} \ No newline at end of file
+ yield {
+ "content": response.json()["candidates"][0]["content"],
+ "role": "assistant",
+ }
diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
index ab1482e8..1d41b3a3 100644
--- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py
+++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
@@ -12,7 +12,7 @@ class HuggingFaceInferenceAPI(LLM):
"""
Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing), access the Inference Endpoints [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, and fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and then deploy your model by clicking “Create Endpoint”. Change `~/.continue/config.py` to look like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI
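
A sketch of how the truncated Inference Endpoints example likely continues; the `endpoint_url` and `hf_token` parameter names are assumptions, not confirmed by this diff:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI

config = ContinueConfig(
    ...
    models=Models(
        # both parameter names are assumptions for a dedicated Inference Endpoint
        default=HuggingFaceInferenceAPI(
            endpoint_url="<INFERENCE_API_ENDPOINT_URL>",
            hf_token="<HUGGING_FACE_TOKEN>",
        )
    ),
)
```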
diff --git a/continuedev/src/continuedev/libs/llm/llamacpp.py b/continuedev/src/continuedev/libs/llm/llamacpp.py
index 0b4c9fb0..c7144745 100644
--- a/continuedev/src/continuedev/libs/llm/llamacpp.py
+++ b/continuedev/src/continuedev/libs/llm/llamacpp.py
@@ -18,7 +18,7 @@ class LlamaCpp(LLM):
After it's up and running, change `~/.continue/config.py` to look like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp
config = ContinueConfig(
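
The example presumably completes in the same pattern; the port is an assumption based on llama.cpp's example server listening on 8080 by default:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp

config = ContinueConfig(
    ...
    models=Models(
        # llama.cpp's example server defaults to port 8080; adjust if yours differs
        default=LlamaCpp(server_url="http://localhost:8080")
    ),
)
```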
diff --git a/continuedev/src/continuedev/libs/llm/ollama.py b/continuedev/src/continuedev/libs/llm/ollama.py
index ee6ab540..e9b421d5 100644
--- a/continuedev/src/continuedev/libs/llm/ollama.py
+++ b/continuedev/src/continuedev/libs/llm/ollama.py
@@ -14,7 +14,7 @@ class Ollama(LLM):
"""
[Ollama](https://ollama.ai/) is an application for Mac and Linux that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.ollama import Ollama
config = ContinueConfig(
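
A plausible completion of the Ollama example; the `llama2` model tag is illustrative:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.ollama import Ollama

config = ContinueConfig(
    ...
    models=Models(
        default=Ollama(model="llama2")  # model tag is illustrative
    ),
)
```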
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index d9e74cec..db3c9852 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -32,7 +32,7 @@ class OpenAI(LLM):
If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.openai import OpenAI
config = ContinueConfig(
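
Since the docstring says only the `api_base` needs to change for an OpenAI-compatible server, the full example plausibly looks like this (the key, model name, and port are placeholders):

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.openai import OpenAI

config = ContinueConfig(
    ...
    models=Models(
        default=OpenAI(
            api_key="EMPTY",  # many OpenAI-compatible servers ignore the key
            model="<MODEL_NAME>",
            api_base="http://localhost:8000",  # point at your local server
        )
    ),
)
```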
diff --git a/continuedev/src/continuedev/libs/llm/openai_free_trial.py b/continuedev/src/continuedev/libs/llm/openai_free_trial.py
index 367f2bbd..6d96f8bb 100644
--- a/continuedev/src/continuedev/libs/llm/openai_free_trial.py
+++ b/continuedev/src/continuedev/libs/llm/openai_free_trial.py
@@ -16,7 +16,7 @@ class OpenAIFreeTrial(LLM):
2. Open `~/.continue/config.py`. You can do this by using the '/config' command in Continue
3. Change the default LLMs to look like this:
- ```python
+ ```python title="~/.continue/config.py"
API_KEY = "<API_KEY>"
config = ContinueConfig(
...
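
The plural "default LLMs" in step 3 suggests more than one role is configured; a sketch assuming a `default` and a `summarize` model (the role names and model choices are assumptions, not from this diff):

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial

API_KEY = "<API_KEY>"
config = ContinueConfig(
    ...
    models=Models(
        # role names and model choices are illustrative assumptions
        default=OpenAIFreeTrial(model="gpt-4", api_key=API_KEY),
        summarize=OpenAIFreeTrial(model="gpt-3.5-turbo", api_key=API_KEY),
    ),
)
```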
diff --git a/continuedev/src/continuedev/libs/llm/queued.py b/continuedev/src/continuedev/libs/llm/queued.py
index bbaadde6..785c5dc0 100644
--- a/continuedev/src/continuedev/libs/llm/queued.py
+++ b/continuedev/src/continuedev/libs/llm/queued.py
@@ -13,7 +13,7 @@ class QueuedLLM(LLM):
If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.queued import QueuedLLM
config = ContinueConfig(
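
Wrapping an existing LLM presumably looks like the sketch below; the `llm=` keyword and the GGML inner model are assumptions chosen for illustration:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.ggml import GGML
from continuedev.src.continuedev.libs.llm.queued import QueuedLLM

config = ContinueConfig(
    ...
    models=Models(
        # wrap whichever LLM class you already use; GGML here is illustrative
        default=QueuedLLM(llm=GGML(server_url="http://localhost:8000"))
    ),
)
```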
diff --git a/continuedev/src/continuedev/libs/llm/replicate.py b/continuedev/src/continuedev/libs/llm/replicate.py
index 02d9bfd7..86840572 100644
--- a/continuedev/src/continuedev/libs/llm/replicate.py
+++ b/continuedev/src/continuedev/libs/llm/replicate.py
@@ -13,7 +13,7 @@ class ReplicateLLM(LLM):
"""
Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM
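
A sketch of the truncated Replicate example; the `owner/model:version` identifier format follows Replicate's convention, and both values are placeholders:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM

config = ContinueConfig(
    ...
    models=Models(
        default=ReplicateLLM(
            model="<owner>/<model>:<version>",  # any model from the streaming list
            api_key="<REPLICATE_API_KEY>",
        )
    ),
)
```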
diff --git a/continuedev/src/continuedev/libs/llm/text_gen_interface.py b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
index 1ff9feb7..f726f516 100644
--- a/continuedev/src/continuedev/libs/llm/text_gen_interface.py
+++ b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
@@ -14,7 +14,7 @@ class TextGenUI(LLM):
"""
TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.libs.llm.text_gen_interface import TextGenUI
config = ContinueConfig(
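
The example plausibly completes as below; the `server_url` parameter and port are unconfirmed assumptions about where text-generation-webui's API listens:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.text_gen_interface import TextGenUI

config = ContinueConfig(
    ...
    models=Models(
        # server_url is an assumption; adjust to your text-generation-webui API address
        default=TextGenUI(server_url="http://localhost:5000")
    ),
)
```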
diff --git a/continuedev/src/continuedev/libs/llm/together.py b/continuedev/src/continuedev/libs/llm/together.py
index b679351c..0274bb14 100644
--- a/continuedev/src/continuedev/libs/llm/together.py
+++ b/continuedev/src/continuedev/libs/llm/together.py
@@ -15,7 +15,7 @@ class TogetherLLM(LLM):
"""
The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this:
- ```python
+ ```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.together import TogetherLLM
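
A sketch of the truncated Together example; the model id is illustrative, following Together's public model naming scheme:

```python title="~/.continue/config.py"
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.together import TogetherLLM

config = ContinueConfig(
    ...
    models=Models(
        default=TogetherLLM(
            api_key="<TOGETHER_API_KEY>",
            model="togethercomputer/llama-2-13b-chat",  # illustrative model id
        )
    ),
)
```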
diff --git a/continuedev/src/continuedev/models/reference/test.py b/continuedev/src/continuedev/models/reference/generate.py
index 0ab9ba85..0ab9ba85 100644
--- a/continuedev/src/continuedev/models/reference/test.py
+++ b/continuedev/src/continuedev/models/reference/generate.py