From 8093c9d10db9d3084057f4f5ea0278b9b72f5193 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Wed, 4 Oct 2023 10:08:56 -0700
Subject: docs: :memo: title for codeblocks in docs

---
 continuedev/src/continuedev/libs/llm/anthropic.py  |   2 +-
 continuedev/src/continuedev/libs/llm/ggml.py       |   2 +-
 .../src/continuedev/libs/llm/google_palm_api.py    |  30 +++--
 .../src/continuedev/libs/llm/hf_inference_api.py   |   2 +-
 continuedev/src/continuedev/libs/llm/llamacpp.py   |   2 +-
 continuedev/src/continuedev/libs/llm/ollama.py     |   2 +-
 continuedev/src/continuedev/libs/llm/openai.py     |   2 +-
 .../src/continuedev/libs/llm/openai_free_trial.py  |   2 +-
 continuedev/src/continuedev/libs/llm/queued.py     |   2 +-
 continuedev/src/continuedev/libs/llm/replicate.py  |   2 +-
 .../src/continuedev/libs/llm/text_gen_interface.py |   2 +-
 continuedev/src/continuedev/libs/llm/together.py   |   2 +-
 .../src/continuedev/models/reference/generate.py   | 144 +++++++++++++++++++++
 .../src/continuedev/models/reference/test.py       | 144 ---------------------
 14 files changed, 171 insertions(+), 169 deletions(-)
 create mode 100644 continuedev/src/continuedev/models/reference/generate.py
 delete mode 100644 continuedev/src/continuedev/models/reference/test.py

diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/continuedev/src/continuedev/libs/llm/anthropic.py
index d3b773e4..2430e786 100644
--- a/continuedev/src/continuedev/libs/llm/anthropic.py
+++ b/continuedev/src/continuedev/libs/llm/anthropic.py
@@ -10,7 +10,7 @@ class AnthropicLLM(LLM):
     """
     Import the `AnthropicLLM` class and set it as the default model:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM
 
     config = ContinueConfig(
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index 27a55dfe..ae185b28 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -18,7 +18,7 @@ class GGML(LLM):
 
     Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.libs.llm.ggml import GGML
 
     config = ContinueConfig(
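
Before pointing the `GGML` class at a local server, it can help to confirm the server is actually answering. A minimal smoke test, assuming an OpenAI-compatible server on localhost:8000 as the docstring above describes (the `/v1/completions` path and JSON fields follow the OpenAI convention, not anything this diff specifies):

```python
# Hypothetical smoke test for the local OpenAI-compatible server that the
# GGML docstring assumes is running; adjust the route if your server differs.
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={"prompt": "def fibonacci(n):", "max_tokens": 64},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```
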
diff --git a/continuedev/src/continuedev/libs/llm/google_palm_api.py b/continuedev/src/continuedev/libs/llm/google_palm_api.py
index e369caf6..8c0b30d1 100644
--- a/continuedev/src/continuedev/libs/llm/google_palm_api.py
+++ b/continuedev/src/continuedev/libs/llm/google_palm_api.py
@@ -1,16 +1,17 @@
-from typing import Callable, Dict, List, Union
+from typing import List
+
+import requests
+from pydantic import Field
 
 from ...core.main import ChatMessage
 from ..llm import LLM
-from pydantic import Field
-import requests
 
 
 class GooglePaLMAPI(LLM):
     """
     The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.core.models import Models
     from continuedev.src.continuedev.libs.llm.hf_inference_api import GooglePaLMAPI
@@ -28,21 +29,22 @@ class GooglePaLMAPI(LLM):
 
     api_key: str = Field(..., description="Google PaLM API key")
     model: str = "chat-bison-001"
-
-    async def _stream_complete(self, prompt, options):
-        api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
-        body = { "prompt": { "messages": [{ "content": prompt}] } }
 
+    async def _stream_complete(self, prompt, options):
+        api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+        body = {"prompt": {"messages": [{"content": prompt}]}}
         response = requests.post(api_url, json=body)
         yield response.json()["candidates"][0]["content"]
 
-    async def _stream_chat(self, messages: List[ChatMessage], options):
+    async def _stream_chat(self, messages: List[ChatMessage], options):
         msg_lst = []
         for message in messages:
-            msg_lst.append({ "content": message["content"] })
+            msg_lst.append({"content": message["content"]})
 
-        api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
-        body = { "prompt": { "messages": msg_lst } }
+        api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+        body = {"prompt": {"messages": msg_lst}}
         response = requests.post(api_url, json=body)
-        yield {"content": response.json()["candidates"][0]["content"], "role": "assistant"}
\ No newline at end of file
+        yield {
+            "content": response.json()["candidates"][0]["content"],
+            "role": "assistant",
+        }
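
Note that both methods above post a single `generateMessage` request and yield exactly once, so "streaming" here is one chunk. A minimal sketch of how those generators would be consumed, assuming `GooglePaLMAPI` can be instantiated standalone with only the fields shown (in practice the Continue runtime constructs these objects and drives the private `_stream_*` generators itself):

```python
import asyncio

from continuedev.src.continuedev.libs.llm.google_palm_api import GooglePaLMAPI


async def main():
    # The api_key value is a placeholder; see the MakerSuite link above.
    llm = GooglePaLMAPI(api_key="YOUR_PALM_API_KEY")
    # _stream_complete is an async generator; with the implementation above it
    # yields once, with the first candidate's content. `options` is unused in
    # the method body, so None is passed here for illustration.
    async for chunk in llm._stream_complete("Say hello", None):
        print(chunk)


asyncio.run(main())
```
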
diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
index ab1482e8..1d41b3a3 100644
--- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py
+++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
@@ -12,7 +12,7 @@ class HuggingFaceInferenceAPI(LLM):
     """
     Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing), access the Inference Endpoints [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, and fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and then deploy your model by clicking “Create Endpoint”. Change `~/.continue/config.py` to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.core.models import Models
     from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI
diff --git a/continuedev/src/continuedev/libs/llm/llamacpp.py b/continuedev/src/continuedev/libs/llm/llamacpp.py
index 0b4c9fb0..c7144745 100644
--- a/continuedev/src/continuedev/libs/llm/llamacpp.py
+++ b/continuedev/src/continuedev/libs/llm/llamacpp.py
@@ -18,7 +18,7 @@ class LlamaCpp(LLM):
 
     After it's up and running, change `~/.continue/config.py` to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp
 
     config = ContinueConfig(
diff --git a/continuedev/src/continuedev/libs/llm/ollama.py b/continuedev/src/continuedev/libs/llm/ollama.py
index ee6ab540..e9b421d5 100644
--- a/continuedev/src/continuedev/libs/llm/ollama.py
+++ b/continuedev/src/continuedev/libs/llm/ollama.py
@@ -14,7 +14,7 @@ class Ollama(LLM):
     """
     [Ollama](https://ollama.ai/) is an application for Mac and Linux that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.libs.llm.ollama import Ollama
 
     config = ContinueConfig(
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index d9e74cec..db3c9852 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -32,7 +32,7 @@ class OpenAI(LLM):
 
     If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.libs.llm.openai import OpenAI
 
     config = ContinueConfig(
diff --git a/continuedev/src/continuedev/libs/llm/openai_free_trial.py b/continuedev/src/continuedev/libs/llm/openai_free_trial.py
index 367f2bbd..6d96f8bb 100644
--- a/continuedev/src/continuedev/libs/llm/openai_free_trial.py
+++ b/continuedev/src/continuedev/libs/llm/openai_free_trial.py
@@ -16,7 +16,7 @@ class OpenAIFreeTrial(LLM):
     2. Open `~/.continue/config.py`. You can do this by using the '/config' command in Continue
     3. Change the default LLMs to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     API_KEY = ""
     config = ContinueConfig(
         ...
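
All of the docstrings touched above share one shape: import a provider class, then set an instance of it as `default` inside `ContinueConfig`. A minimal end-to-end sketch of that pattern, assuming the `ContinueConfig`/`Models` import paths used elsewhere in this patch (the provider and model name are illustrative; swap in any class from the diffs above):

```python
# Hypothetical complete ~/.continue/config.py following the pattern these
# docstrings document.
from continuedev.src.continuedev.core.config import ContinueConfig
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.ollama import Ollama

config = ContinueConfig(
    models=Models(
        default=Ollama(model="codellama"),
    )
)
```
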
diff --git a/continuedev/src/continuedev/libs/llm/queued.py b/continuedev/src/continuedev/libs/llm/queued.py
index bbaadde6..785c5dc0 100644
--- a/continuedev/src/continuedev/libs/llm/queued.py
+++ b/continuedev/src/continuedev/libs/llm/queued.py
@@ -13,7 +13,7 @@ class QueuedLLM(LLM):
 
     If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.libs.llm.queued import QueuedLLM
 
     config = ContinueConfig(
diff --git a/continuedev/src/continuedev/libs/llm/replicate.py b/continuedev/src/continuedev/libs/llm/replicate.py
index 02d9bfd7..86840572 100644
--- a/continuedev/src/continuedev/libs/llm/replicate.py
+++ b/continuedev/src/continuedev/libs/llm/replicate.py
@@ -13,7 +13,7 @@ class ReplicateLLM(LLM):
     """
     Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.core.models import Models
     from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM
diff --git a/continuedev/src/continuedev/libs/llm/text_gen_interface.py b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
index 1ff9feb7..f726f516 100644
--- a/continuedev/src/continuedev/libs/llm/text_gen_interface.py
+++ b/continuedev/src/continuedev/libs/llm/text_gen_interface.py
@@ -14,7 +14,7 @@ class TextGenUI(LLM):
     """
     TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so:
 
-    ```python
+    ```python title="~/.continue/config.py"
    from continuedev.src.continuedev.libs.llm.text_gen_interface import TextGenUI
 
     config = ContinueConfig(
diff --git a/continuedev/src/continuedev/libs/llm/together.py b/continuedev/src/continuedev/libs/llm/together.py
index b679351c..0274bb14 100644
--- a/continuedev/src/continuedev/libs/llm/together.py
+++ b/continuedev/src/continuedev/libs/llm/together.py
@@ -15,7 +15,7 @@ class TogetherLLM(LLM):
     """
     The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this:
 
-    ```python
+    ```python title="~/.continue/config.py"
     from continuedev.src.continuedev.core.models import Models
     from continuedev.src.continuedev.libs.llm.together import TogetherLLM
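
The `QueuedLLM` docstring earlier in this patch describes wrapping an existing model rather than replacing it. A minimal sketch of that wrapping, assuming `QueuedLLM` takes the wrapped model via an `llm` field (the inner provider and model name are illustrative):

```python
# Hypothetical ~/.continue/config.py that serializes concurrent requests to an
# existing provider by wrapping it in QueuedLLM.
from continuedev.src.continuedev.core.config import ContinueConfig
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.ollama import Ollama
from continuedev.src.continuedev.libs.llm.queued import QueuedLLM

config = ContinueConfig(
    models=Models(
        default=QueuedLLM(llm=Ollama(model="codellama")),
    )
)
```
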
diff --git a/continuedev/src/continuedev/models/reference/generate.py b/continuedev/src/continuedev/models/reference/generate.py
new file mode 100644
index 00000000..0ab9ba85
--- /dev/null
+++ b/continuedev/src/continuedev/models/reference/generate.py
@@ -0,0 +1,144 @@
+import html
+import importlib
+import json
+from textwrap import dedent
+
+LLM_MODULES = [
+    ("openai", "OpenAI"),
+    ("anthropic", "AnthropicLLM"),
+    ("ggml", "GGML"),
+    ("llamacpp", "LlamaCpp"),
+    ("text_gen_interface", "TextGenUI"),
+    ("ollama", "Ollama"),
+    ("replicate", "ReplicateLLM"),
+    ("together", "TogetherLLM"),
+    ("hf_inference_api", "HuggingFaceInferenceAPI"),
+    ("hf_tgi", "HuggingFaceTGI"),
+    ("openai_free_trial", "OpenAIFreeTrial"),
+    ("google_palm_api", "GooglePaLMAPI"),
+    ("queued", "QueuedLLM"),
+]
+
+CONTEXT_PROVIDER_MODULES = [
+    ("diff", "DiffContextProvider"),
+    ("file", "FileContextProvider"),
+    ("filetree", "FileTreeContextProvider"),
+    ("github", "GitHubIssuesContextProvider"),
+    ("google", "GoogleContextProvider"),
+    ("search", "SearchContextProvider"),
+    ("terminal", "TerminalContextProvider"),
+    ("url", "URLContextProvider"),
+]
+
+
+def import_llm_module(module_name, module_title):
+    module_name = f"continuedev.src.continuedev.libs.llm.{module_name}"
+    module = importlib.import_module(module_name)
+    obj = getattr(module, module_title)
+    return obj
+
+
+def import_context_provider_module(module_name, module_title):
+    module_name = f"continuedev.src.continuedev.plugins.context_providers.{module_name}"
+    module = importlib.import_module(module_name)
+    obj = getattr(module, module_title)
+    return obj
+
+
+def docs_from_schema(schema, filepath, ignore_properties=[], inherited=[]):
+    # Generate markdown docs
+    properties = ""
+    inherited_properties = ""
+
+    def add_property(prop, details, only_required):
+        required = prop in schema.get("required", [])
+        if only_required != required or prop in ignore_properties:
+            return ""
+        required = "true" if required else "false"
+        return f"""<ClassPropertyRef name='{prop}' details='{html.escape(json.dumps(details))}' required={{{required}}}/>\n"""
+
+    for prop, details in schema["properties"].items():
+        property = add_property(prop, details, True)
+        if prop in inherited:
+            inherited_properties += property
+        else:
+            properties += property
+
+    for prop, details in schema["properties"].items():
+        property = add_property(prop, details, False)
+        if prop in inherited:
+            inherited_properties += property
+        else:
+            properties += property
+
+    return dedent(
+        f"""\
+import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
+
+# {schema['title']}
+
+{dedent(schema.get("description", ""))}
+
+[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/{filepath})
+
+## Properties
+
+{properties}
+
+### Inherited Properties
+
+{inherited_properties}"""
+    )
+
+
+llm_module = importlib.import_module("continuedev.src.continuedev.libs.llm")
+ctx_obj = getattr(llm_module, "LLM")
+schema = ctx_obj.schema()
+ctx_properties = schema["properties"].keys()
+
+for module_name, module_title in LLM_MODULES:
+    obj = import_llm_module(module_name, module_title)
+    schema = obj.schema()
+    markdown_docs = docs_from_schema(
+        schema, f"libs/llm/{module_name}.py", inherited=ctx_properties
+    )
+    with open(f"docs/docs/reference/Models/{module_title.lower()}.md", "w") as f:
+        f.write(markdown_docs)
+
+config_module = importlib.import_module("continuedev.src.continuedev.core.config")
+config_obj = getattr(config_module, "ContinueConfig")
+schema = config_obj.schema()
+markdown_docs = docs_from_schema(schema, "core/config.py")
+with open("docs/docs/reference/config.md", "w") as f:
+    f.write(markdown_docs)
+
+ctx_module = importlib.import_module("continuedev.src.continuedev.core.context")
+ctx_obj = getattr(ctx_module, "ContextProvider")
+schema = ctx_obj.schema()
+ctx_properties = schema["properties"].keys()
+for module_name, module_title in CONTEXT_PROVIDER_MODULES:
+    obj = import_context_provider_module(module_name, module_title)
+    schema = obj.schema()
+    markdown_docs = docs_from_schema(
+        schema,
+        f"plugins/context_providers/{module_name}.py",
+        ignore_properties=[
+            "sdk",
+            "updated_documents",
+            "delete_documents",
+            "selected_items",
+            "ignore_patterns",
+        ],
+        inherited=ctx_properties,
+    )
+    with open(
+        f"docs/docs/reference/Context Providers/{module_title.lower()}.md", "w"
+    ) as f:
+        f.write(markdown_docs)
+
+# sdk_module = importlib.import_module("continuedev.src.continuedev.core.sdk")
+# sdk_obj = getattr(sdk_module, "ContinueSDK")
+# schema = sdk_obj.schema()
+# markdown_docs = docs_from_schema(schema, "sdk", ignore_properties=[])
+# with open("docs/docs/reference/ContinueSDK.md", "w") as f:
+#     f.write(markdown_docs)
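
`generate.py` leans entirely on pydantic's `.schema()`: the JSON schema's `properties` and `required` keys become one `<ClassPropertyRef/>` tag per field. (The exact attribute set on that tag was lost in transit and is reconstructed above from the otherwise-unused `html`/`json` imports, so treat it as approximate.) A self-contained sketch of the mechanism, using an illustrative model rather than a real Continue class:

```python
# Minimal, standalone illustration of the schema-to-markdown step used by
# generate.py. ExampleLLM is illustrative; pydantic v1's .schema() is assumed.
import html
import json

from pydantic import BaseModel, Field


class ExampleLLM(BaseModel):
    api_key: str = Field(..., description="API key for the provider")
    model: str = "chat-bison-001"


schema = ExampleLLM.schema()
for prop, details in schema["properties"].items():
    required = "true" if prop in schema.get("required", []) else "false"
    print(
        f"<ClassPropertyRef name='{prop}' "
        f"details='{html.escape(json.dumps(details))}' required={{{required}}}/>"
    )
```
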
importlib.import_module("continuedev.src.continuedev.core.config") +config_obj = getattr(config_module, "ContinueConfig") +schema = config_obj.schema() +markdown_docs = docs_from_schema(schema, "core/config.py") +with open("docs/docs/reference/config.md", "w") as f: + f.write(markdown_docs) + +ctx_module = importlib.import_module("continuedev.src.continuedev.core.context") +ctx_obj = getattr(ctx_module, "ContextProvider") +schema = ctx_obj.schema() +ctx_properties = schema["properties"].keys() +for module_name, module_title in CONTEXT_PROVIDER_MODULES: + obj = import_context_provider_module(module_name, module_title) + schema = obj.schema() + markdown_docs = docs_from_schema( + schema, + f"plugins/context_providers/{module_name}.py", + ignore_properties=[ + "sdk", + "updated_documents", + "delete_documents", + "selected_items", + "ignore_patterns", + ], + inherited=ctx_properties, + ) + with open( + f"docs/docs/reference/Context Providers/{module_title.lower()}.md", "w" + ) as f: + f.write(markdown_docs) + +# sdk_module = importlib.import_module("continuedev.src.continuedev.core.sdk") +# sdk_obj = getattr(sdk_module, "ContinueSDK") +# schema = sdk_obj.schema() +# markdown_docs = docs_from_schema(schema, "sdk", ignore_properties=[]) +# with open("docs/docs/reference/ContinueSDK.md", "w") as f: +# f.write(markdown_docs) diff --git a/continuedev/src/continuedev/models/reference/test.py b/continuedev/src/continuedev/models/reference/test.py deleted file mode 100644 index 0ab9ba85..00000000 --- a/continuedev/src/continuedev/models/reference/test.py +++ /dev/null @@ -1,144 +0,0 @@ -import html -import importlib -import json -from textwrap import dedent - -LLM_MODULES = [ - ("openai", "OpenAI"), - ("anthropic", "AnthropicLLM"), - ("ggml", "GGML"), - ("llamacpp", "LlamaCpp"), - ("text_gen_interface", "TextGenUI"), - ("ollama", "Ollama"), - ("replicate", "ReplicateLLM"), - ("together", "TogetherLLM"), - ("hf_inference_api", "HuggingFaceInferenceAPI"), - ("hf_tgi", "HuggingFaceTGI"), - ("openai_free_trial", "OpenAIFreeTrial"), - ("google_palm_api", "GooglePaLMAPI"), - ("queued", "QueuedLLM"), -] - -CONTEXT_PROVIDER_MODULES = [ - ("diff", "DiffContextProvider"), - ("file", "FileContextProvider"), - ("filetree", "FileTreeContextProvider"), - ("github", "GitHubIssuesContextProvider"), - ("google", "GoogleContextProvider"), - ("search", "SearchContextProvider"), - ("terminal", "TerminalContextProvider"), - ("url", "URLContextProvider"), -] - - -def import_llm_module(module_name, module_title): - module_name = f"continuedev.src.continuedev.libs.llm.{module_name}" - module = importlib.import_module(module_name) - obj = getattr(module, module_title) - return obj - - -def import_context_provider_module(module_name, module_title): - module_name = f"continuedev.src.continuedev.plugins.context_providers.{module_name}" - module = importlib.import_module(module_name) - obj = getattr(module, module_title) - return obj - - -def docs_from_schema(schema, filepath, ignore_properties=[], inherited=[]): - # Generate markdown docs - properties = "" - inherited_properties = "" - - def add_property(prop, details, only_required): - required = prop in schema.get("required", []) - if only_required != required or prop in ignore_properties: - return "" - required = "true" if required else "false" - return f"""\n""" - - for prop, details in schema["properties"].items(): - property = add_property(prop, details, True) - if prop in inherited: - inherited_properties += property - else: - properties += property - - for prop, details 
-
-    for prop, details in schema["properties"].items():
-        property = add_property(prop, details, False)
-        if prop in inherited:
-            inherited_properties += property
-        else:
-            properties += property
-
-    return dedent(
-        f"""\
-import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
-
-# {schema['title']}
-
-{dedent(schema.get("description", ""))}
-
-[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/{filepath})
-
-## Properties
-
-{properties}
-
-### Inherited Properties
-
-{inherited_properties}"""
-    )
-
-
-llm_module = importlib.import_module("continuedev.src.continuedev.libs.llm")
-ctx_obj = getattr(llm_module, "LLM")
-schema = ctx_obj.schema()
-ctx_properties = schema["properties"].keys()
-
-for module_name, module_title in LLM_MODULES:
-    obj = import_llm_module(module_name, module_title)
-    schema = obj.schema()
-    markdown_docs = docs_from_schema(
-        schema, f"libs/llm/{module_name}.py", inherited=ctx_properties
-    )
-    with open(f"docs/docs/reference/Models/{module_title.lower()}.md", "w") as f:
-        f.write(markdown_docs)
-
-config_module = importlib.import_module("continuedev.src.continuedev.core.config")
-config_obj = getattr(config_module, "ContinueConfig")
-schema = config_obj.schema()
-markdown_docs = docs_from_schema(schema, "core/config.py")
-with open("docs/docs/reference/config.md", "w") as f:
-    f.write(markdown_docs)
-
-ctx_module = importlib.import_module("continuedev.src.continuedev.core.context")
-ctx_obj = getattr(ctx_module, "ContextProvider")
-schema = ctx_obj.schema()
-ctx_properties = schema["properties"].keys()
-for module_name, module_title in CONTEXT_PROVIDER_MODULES:
-    obj = import_context_provider_module(module_name, module_title)
-    schema = obj.schema()
-    markdown_docs = docs_from_schema(
-        schema,
-        f"plugins/context_providers/{module_name}.py",
-        ignore_properties=[
-            "sdk",
-            "updated_documents",
-            "delete_documents",
-            "selected_items",
-            "ignore_patterns",
-        ],
-        inherited=ctx_properties,
-    )
-    with open(
-        f"docs/docs/reference/Context Providers/{module_title.lower()}.md", "w"
-    ) as f:
-        f.write(markdown_docs)
-
-# sdk_module = importlib.import_module("continuedev.src.continuedev.core.sdk")
-# sdk_obj = getattr(sdk_module, "ContinueSDK")
-# schema = sdk_obj.schema()
-# markdown_docs = docs_from_schema(schema, "sdk", ignore_properties=[])
-# with open("docs/docs/reference/ContinueSDK.md", "w") as f:
-#     f.write(markdown_docs)
-- 
cgit v1.2.3-70-g09d2
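
Since the script writes into `docs/docs/reference/...` via relative paths and imports `continuedev.src...` modules, it presumably has to run from the repository root. A sketch of that invocation (the module path follows the new file's location; the exact command is an assumption, not something this diff documents):

```python
# Hypothetical way to run the generator from the repo root, equivalent to
# `python -m continuedev.src.continuedev.models.reference.generate`.
import runpy

runpy.run_module(
    "continuedev.src.continuedev.models.reference.generate",
    run_name="__main__",
)
```
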