summary refs log tree commit diff
path: root/server/continuedev/plugins/steps
diff options
context:
space:
mode:
Diffstat (limited to 'server/continuedev/plugins/steps')
-rw-r--r--  server/continuedev/plugins/steps/chat.py  198
-rw-r--r--  server/continuedev/plugins/steps/setup_model.py  10
2 files changed, 2 insertions, 206 deletions
diff --git a/server/continuedev/plugins/steps/chat.py b/server/continuedev/plugins/steps/chat.py
index 1b0f76f9..919d939e 100644
--- a/server/continuedev/plugins/steps/chat.py
+++ b/server/continuedev/plugins/steps/chat.py
@@ -4,26 +4,17 @@ import os
from textwrap import dedent
from typing import Any, Coroutine, List
-import openai
from directory_tree import display_tree
from dotenv import load_dotenv
from pydantic import Field
-from ...core.main import ChatMessage, FunctionCall, Models, Step, step_to_json_schema
+from ...core.main import ChatMessage, Models, Step, step_to_json_schema
from ...core.sdk import ContinueSDK
-from ...core.steps import MessageStep
-from ...libs.llm.openai import OpenAI
-from ...libs.llm.openai_free_trial import OpenAIFreeTrial
from ...libs.util.devdata import dev_data_logger
from ...libs.util.strings import remove_quotes_and_escapes
from ...libs.util.telemetry import posthog_logger
-from .main import EditHighlightedCodeStep
load_dotenv()
-OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-openai.api_key = OPENAI_API_KEY
-
-FREE_USAGE_STEP_NAME = "Please enter OpenAI API key"
def add_ellipsis(text: str, max_length: int = 200) -> str:
@@ -40,48 +31,6 @@ class SimpleChatStep(Step):
async def run(self, sdk: ContinueSDK):
# Check if proxy server API key
- if (
- isinstance(sdk.models.default, OpenAIFreeTrial)
- and (
- sdk.models.default.api_key is None
- or sdk.models.default.api_key.strip() == ""
- )
- and len(list(filter(lambda x: not x.step.hide, sdk.history.timeline))) >= 10
- and len(
- list(
- filter(
- lambda x: x.step.name == FREE_USAGE_STEP_NAME,
- sdk.history.timeline,
- )
- )
- )
- == 0
- ):
- await sdk.run_step(
- MessageStep(
- name=FREE_USAGE_STEP_NAME,
- message=dedent(
- """\
- To make it easier to use Continue, you're getting limited free usage. When you have the chance, please enter your own OpenAI key in `~/.continue/config.py`. You can open the file by using the '/config' slash command in the text box below.
-
- Here's an example of how to edit the file:
- ```python
- ...
- config=ContinueConfig(
- ...
- models=Models(
- default=OpenAIFreeTrial(api_key="<API_KEY>", model="gpt-4"),
- summarize=OpenAIFreeTrial(api_key="<API_KEY>", model="gpt-3.5-turbo")
- )
- )
- ```
-
- You can also learn more about customizations [here](https://continue.dev/docs/customization).
- """
- ),
- )
- )
-
messages = self.messages or await sdk.get_chat_context()
generator = sdk.models.chat.stream_chat(
@@ -232,148 +181,3 @@ class EditFileStep(Step):
async def run(self, sdk: ContinueSDK):
await sdk.edit_file(self.filename, self.instructions)
-
-class ChatWithFunctions(Step):
- user_input: str
- functions: List[Step] = [
- AddFileStep(filename="", file_contents=""),
- EditFileStep(filename="", instructions=""),
- EditHighlightedCodeStep(user_input=""),
- ViewDirectoryTreeStep(),
- AddDirectoryStep(directory_name=""),
- DeleteFileStep(filename=""),
- RunTerminalCommandStep(command=""),
- ]
- name: str = "Input"
- manage_own_chat_context: bool = True
- description: str = ""
- hide: bool = True
-
- async def run(self, sdk: ContinueSDK):
- await sdk.update_ui()
-
- step_name_step_class_map = {
- step.name.replace(" ", ""): step.__class__ for step in self.functions
- }
-
- functions = [step_to_json_schema(function) for function in self.functions]
-
- self.chat_context.append(
- ChatMessage(role="user", content=self.user_input, summary=self.user_input)
- )
-
- last_function_called_name = None
- last_function_called_params = None
- while True:
- was_function_called = False
- func_args = ""
- func_name = ""
- msg_content = ""
- msg_step = None
-
- gpt350613 = OpenAI(model="gpt-3.5-turbo-0613")
- await sdk.start_model(gpt350613)
-
- async for msg_chunk in gpt350613.stream_chat(
- await sdk.get_chat_context(), functions=functions
- ):
- if sdk.current_step_was_deleted():
- return
-
- if "content" in msg_chunk and msg_chunk["content"] is not None:
- msg_content += msg_chunk["content"]
- # if last_function_called_index_in_history is not None:
- # while sdk.history.timeline[last_function_called_index].step.hide:
- # last_function_called_index += 1
- # sdk.history.timeline[last_function_called_index_in_history].step.description = msg_content
- if msg_step is None:
- msg_step = MessageStep(
- name="Chat", message=msg_chunk["content"]
- )
- await sdk.run_step(msg_step)
- else:
- msg_step.description = msg_content
- await sdk.update_ui()
- elif "function_call" in msg_chunk or func_name != "":
- was_function_called = True
- if "function_call" in msg_chunk:
- if "arguments" in msg_chunk["function_call"]:
- func_args += msg_chunk["function_call"]["arguments"]
- if "name" in msg_chunk["function_call"]:
- func_name += msg_chunk["function_call"]["name"]
-
- if not was_function_called:
- self.chat_context.append(
- ChatMessage(
- role="assistant", content=msg_content, summary=msg_content
- )
- )
- break
- else:
- if func_name == "python" and "python" not in step_name_step_class_map:
- # GPT must be fine-tuned to believe this exists, but it doesn't always
- func_name = "EditHighlightedCodeStep"
- func_args = json.dumps({"user_input": self.user_input})
- # self.chat_context.append(ChatMessage(
- # role="assistant",
- # content=None,
- # function_call=FunctionCall(
- # name=func_name,
- # arguments=func_args
- # ),
- # summary=f"Called function {func_name}"
- # ))
- # self.chat_context.append(ChatMessage(
- # role="user",
- # content="The 'python' function does not exist. Don't call it. Try again to call another function.",
- # summary="'python' function does not exist."
- # ))
- # msg_step.hide = True
- # continue
- # Call the function, then continue to chat
- func_args = "{}" if func_args == "" else func_args
- try:
- fn_call_params = json.loads(func_args)
- except json.JSONDecodeError:
- raise Exception("The model returned invalid JSON. Please try again")
- self.chat_context.append(
- ChatMessage(
- role="assistant",
- content=None,
- function_call=FunctionCall(name=func_name, arguments=func_args),
- summary=f"Called function {func_name}",
- )
- )
- sdk.history.current_index + 1
- if func_name not in step_name_step_class_map:
- raise Exception(
- f"The model tried to call a function ({func_name}) that does not exist. Please try again."
- )
-
- # if func_name == "AddFileStep":
- # step_to_run.hide = True
- # self.description += f"\nAdded file `{func_args['filename']}`"
- # elif func_name == "AddDirectoryStep":
- # step_to_run.hide = True
- # self.description += f"\nAdded directory `{func_args['directory_name']}`"
- # else:
- # self.description += f"\n`Running function {func_name}`\n\n"
- if func_name == "EditHighlightedCodeStep":
- fn_call_params["user_input"] = self.user_input
- elif func_name == "EditFile":
- fn_call_params["instructions"] = self.user_input
-
- step_to_run = step_name_step_class_map[func_name](**fn_call_params)
- if (
- last_function_called_name is not None
- and last_function_called_name == func_name
- and last_function_called_params is not None
- and last_function_called_params == fn_call_params
- ):
- # If it's calling the same function more than once in a row, it's probably looping and confused
- return
- last_function_called_name = func_name
- last_function_called_params = fn_call_params
-
- await sdk.run_step(step_to_run)
- await sdk.update_ui()
diff --git a/server/continuedev/plugins/steps/setup_model.py b/server/continuedev/plugins/steps/setup_model.py
index 87e52f1b..e7249594 100644
--- a/server/continuedev/plugins/steps/setup_model.py
+++ b/server/continuedev/plugins/steps/setup_model.py
@@ -5,16 +5,8 @@ from ...models.filesystem import RangeInFile
from ...models.main import Range
MODEL_CLASS_TO_MESSAGE = {
- "OpenAI": "Obtain your OpenAI API key from [here](https://platform.openai.com/account/api-keys) and paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then reload the VS Code window for changes to take effect.",
- "OpenAIFreeTrial": "To get started with OpenAI models, obtain your OpenAI API key from [here](https://platform.openai.com/account/api-keys) and paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then reload the VS Code window for changes to take effect.",
- "AnthropicLLM": "To get started with Anthropic, you first need to sign up for the beta [here](https://claude.ai/login) to obtain an API key. Once you have the key, paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then reload the VS Code window for changes to take effect.",
- "ReplicateLLM": "To get started with Replicate, sign up to obtain an API key [here](https://replicate.ai/), then paste it into the `api_key` field at config.models.default.api_key in `config.py`.",
"Ollama": "To get started with Ollama, download the app from [ollama.ai](https://ollama.ai/). Once it is downloaded, be sure to pull at least one model and use its name in the model field in config.py (e.g. `model='codellama'`).",
- "GGML": "GGML models can be run locally using the `llama-cpp-python` library. To learn how to set up a local llama-cpp-python server, read [here](https://github.com/continuedev/ggml-server-example). Once it is started on port 8000, you're all set!",
- "TogetherLLM": "To get started using models from Together, first obtain your Together API key from [here](https://together.ai). Paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then, on their models page, press 'start' on the model of your choice and make sure the `model=` parameter in the config file for the `TogetherLLM` class reflects the name of this model. Finally, reload the VS Code window for changes to take effect.",
- "LlamaCpp": "To get started with this model, clone the [`llama.cpp` repo](https://github.com/ggerganov/llama.cpp) and follow the instructions to set up the server [here](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#build). Any of the parameters described in the README can be passed to the `llama_cpp_args` field in the `LlamaCpp` class in `config.py`.",
- "HuggingFaceInferenceAPI": "To get started with the HuggingFace Inference API, first deploy a model and obtain your API key from [here](https://huggingface.co/inference-api). Paste it into the `hf_token` field at config.models.default.hf_token in `config.py`. Finally, reload the VS Code window for changes to take effect.",
- "GooglePaLMAPI": "To get started with the Google PaLM API, create an API key in Makersuite [here](https://makersuite.google.com/u/2/app/apikey), then paste it into the `api_key` field at config.models.default.api_key in `config.py`.",
+ "LlamaCpp": "To get started with this model, clone the [`llama.cpp` repo](https://github.com/ggerganov/llama.cpp) and follow the instructions to set up the server [here](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#build). Any of the parameters described in the README can be passed to the `llama_cpp_args` field in the `LlamaCpp` class in `config.py`."
}