author | Nate Sesti <33237525+sestinj@users.noreply.github.com> | 2023-10-09 18:37:27 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-10-09 18:37:27 -0700 |
commit | f09150617ed2454f3074bcf93f53aae5ae637d40 (patch) | |
tree | 5cfe614a64d921dfe58b049f426d67a8b832c71f | |
parent | 985304a213f620cdff3f8f65f74ed7e3b79be29d (diff) | |
download | sncontinue-f09150617ed2454f3074bcf93f53aae5ae637d40.tar.gz sncontinue-f09150617ed2454f3074bcf93f53aae5ae637d40.tar.bz2 sncontinue-f09150617ed2454f3074bcf93f53aae5ae637d40.zip |
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main
---------
Co-authored-by: GitHub Action <action@github.com>
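Two of the user-facing changes listed above, the shortened `continuedev.*` import paths and the new `headers` parameter on the LLM class, combine in `config.py` roughly as follows. This is a minimal sketch, not the shipped default config: the `ContinueConfig` import path and the header values are assumptions for illustration.

```python
# Sketch of a ~/.continue/config.py after this commit.
# Assumes ContinueConfig is importable from continuedev.core.config;
# the API key and header values are placeholders.
from continuedev.core.config import ContinueConfig
from continuedev.core.models import Models
from continuedev.libs.llm.openai import OpenAI

config = ContinueConfig(
    models=Models(
        default=OpenAI(
            api_key="YOUR_API_KEY",  # placeholder
            model="gpt-4",
            # New in this commit: extra headers sent on every HTTP request
            headers={"X-Example-Header": "example-value"},
        )
    ),
)
```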
-rw-r--r-- | .github/workflows/README.md | 19 | ||||
-rw-r--r-- | .github/workflows/main.yaml | 265 | ||||
-rw-r--r-- | .github/workflows/preview.yaml | 245 | ||||
-rw-r--r-- | .vscode/launch.json | 13 | ||||
-rw-r--r-- | .vscode/settings.json | 2 | ||||
-rw-r--r-- | Dockerfile | 22 | ||||
-rw-r--r-- | build.cmd | 4 | ||||
-rwxr-xr-x | build.sh | 4 | ||||
-rw-r--r-- | continuedev/.gitignore | 4 | ||||
-rw-r--r-- | continuedev/main.py | 5 | ||||
-rw-r--r-- | continuedev/src/.gitignore | 4 | ||||
-rw-r--r-- | continuedev/src/continuedev/.gitignore | 1 | ||||
-rw-r--r-- | continuedev/src/continuedev/libs/llm/prompts/chat.py | 95 | ||||
-rw-r--r-- | continuedev/src/continuedev/plugins/steps/__init__.py | 1 | ||||
-rw-r--r-- | continuedev/src/continuedev/plugins/steps/open_config.py | 31 | ||||
-rw-r--r-- | docs/docs/customization/context-providers.md | 16 | ||||
-rw-r--r-- | docs/docs/customization/models.md | 14 | ||||
-rw-r--r-- | docs/docs/reference/Models/anthropicllm.md | 13 | ||||
-rw-r--r-- | docs/docs/reference/Models/ggml.md | 17 | ||||
-rw-r--r-- | docs/docs/reference/Models/googlepalmapi.md | 15 | ||||
-rw-r--r-- | docs/docs/reference/Models/huggingfaceinferenceapi.md | 15 | ||||
-rw-r--r-- | docs/docs/reference/Models/huggingfacetgi.md | 11 | ||||
-rw-r--r-- | docs/docs/reference/Models/llamacpp.md | 13 | ||||
-rw-r--r-- | docs/docs/reference/Models/ollama.md | 13 | ||||
-rw-r--r-- | docs/docs/reference/Models/openai.md | 11 | ||||
-rw-r--r-- | docs/docs/reference/Models/openaifreetrial.md | 9 | ||||
-rw-r--r-- | docs/docs/reference/Models/queuedllm.md | 13 | ||||
-rw-r--r-- | docs/docs/reference/Models/replicatellm.md | 15 | ||||
-rw-r--r-- | docs/docs/reference/Models/textgenui.md | 13 | ||||
-rw-r--r-- | docs/docs/reference/Models/togetherllm.md | 15 | ||||
-rw-r--r-- | docs/docs/reference/config.md | 2 | ||||
-rw-r--r-- | docs/docs/walkthroughs/codebase-embeddings.md | 2 | ||||
-rw-r--r-- | docs/docs/walkthroughs/codellama.md | 10 | ||||
-rw-r--r-- | extension/package-lock.json | 4 | ||||
-rw-r--r-- | extension/package.json | 5 | ||||
-rw-r--r-- | extension/react-app/package-lock.json | 1 | ||||
-rw-r--r-- | extension/react-app/package.json | 1 | ||||
-rw-r--r-- | extension/react-app/public/logos/mistral.png | bin | 0 -> 5020 bytes | |||
-rw-r--r-- | extension/react-app/public/logos/wizardlm.png | bin | 0 -> 494793 bytes | |||
-rw-r--r-- | extension/react-app/src/components/ComboBox.tsx | 31 | ||||
-rw-r--r-- | extension/react-app/src/components/ErrorStepContainer.tsx | 2 | ||||
-rw-r--r-- | extension/react-app/src/components/Layout.tsx | 29 | ||||
-rw-r--r-- | extension/react-app/src/components/ModelCard.tsx | 178 | ||||
-rw-r--r-- | extension/react-app/src/components/ModelSettings.tsx | 6 | ||||
-rw-r--r-- | extension/react-app/src/components/Suggestions.tsx | 17 | ||||
-rw-r--r-- | extension/react-app/src/components/dialogs/AddContextGroupDialog.tsx | 4 | ||||
-rw-r--r-- | extension/react-app/src/components/dialogs/FTCDialog.tsx | 5 | ||||
-rw-r--r-- | extension/react-app/src/components/index.ts | 13 | ||||
-rw-r--r-- | extension/react-app/src/pages/gui.tsx | 10 | ||||
-rw-r--r-- | extension/react-app/src/pages/history.tsx | 6 | ||||
-rw-r--r-- | extension/react-app/src/pages/modelconfig.tsx | 63 | ||||
-rw-r--r-- | extension/react-app/src/pages/models.tsx | 3 | ||||
-rw-r--r-- | extension/react-app/src/pages/settings.tsx | 11 | ||||
-rw-r--r-- | extension/react-app/src/util/modelData.ts | 451 | ||||
-rw-r--r-- | extension/schema/ContinueConfig.d.ts | 5 | ||||
-rw-r--r-- | extension/schema/LLM.d.ts | 5 | ||||
-rw-r--r-- | extension/schema/Models.d.ts | 5 | ||||
-rw-r--r-- | extension/scripts/package.js | 9 | ||||
-rw-r--r-- | extension/src/activation/activate.ts | 62 | ||||
-rw-r--r-- | extension/src/activation/environmentSetup.ts | 218 | ||||
-rw-r--r-- | extension/src/commands.ts | 15 | ||||
-rw-r--r-- | extension/src/continueIdeClient.ts | 26 | ||||
-rw-r--r-- | extension/src/lang-server/codeLens.ts | 62 | ||||
-rw-r--r-- | extension/src/test-suite/environmentSetup.test.ts | 4 | ||||
-rw-r--r-- | extension/src/util/messenger.ts | 9 | ||||
-rw-r--r-- | run.m1.spec | 2 | ||||
-rw-r--r-- | run.py | 2 | ||||
-rw-r--r-- | run.spec | 2 | ||||
-rw-r--r-- | schema/json/ContextItem.json | 4 | ||||
-rw-r--r-- | schema/json/ContinueConfig.json | 20 | ||||
-rw-r--r-- | schema/json/FileEdit.json | 4 | ||||
-rw-r--r-- | schema/json/FileEditWithFullContents.json | 4 | ||||
-rw-r--r-- | schema/json/FullState.json | 4 | ||||
-rw-r--r-- | schema/json/History.json | 4 | ||||
-rw-r--r-- | schema/json/HistoryNode.json | 4 | ||||
-rw-r--r-- | schema/json/LLM.json | 10 | ||||
-rw-r--r-- | schema/json/Models.json | 10 | ||||
-rw-r--r-- | schema/json/Position.json | 4 | ||||
-rw-r--r-- | schema/json/Range.json | 4 | ||||
-rw-r--r-- | schema/json/RangeInFile.json | 4 | ||||
-rw-r--r-- | schema/json/SessionInfo.json | 4 | ||||
-rw-r--r-- | schema/json/Traceback.json | 4 | ||||
-rw-r--r-- | schema/json/TracebackFrame.json | 4 | ||||
-rw-r--r-- | server/README.md (renamed from continuedev/README.md) | 12 | ||||
-rw-r--r-- | server/continuedev/__init__.py (renamed from continuedev/src/continuedev/__init__.py) | 0 | ||||
-rw-r--r-- | server/continuedev/__main__.py (renamed from continuedev/src/continuedev/__main__.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/abstract_sdk.py (renamed from continuedev/src/continuedev/core/abstract_sdk.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/autopilot.py (renamed from continuedev/src/continuedev/core/autopilot.py) | 30 | ||||
-rw-r--r-- | server/continuedev/core/config.py (renamed from continuedev/src/continuedev/core/config.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/context.py (renamed from continuedev/src/continuedev/core/context.py) | 55 | ||||
-rw-r--r-- | server/continuedev/core/env.py (renamed from continuedev/src/continuedev/core/env.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/lsp.py (renamed from continuedev/src/continuedev/core/lsp.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/main.py (renamed from continuedev/src/continuedev/core/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/models.py (renamed from continuedev/src/continuedev/core/models.py) | 2 | ||||
-rw-r--r-- | server/continuedev/core/observation.py (renamed from continuedev/src/continuedev/core/observation.py) | 0 | ||||
-rw-r--r-- | server/continuedev/core/sdk.py (renamed from continuedev/src/continuedev/core/sdk.py) | 58 | ||||
-rw-r--r-- | server/continuedev/core/steps.py (renamed from continuedev/src/continuedev/plugins/steps/core/core.py) | 24 | ||||
-rw-r--r-- | server/continuedev/headless/__init__.py (renamed from continuedev/src/continuedev/headless/__init__.py) | 0 | ||||
-rw-r--r-- | server/continuedev/headless/headless_ide.py (renamed from continuedev/src/continuedev/headless/headless_ide.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/__init__.py (renamed from continuedev/__init__.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/chroma/.gitignore (renamed from continuedev/src/continuedev/libs/chroma/.gitignore) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/chroma/query.py (renamed from continuedev/src/continuedev/libs/chroma/query.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/chroma/update.py (renamed from continuedev/src/continuedev/libs/chroma/update.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/constants/default_config.py (renamed from continuedev/src/continuedev/libs/constants/default_config.py) | 30 | ||||
-rw-r--r-- | server/continuedev/libs/constants/main.py (renamed from continuedev/src/continuedev/libs/constants/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/llm/__init__.py | 14 | ||||
-rw-r--r-- | server/continuedev/libs/llm/anthropic.py (renamed from continuedev/src/continuedev/libs/llm/anthropic.py) | 4 | ||||
-rw-r--r-- | server/continuedev/libs/llm/base.py (renamed from continuedev/src/continuedev/libs/llm/__init__.py) | 107 | ||||
-rw-r--r-- | server/continuedev/libs/llm/ggml.py (renamed from continuedev/src/continuedev/libs/llm/ggml.py) | 60 | ||||
-rw-r--r-- | server/continuedev/libs/llm/google_palm_api.py (renamed from continuedev/src/continuedev/libs/llm/google_palm_api.py) | 6 | ||||
-rw-r--r-- | server/continuedev/libs/llm/hf_inference_api.py (renamed from continuedev/src/continuedev/libs/llm/hf_inference_api.py) | 6 | ||||
-rw-r--r-- | server/continuedev/libs/llm/hf_tgi.py (renamed from continuedev/src/continuedev/libs/llm/hf_tgi.py) | 2 | ||||
-rw-r--r-- | server/continuedev/libs/llm/hugging_face.py (renamed from continuedev/src/continuedev/libs/llm/hugging_face.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/llm/llamacpp.py (renamed from continuedev/src/continuedev/libs/llm/llamacpp.py) | 4 | ||||
-rw-r--r-- | server/continuedev/libs/llm/ollama.py (renamed from continuedev/src/continuedev/libs/llm/ollama.py) | 27 | ||||
-rw-r--r-- | server/continuedev/libs/llm/openai.py (renamed from continuedev/src/continuedev/libs/llm/openai.py) | 11 | ||||
-rw-r--r-- | server/continuedev/libs/llm/openai_free_trial.py (renamed from continuedev/src/continuedev/libs/llm/openai_free_trial.py) | 2 | ||||
-rw-r--r-- | server/continuedev/libs/llm/prompt_utils.py (renamed from continuedev/src/continuedev/libs/llm/prompt_utils.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/llm/prompts/chat.py | 174 | ||||
-rw-r--r-- | server/continuedev/libs/llm/prompts/edit.py (renamed from continuedev/src/continuedev/libs/llm/prompts/edit.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/llm/proxy_server.py (renamed from continuedev/src/continuedev/libs/llm/proxy_server.py) | 2 | ||||
-rw-r--r-- | server/continuedev/libs/llm/queued.py (renamed from continuedev/src/continuedev/libs/llm/queued.py) | 4 | ||||
-rw-r--r-- | server/continuedev/libs/llm/replicate.py (renamed from continuedev/src/continuedev/libs/llm/replicate.py) | 6 | ||||
-rw-r--r-- | server/continuedev/libs/llm/text_gen_interface.py (renamed from continuedev/src/continuedev/libs/llm/text_gen_interface.py) | 4 | ||||
-rw-r--r-- | server/continuedev/libs/llm/together.py (renamed from continuedev/src/continuedev/libs/llm/together.py) | 6 | ||||
-rw-r--r-- | server/continuedev/libs/util/calculate_diff.py (renamed from continuedev/src/continuedev/libs/util/calculate_diff.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/commonregex.py (renamed from continuedev/src/continuedev/libs/util/commonregex.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/copy_codebase.py (renamed from continuedev/src/continuedev/libs/util/copy_codebase.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/count_tokens.py (renamed from continuedev/src/continuedev/libs/util/count_tokens.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/create_async_task.py (renamed from continuedev/src/continuedev/libs/util/create_async_task.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/devdata.py (renamed from continuedev/src/continuedev/libs/util/devdata.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/edit_config.py (renamed from continuedev/src/continuedev/libs/util/edit_config.py) | 21 | ||||
-rw-r--r-- | server/continuedev/libs/util/errors.py (renamed from continuedev/src/continuedev/libs/util/errors.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/filter_files.py (renamed from continuedev/src/continuedev/libs/util/filter_files.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/logging.py (renamed from continuedev/src/continuedev/libs/util/logging.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/map_path.py (renamed from continuedev/src/continuedev/libs/util/map_path.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/paths.py (renamed from continuedev/src/continuedev/libs/util/paths.py) | 2 | ||||
-rw-r--r-- | server/continuedev/libs/util/queue.py (renamed from continuedev/src/continuedev/libs/util/queue.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/ripgrep.py (renamed from continuedev/src/continuedev/libs/util/ripgrep.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/step_name_to_steps.py (renamed from continuedev/src/continuedev/libs/util/step_name_to_steps.py) | 2 | ||||
-rw-r--r-- | server/continuedev/libs/util/strings.py (renamed from continuedev/src/continuedev/libs/util/strings.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/telemetry.py (renamed from continuedev/src/continuedev/libs/util/telemetry.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/templating.py (renamed from continuedev/src/continuedev/libs/util/templating.py) | 0 | ||||
-rw-r--r-- | server/continuedev/libs/util/traceback/traceback_parsers.py (renamed from continuedev/src/continuedev/libs/util/traceback/traceback_parsers.py) | 0 | ||||
-rw-r--r-- | server/continuedev/models/__init__.py (renamed from continuedev/src/__init__.py) | 0 | ||||
-rw-r--r-- | server/continuedev/models/filesystem.py (renamed from continuedev/src/continuedev/models/filesystem.py) | 0 | ||||
-rw-r--r-- | server/continuedev/models/filesystem_edit.py (renamed from continuedev/src/continuedev/models/filesystem_edit.py) | 0 | ||||
-rw-r--r-- | server/continuedev/models/generate_json_schema.py (renamed from continuedev/src/continuedev/models/generate_json_schema.py) | 2 | ||||
-rw-r--r-- | server/continuedev/models/main.py (renamed from continuedev/src/continuedev/models/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/models/reference/generate.py (renamed from continuedev/src/continuedev/models/reference/generate.py) | 12 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/__init__.py | 7 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/diff.py (renamed from continuedev/src/continuedev/plugins/context_providers/diff.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/dynamic.py (renamed from continuedev/src/continuedev/plugins/context_providers/dynamic.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/embeddings.py (renamed from continuedev/src/continuedev/plugins/context_providers/embeddings.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/file.py (renamed from continuedev/src/continuedev/plugins/context_providers/file.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/filetree.py (renamed from continuedev/src/continuedev/plugins/context_providers/filetree.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/github.py (renamed from continuedev/src/continuedev/plugins/context_providers/github.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/google.py (renamed from continuedev/src/continuedev/plugins/context_providers/google.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/highlighted_code.py (renamed from continuedev/src/continuedev/plugins/context_providers/highlighted_code.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/search.py (renamed from continuedev/src/continuedev/plugins/context_providers/search.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/terminal.py (renamed from continuedev/src/continuedev/plugins/context_providers/terminal.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/url.py (renamed from continuedev/src/continuedev/plugins/context_providers/url.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/context_providers/util.py (renamed from continuedev/src/continuedev/plugins/context_providers/util.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/policies/commit.py (renamed from continuedev/src/continuedev/plugins/policies/commit.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/policies/default.py (renamed from continuedev/src/continuedev/plugins/policies/default.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/policies/headless.py (renamed from continuedev/src/continuedev/plugins/policies/headless.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/AddTransformRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md (renamed from continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/AddTransformRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/main.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/AddTransformRecipe/steps.py (renamed from continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/steps.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/main.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py (renamed from continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DDtoBQRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md (renamed from continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DDtoBQRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/main.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py (renamed from continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/steps.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py (renamed from continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/README.md (renamed from continuedev/src/continuedev/plugins/recipes/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/TemplateRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/TemplateRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/TemplateRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/TemplateRecipe/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/WritePytestsRecipe/README.md (renamed from continuedev/src/continuedev/plugins/recipes/WritePytestsRecipe/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/recipes/WritePytestsRecipe/main.py (renamed from continuedev/src/continuedev/plugins/recipes/WritePytestsRecipe/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/README.md (renamed from continuedev/src/continuedev/plugins/steps/README.md) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/__init__.py | 13 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/chat.py (renamed from continuedev/src/continuedev/plugins/steps/chat.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/chroma.py (renamed from continuedev/src/continuedev/plugins/steps/chroma.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/clear_history.py (renamed from continuedev/src/continuedev/plugins/steps/clear_history.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/cmd.py (renamed from continuedev/src/continuedev/plugins/steps/cmd.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/comment_code.py (renamed from continuedev/src/continuedev/plugins/steps/comment_code.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/custom_command.py (renamed from continuedev/src/continuedev/plugins/steps/custom_command.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/draft/abstract_method.py (renamed from continuedev/src/continuedev/plugins/steps/draft/abstract_method.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/draft/redux.py (renamed from continuedev/src/continuedev/plugins/steps/draft/redux.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/draft/typeorm.py (renamed from continuedev/src/continuedev/plugins/steps/draft/typeorm.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/feedback.py (renamed from continuedev/src/continuedev/plugins/steps/feedback.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/find_and_replace.py (renamed from continuedev/src/continuedev/plugins/steps/find_and_replace.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/help.py (renamed from continuedev/src/continuedev/plugins/steps/help.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/input/nl_multiselect.py (renamed from continuedev/src/continuedev/plugins/steps/input/nl_multiselect.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/main.py (renamed from continuedev/src/continuedev/plugins/steps/main.py) | 4 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/on_traceback.py (renamed from continuedev/src/continuedev/plugins/steps/on_traceback.py) | 2 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/open_config.py | 17 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/react.py (renamed from continuedev/src/continuedev/plugins/steps/react.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/refactor.py (renamed from continuedev/src/continuedev/plugins/steps/refactor.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/search_directory.py (renamed from continuedev/src/continuedev/plugins/steps/search_directory.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/setup_model.py (renamed from continuedev/src/continuedev/plugins/steps/setup_model.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/share_session.py (renamed from continuedev/src/continuedev/plugins/steps/share_session.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/steps_on_startup.py (renamed from continuedev/src/continuedev/plugins/steps/steps_on_startup.py) | 0 | ||||
-rw-r--r-- | server/continuedev/plugins/steps/welcome.py (renamed from continuedev/src/continuedev/plugins/steps/welcome.py) | 0 | ||||
-rw-r--r-- | server/continuedev/server/gui.py (renamed from continuedev/src/continuedev/server/gui.py) | 24 | ||||
-rw-r--r-- | server/continuedev/server/ide.py (renamed from continuedev/src/continuedev/server/ide.py) | 9 | ||||
-rw-r--r-- | server/continuedev/server/ide_protocol.py (renamed from continuedev/src/continuedev/server/ide_protocol.py) | 0 | ||||
-rw-r--r-- | server/continuedev/server/main.py (renamed from continuedev/src/continuedev/server/main.py) | 0 | ||||
-rw-r--r-- | server/continuedev/server/meilisearch_server.py (renamed from continuedev/src/continuedev/server/meilisearch_server.py) | 3 | ||||
-rw-r--r-- | server/continuedev/server/session_manager.py (renamed from continuedev/src/continuedev/server/session_manager.py) | 0 | ||||
-rw-r--r-- | server/dev_requirements.txt (renamed from continuedev/dev_requirements.txt) | 0 | ||||
-rwxr-xr-x | server/install-dependencies.sh (renamed from continuedev/install-dependencies.sh) | 0 | ||||
-rw-r--r-- | server/main.py | 5 | ||||
-rw-r--r-- | server/notes.md (renamed from continuedev/notes.md) | 0 | ||||
-rw-r--r-- | server/poetry.lock (renamed from continuedev/poetry.lock) | 0 | ||||
-rw-r--r-- | server/poetry.toml (renamed from continuedev/poetry.toml) | 0 | ||||
-rw-r--r-- | server/pyproject.toml (renamed from continuedev/pyproject.toml) | 2 | ||||
-rw-r--r-- | server/requirements.txt (renamed from continuedev/requirements.txt) | 0 | ||||
-rw-r--r-- | server/tests/__init__.py (renamed from continuedev/src/continuedev/libs/__init__.py) | 0 | ||||
-rw-r--r-- | server/tests/llm_test.py (renamed from continuedev/src/continuedev/tests/llm_test.py) | 7 | ||||
-rw-r--r-- | server/tests/step_test.py (renamed from continuedev/src/continuedev/tests/step_test.py) | 5 | ||||
-rw-r--r-- | server/tests/util/__init__.py (renamed from continuedev/src/continuedev/models/__init__.py) | 0 | ||||
-rw-r--r-- | server/tests/util/config.py (renamed from continuedev/src/continuedev/tests/util/config.py) | 6 | ||||
-rw-r--r-- | server/tests/util/openai_mock.py (renamed from continuedev/src/continuedev/tests/util/openai_mock.py) | 0 | ||||
-rw-r--r-- | server/tests/util/prompts.py (renamed from continuedev/src/continuedev/tests/util/prompts.py) | 0 |
233 files changed, 2102 insertions, 1003 deletions
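Most of the diffstat above is the relocation of the Python package from `continuedev/src/continuedev/` to `server/continuedev/`, which shortens every user-facing import. A before/after sketch, taken directly from the documentation diffs below:

```python
# Before this commit (old continuedev/src/continuedev layout):
from continuedev.src.continuedev.plugins.context_providers.search import SearchContextProvider
from continuedev.src.continuedev.libs.llm.ollama import Ollama

# After this commit (server/continuedev layout, still imported as `continuedev`):
from continuedev.plugins.context_providers.search import SearchContextProvider
from continuedev.libs.llm.ollama import Ollama
```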
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 00000000..fda0858d
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,19 @@
+# Deployment Process
+
+## preview
+When merging to `preview`:
+- on each platform, a PyInstaller binary is built and placed in the extension directory. Then the extension is built, tested, and uploaded as an artifact.
+- all of the artifacts are downloaded (and the Apple Silicon binary is downloaded from the S3 bucket) and pushed to the store/registry all at once, as pre-releases.
+- the version is bumped and this change is committed to `preview`
+
+## main
+When merging to `main`:
+
+> Make sure to run `sh build.sh m1` and push to the S3 bucket before merging to main, so that the newest Apple Silicon binary is available to package with the extension.
+
+- the continuedev package is built and uploaded to PyPI. Then the version is bumped and this change is committed to main.
+- on each platform, a PyInstaller binary is built and placed in the extension directory. Then the extension is built, tested, and uploaded as an artifact. The PyInstaller binary is also uploaded as an artifact.
+- all of the artifacts are downloaded (and the Apple Silicon binary is downloaded from the S3 bucket) and pushed to the store/registry all at once, as full releases.
+- the version is bumped and this change is committed to main
+- at the end, all of the PyInstaller binaries are uploaded to the S3 bucket because they are still needed for JetBrains.
+- in the future, the IntelliJ extension will be built and uploaded to the marketplace here
\ No newline at end of file diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 734d9374..94944615 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -6,58 +6,50 @@ on: - main jobs: - # pypi-deployment: - # runs-on: ubuntu-20.04 - # permissions: - # contents: write - # steps: - # - name: Check out code - # uses: actions/checkout@v2 - - # - name: Set up Python - # uses: actions/setup-python@v4 - # with: - # python-version: "3.10.8" - - # - name: Install dependencies - # run: | - # python -m pip install --upgrade pip - # pip install poetry - - # - name: Install project dependencies - # run: cd continuedev && poetry install - - # - name: Configure Poetry Token - # run: cd continuedev && poetry config pypi-token.pypi ${{ secrets.PYPI_API_TOKEN }} - - # - name: Bump the version - # run: cd continuedev && poetry version patch - - # - name: Build and publish the package - # run: cd continuedev && poetry publish --build - - # - name: Commit changes - # run: | - # git config --local user.email "action@github.com" - # git config --local user.name "GitHub Action" - # git commit -am "ci: 🏷 Update PyPI version [skip ci]" - - # - name: Push changes - # uses: ad-m/github-push-action@master - # with: - # github_token: ${{ secrets.GITHUB_TOKEN }} - # branch: ${{ github.ref }} - - pyinstaller: + build: strategy: matrix: - os: [windows-latest, ubuntu-20.04, macos-latest] - + include: + - os: windows-latest + platform: win32 + arch: x64 + npm_config_arch: x64 + - os: windows-latest + platform: win32 + arch: ia32 + npm_config_arch: ia32 + - os: windows-latest + platform: win32 + arch: arm64 + npm_config_arch: arm + - os: ubuntu-20.04 + platform: linux + arch: x64 + npm_config_arch: x64 + - os: ubuntu-20.04 + platform: linux + arch: arm64 + npm_config_arch: arm64 + - os: ubuntu-20.04 + platform: linux + arch: armhf + npm_config_arch: arm + - os: ubuntu-20.04 + platform: alpine + arch: x64 + npm_config_arch: x64 + - os: macos-latest + platform: darwin + arch: x64 + npm_config_arch: x64 + - os: macos-latest + platform: darwin + arch: arm64 + npm_config_arch: arm64 runs-on: ${{ matrix.os }} - steps: - # Install Python requirements and build+upload binaries for each platform + # 1. 
Build the Pyinstaller binary - name: Check-out repository uses: actions/checkout@v3 @@ -72,77 +64,50 @@ jobs: - name: Install Dependencies run: | - pip install -r continuedev/requirements.txt + pip install -r server/requirements.txt - name: Build PyInstaller Executable run: pyinstaller run.spec - - name: Set permissions + # 1.5 Place the binary in extension/exe directory + - name: Make sure extension/exe directory exists run: | - chmod 777 dist/run - - # - name: Test Python Server - # env: - # OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - # ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} - # TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }} - # run: | - # cd continuedev - # pip install -r dev_requirements.txt - # cd src - # python -m pytest - - - name: Upload Artifacts - uses: actions/upload-artifact@v3 - with: - name: ${{ runner.os }}Build - path: dist/* - - test-and-package: - needs: pyinstaller - strategy: - matrix: - os: [macos-latest, ubuntu-20.04, windows-latest] - - runs-on: ${{ matrix.os }} + mkdir -p extension/exe - steps: - - name: Checkout - uses: actions/checkout@v2 - - # Download corresponding binary artifact for the platform - - - name: Create server directory - run: mkdir -p ~/.continue/server/exe - - - name: Download Linux build - uses: actions/download-artifact@v2 - with: - name: LinuxBuild - path: ~/.continue/server/exe - if: matrix.os == 'ubuntu-20.04' - - - name: Download macOS build - uses: actions/download-artifact@v2 - with: - name: macOSBuild - path: ~/.continue/server/exe - if: matrix.os == 'macos-latest' + - name: Copy binary to extension (non-Windows) + run: | + cp dist/run extension/exe/run + if: matrix.os != 'windows-latest' - - name: Download Windows build - uses: actions/download-artifact@v2 - with: - name: WindowsBuild - path: ~/.continue/server/exe + - name: Copy binary to extension (Windows) + run: | + cp dist/run.exe extension/exe/run.exe if: matrix.os == 'windows-latest' + # 1.8 Set permissions and upload binary - name: Set permissions - shell: bash run: | - chmod -R 777 ~/.continue/server/exe + chmod 777 extension/exe/run - # Setup Node.js and install dependencies + - uses: actions/upload-artifact@v2 + if: matrix.os == 'macos-latest' && matrix.arch == 'x64' + with: + name: macOSBinary + path: extension/exe/run + + - uses: actions/upload-artifact@v2 + if: matrix.os == 'ubuntu-20.04' && matrix.arch == 'x64' + with: + name: LinuxBinary + path: extension/exe/run + + - uses: actions/upload-artifact@v2 + if: matrix.os == 'windows-latest' && matrix.arch == 'x64' + with: + name: WindowsBinary + path: extension/exe/run.exe + # 2. Install npm dependencies - name: Use Node.js 19.0.0 uses: actions/setup-node@v3 with: @@ -165,13 +130,17 @@ jobs: cd extension npm ci + - name: Add a copy of continuedev to the extension + run: | + cd extension + cp -r ../server/continuedev continuedev + - name: Install react-app Dependencies run: | cd extension/react-app npm ci --legacy-peer-deps - # Run tests - + # 3. 
Run tests for the extension - name: Prepare the extension run: | cd extension @@ -192,19 +161,31 @@ jobs: npm run test if: matrix.os != 'ubuntu-20.04' - # Create and upload .vsix artifact - - name: Package the extension + # 3.5 If on Apple Silicon, download the binary from S3 bucket + - name: Remove existing binary + if: matrix.os == 'macos-latest' && matrix.arch == 'arm64' + run: rm extension/exe/run + + - name: Download Apple Silicon Binary + if: matrix.os == 'macos-latest' && matrix.arch == 'arm64' + run: curl -o extension/exe/run https://continue-server-binaries.s3.us-west-1.amazonaws.com/apple-silicon/run + + - name: Set permissions run: | - cd extension - npm run package + chmod -R 777 extension/exe/run - - name: Upload .vsix as an artifact - uses: actions/upload-artifact@v2 + # 4. Package the extension + - shell: pwsh + run: echo "target=${{ matrix.platform }}-${{ matrix.arch }}" >> $env:GITHUB_ENV + - run: cd extension && npx vsce package --target ${{ env.target }} + + # 5. Upload the .vsix as an artifact + - uses: actions/upload-artifact@v2 with: - name: vsix-artifact - path: extension/build/* - if: matrix.os == 'ubuntu-20.04' + name: ${{ env.target }} + path: "extension/*.vsix" + # 6. Upload continue.log as an artifact for debugging of the workflow - name: Upload continue.log uses: actions/upload-artifact@v2 with: @@ -213,14 +194,13 @@ jobs: if: always() publish: - needs: test-and-package runs-on: ubuntu-20.04 + needs: build permissions: contents: write - steps: - # Checkout and download .vsix artifact + # 0. Setup git - name: Checkout uses: actions/checkout@v2 @@ -232,40 +212,23 @@ jobs: - name: Pull latest changes run: git pull origin main - - name: Download .vsix artifact - uses: actions/download-artifact@v2 - with: - name: vsix-artifact - path: extension/build - - # Publish the extension and commit/push the version change + # 1. Download the artifacts + - uses: actions/download-artifact@v3 - - name: Use Node.js 19.0.0 - uses: actions/setup-node@v3 - with: - node-version: 19.0.0 - - - name: Cache extension node_modules - uses: actions/cache@v2 - with: - path: extension/node_modules - key: ${{ runner.os }}-node-${{ hashFiles('extension/package-lock.json') }} - - - name: Install extension Dependencies - run: | + # 2. Publish the extension to VS Code Marketplace + - run: | cd extension - npm ci + npx vsce publish --packagePath ../alpine-x64/*.vsix ../darwin-arm64/*.vsix ../darwin-x64/*.vsix ../linux-arm64/*.vsix ../linux-armhf/*.vsix ../linux-x64/*.vsix ../win32-ia32/*.vsix ../win32-x64/*.vsix ../win32-arm64/*.vsix + env: + VSCE_PAT: ${{ secrets.VSCE_TOKEN }} + # 3. Publish the extension to Open VSX Registry - name: Publish (Open VSX Registry) run: | cd extension - npx ovsx publish -p ${{ secrets.VSX_REGISTRY_TOKEN }} --packagePath ./build/*.vsix - - - name: Publish - run: | - cd extension - npx vsce publish -p ${{ secrets.VSCE_TOKEN }} --packagePath ./build/*.vsix + npx ovsx publish -p ${{ secrets.VSX_REGISTRY_TOKEN }} --packagePath ../alpine-x64/*.vsix ../darwin-arm64/*.vsix ../darwin-x64/*.vsix ../linux-arm64/*.vsix ../linux-armhf/*.vsix ../linux-x64/*.vsix ../win32-ia32/*.vsix ../win32-x64/*.vsix ../win32-arm64/*.vsix + # 4. Update the package.json version and push changes - name: Update version in package.json run: | cd extension @@ -283,24 +246,23 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} branch: ${{ github.ref }} - # Download binaries and upload to S3 - + # 5. 
Download binaries and upload to S3 - name: Download Linux build uses: actions/download-artifact@v2 with: - name: LinuxBuild + name: LinuxBinary path: exe/linux - name: Download macOS build uses: actions/download-artifact@v2 with: - name: macOSBuild + name: macOSBinary path: exe/mac - name: Download Windows build uses: actions/download-artifact@v2 with: - name: WindowsBuild + name: WindowsBinary path: exe/windows - name: Configure AWS Credentials @@ -331,3 +293,4 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: "us-west-1" SOURCE_DIR: "exe" + diff --git a/.github/workflows/preview.yaml b/.github/workflows/preview.yaml new file mode 100644 index 00000000..796e65f2 --- /dev/null +++ b/.github/workflows/preview.yaml @@ -0,0 +1,245 @@ +name: Publish Preview Extension + +on: + push: + branches: + - preview + +jobs: + build: + strategy: + matrix: + include: + - os: windows-latest + platform: win32 + arch: x64 + npm_config_arch: x64 + - os: windows-latest + platform: win32 + arch: ia32 + npm_config_arch: ia32 + - os: windows-latest + platform: win32 + arch: arm64 + npm_config_arch: arm + - os: ubuntu-20.04 + platform: linux + arch: x64 + npm_config_arch: x64 + - os: ubuntu-20.04 + platform: linux + arch: arm64 + npm_config_arch: arm64 + - os: ubuntu-20.04 + platform: linux + arch: armhf + npm_config_arch: arm + - os: ubuntu-20.04 + platform: alpine + arch: x64 + npm_config_arch: x64 + - os: macos-latest + platform: darwin + arch: x64 + npm_config_arch: x64 + - os: macos-latest + platform: darwin + arch: arm64 + npm_config_arch: arm64 + runs-on: ${{ matrix.os }} + steps: + # 1. Build the Pyinstaller binary + - name: Check-out repository + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10.8" + + - name: Install Pyinstaller + run: | + pip install pyinstaller + + - name: Install Dependencies + run: | + pip install -r server/requirements.txt + + - name: Build PyInstaller Executable + run: pyinstaller run.spec + + # 1.5 Place the binary in extension/exe directory + - name: Make sure extension/exe directory exists + run: | + mkdir -p extension/exe + + - name: Copy binary to extension (non-Windows) + run: | + cp dist/run extension/exe/run + if: matrix.os != 'windows-latest' + + - name: Copy binary to extension (Windows) + run: | + cp dist/run.exe extension/exe/run.exe + if: matrix.os == 'windows-latest' + + # 1.8 Set permissions and upload binary + - name: Set permissions + run: | + chmod 777 extension/exe/run + + - uses: actions/upload-artifact@v2 + if: matrix.os == 'macos-latest' && matrix.arch == 'x64' + with: + name: macOSBinary + path: extension/exe/run + + - uses: actions/upload-artifact@v2 + if: matrix.os == 'ubuntu-20.04' && matrix.arch == 'x64' + with: + name: LinuxBinary + path: extension/exe/run + + - uses: actions/upload-artifact@v2 + if: matrix.os == 'windows-latest' && matrix.arch == 'x64' + with: + name: WindowsBinary + path: extension/exe/run.exe + + # 2. 
Install npm dependencies + - name: Use Node.js 19.0.0 + uses: actions/setup-node@v3 + with: + node-version: 19.0.0 + + - name: Cache extension node_modules + uses: actions/cache@v2 + with: + path: extension/node_modules + key: ${{ runner.os }}-node-${{ hashFiles('extension/package-lock.json') }} + + - name: Cache react-app node_modules + uses: actions/cache@v2 + with: + path: extension/react-app/node_modules + key: ${{ runner.os }}-node-${{ hashFiles('extension/react-app/package-lock.json') }} + + - name: Install extension Dependencies + run: | + cd extension + npm ci + + - name: Add a copy of continuedev to the extension + run: | + cd extension + cp -r ../server/continuedev continuedev + + - name: Install react-app Dependencies + run: | + cd extension/react-app + npm ci --legacy-peer-deps + + # 3. Run tests for the extension + - name: Prepare the extension + run: | + cd extension + npm run prepackage + + - name: Install Xvfb for Linux and run tests + run: | + sudo apt-get install -y xvfb # Install Xvfb + Xvfb :99 & # Start Xvfb + export DISPLAY=:99 # Export the display number to the environment + cd extension + npm run test + if: matrix.os == 'ubuntu-20.04' + + - name: Run extension tests + run: | + cd extension + npm run test + if: matrix.os != 'ubuntu-20.04' + + # 3.5 If on Apple Silicon, download the binary from S3 bucket + - name: Remove existing binary + if: matrix.os == 'macos-latest' && matrix.arch == 'arm64' + run: rm extension/exe/run + + - name: Download Apple Silicon Binary + if: matrix.os == 'macos-latest' && matrix.arch == 'arm64' + run: curl -o extension/exe/run https://continue-server-binaries.s3.us-west-1.amazonaws.com/apple-silicon/run + + - name: Set permissions + run: | + chmod -R 777 extension/exe/run + + # 4. Package the extension + - shell: pwsh + run: echo "target=${{ matrix.platform }}-${{ matrix.arch }}" >> $env:GITHUB_ENV + - run: cd extension && npx vsce package --pre-release --target ${{ env.target }} + + # 5. Upload the .vsix as an artifact + - uses: actions/upload-artifact@v2 + with: + name: ${{ env.target }} + path: "extension/*.vsix" + + # 6. Upload continue.log as an artifact for debugging of the workflow + - name: Upload continue.log + uses: actions/upload-artifact@v2 + with: + name: continue-log + path: /home/runner/.continue/continue.log + if: always() + + publish: + runs-on: ubuntu-20.04 + needs: build + permissions: + contents: write + steps: + # 0. Setup git + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up Git + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + + - name: Pull latest changes + run: git pull origin preview + + # 1. Download the artifacts + - uses: actions/download-artifact@v3 + + # 2. Publish the extension to VS Code Marketplace + - run: | + cd extension + npx vsce publish --pre-release --packagePath ../alpine-x64/*.vsix ../darwin-arm64/*.vsix ../darwin-x64/*.vsix ../linux-arm64/*.vsix ../linux-armhf/*.vsix ../linux-x64/*.vsix ../win32-ia32/*.vsix ../win32-x64/*.vsix ../win32-arm64/*.vsix + env: + VSCE_PAT: ${{ secrets.VSCE_TOKEN }} + + # 3. Publish the extension to Open VSX Registry + - name: Publish (Open VSX Registry) + run: | + cd extension + npx ovsx publish --pre-release -p ${{ secrets.VSX_REGISTRY_TOKEN }} --packagePath ../alpine-x64/*.vsix ../darwin-arm64/*.vsix ../darwin-x64/*.vsix ../linux-arm64/*.vsix ../linux-armhf/*.vsix ../linux-x64/*.vsix ../win32-ia32/*.vsix ../win32-x64/*.vsix ../win32-arm64/*.vsix + + # 4. 
Update the package.json version and push changes + - name: Update version in package.json + run: | + cd extension + npm version patch + + - name: Commit changes + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git commit -am "ci: 💚 Update package.json version [skip ci]" + + - name: Push changes + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.ref }}
\ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index 90878cc0..36ecd458 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -20,7 +20,7 @@ "name": "Pytest", "type": "python", "request": "launch", - "program": "continuedev/src/continuedev/tests/llm_test.py", + "program": "server/tests/llm_test.py", "console": "integratedTerminal", "justMyCode": true, "subProcess": true @@ -29,21 +29,22 @@ "name": "Reference", "type": "python", "request": "launch", - "module": "continuedev.src.continuedev.models.reference.generate", + "module": "server.continuedev.models.reference.generate", "justMyCode": true }, { "name": "Server", "type": "python", "request": "launch", - "module": "continuedev.src.continuedev.__main__", + "module": "server.continuedev.__main__", "args": [ "--port", "8001" // "--meilisearch-url", "http://localhost:7701" ], "justMyCode": false, - "subProcess": false + "subProcess": false, + "timeout": 10000 // Does it need a build task? // What about a watch task? - type errors? }, @@ -51,8 +52,8 @@ "name": "Headless", "type": "python", "request": "launch", - "module": "continuedev.headless", - "args": ["--config", "continuedev/config.py"], + "module": "server.continuedev.headless", + "args": ["--config", "server/continuedev/config.py"], "justMyCode": false, "subProcess": false }, diff --git a/.vscode/settings.json b/.vscode/settings.json index 856240e5..055c3d1a 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,3 +1,3 @@ { - "python.defaultInterpreterPath": "${workspaceFolder}/continuedev/.venv/bin/python3" + "python.defaultInterpreterPath": "${workspaceFolder}/server/.venv/bin/python3" } diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 73eda76c..00000000 --- a/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# A Dockerfile for quickly building and testing the pyinstaller binary - -FROM python:3.9 - -# Set the working directory to /app - -WORKDIR /app - -# Copy the current directory contents into the container at /app - -COPY . /app - -# Install any needed packages specified in requirements.txt -RUN pip install pyinstaller - -RUN pip install -r /app/continuedev/requirements.txt - -RUN pyinstaller run.spec - -RUN chmod +x /app/dist/run - -CMD ["/app/dist/run"] @@ -4,14 +4,14 @@ REM 1. Remove unwanted stuff rmdir /s /q build rmdir /s /q env rmdir /s /q dist -rmdir /s /q continuedev\.venv +rmdir /s /q server\.venv REM 2. Create a new virtual environment and activate it python -m venv env call env\Scripts\activate REM 3. Install the required packages -pip install -r continuedev\requirements.txt +pip install -r server\requirements.txt pip install pyinstaller @@ -4,14 +4,14 @@ rm -rf build rm -rf env rm -rf dist -rm -rf continuedev/.venv +rm -rf server/.venv # 2. Create a new virtual environment and activate it python3 -m venv env . env/bin/activate # 3. Install the required packages -pip install -r continuedev/requirements.txt +pip install -r server/requirements.txt pip install pyinstaller # 4. Detect M1 architecture or allow manual override diff --git a/continuedev/.gitignore b/continuedev/.gitignore deleted file mode 100644 index 7bb65a6a..00000000 --- a/continuedev/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -notes.md -config.json -run.build -run.dist
\ No newline at end of file
diff --git a/continuedev/main.py b/continuedev/main.py
deleted file mode 100644
index 3cf4e817..00000000
--- a/continuedev/main.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .src.continuedev.server.main import run_server
-
-
-def main():
-    run_server()
diff --git a/continuedev/src/.gitignore b/continuedev/src/.gitignore
deleted file mode 100644
index 7137bb08..00000000
--- a/continuedev/src/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-main.build
-main.dist
-run.build
-run.dist
\ No newline at end of file diff --git a/continuedev/src/continuedev/.gitignore b/continuedev/src/continuedev/.gitignore deleted file mode 100644 index 13bafb25..00000000 --- a/continuedev/src/continuedev/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.continue diff --git a/continuedev/src/continuedev/libs/llm/prompts/chat.py b/continuedev/src/continuedev/libs/llm/prompts/chat.py deleted file mode 100644 index 0bf8635b..00000000 --- a/continuedev/src/continuedev/libs/llm/prompts/chat.py +++ /dev/null @@ -1,95 +0,0 @@ -from textwrap import dedent -from typing import Dict, List - -from anthropic import AI_PROMPT, HUMAN_PROMPT - - -def anthropic_template_messages(messages: List[Dict[str, str]]) -> str: - prompt = "" - - # Anthropic prompt must start with a Human turn - if ( - len(messages) > 0 - and messages[0]["role"] != "user" - and messages[0]["role"] != "system" - ): - prompt += f"{HUMAN_PROMPT} Hello." - for msg in messages: - prompt += f"{HUMAN_PROMPT if (msg['role'] == 'user' or msg['role'] == 'system') else AI_PROMPT} {msg['content']} " - - prompt += AI_PROMPT - return prompt - - -def template_alpaca_messages(msgs: List[Dict[str, str]]) -> str: - prompt = "" - - if msgs[0]["role"] == "system": - prompt += f"{msgs[0]['content']}\n" - msgs.pop(0) - - for msg in msgs: - prompt += "### Instruction:\n" if msg["role"] == "user" else "### Response:\n" - prompt += f"{msg['content']}\n" - - prompt += "### Response:\n" - - return prompt - - -def llama2_template_messages(msgs: List[Dict[str, str]]) -> str: - if len(msgs) == 0: - return "" - - if msgs[0]["role"] == "assistant": - # These models aren't trained to handle assistant message coming first, - # and typically these are just introduction messages from Continue - msgs.pop(0) - - prompt = "" - has_system = msgs[0]["role"] == "system" - - if has_system and msgs[0]["content"].strip() == "": - has_system = False - msgs = msgs[1:] - - if has_system: - system_message = dedent( - f"""\ - <<SYS>> - {msgs[0]["content"]} - <</SYS>> - - """ - ) - if len(msgs) > 1: - prompt += f"[INST] {system_message}{msgs[1]['content']} [/INST]" - else: - prompt += f"[INST] {system_message} [/INST]" - return - - for i in range(2 if has_system else 0, len(msgs)): - if msgs[i]["role"] == "user": - prompt += f"[INST] {msgs[i]['content']} [/INST]" - else: - prompt += msgs[i]["content"] + " " - - return prompt - - -def code_llama_template_messages(msgs: List[Dict[str, str]]) -> str: - return f"[INST] {msgs[-1]['content']}\n[/INST]" - - -def extra_space_template_messages(msgs: List[Dict[str, str]]) -> str: - return f" {msgs[-1]['content']}" - - -def code_llama_python_template_messages(msgs: List[Dict[str, str]]) -> str: - return dedent( - f"""\ - [INST] - You are an expert Python programmer and personal assistant, here is your task: {msgs[-1]['content']} - Your answer should start with a [PYTHON] tag and end with a [/PYTHON] tag. 
- [/INST]""" - ) diff --git a/continuedev/src/continuedev/plugins/steps/__init__.py b/continuedev/src/continuedev/plugins/steps/__init__.py deleted file mode 100644 index 8b137891..00000000 --- a/continuedev/src/continuedev/plugins/steps/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/continuedev/src/continuedev/plugins/steps/open_config.py b/continuedev/src/continuedev/plugins/steps/open_config.py deleted file mode 100644 index d6283af2..00000000 --- a/continuedev/src/continuedev/plugins/steps/open_config.py +++ /dev/null @@ -1,31 +0,0 @@ -from textwrap import dedent - -from ...core.main import Step -from ...core.sdk import ContinueSDK -from ...libs.util.paths import getConfigFilePath - - -class OpenConfigStep(Step): - name: str = "Open config" - - async def describe(self, models): - return dedent( - """\ - `\"config.py\"` is now open. You can add a custom slash command in the `\"custom_commands\"` section, like in this example: - ```python - config = ContinueConfig( - ... - custom_commands=[CustomCommand( - name="test", - description="Write unit tests like I do for the highlighted code", - prompt="Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated.", - )] - ) - ``` - `name` is the command you will type. - `description` is the description displayed in the slash command menu. - `prompt` is the instruction given to the model. The overall prompt becomes "Task: {prompt}, Additional info: {user_input}". For example, if you entered "/test exactly 5 assertions", the overall prompt would become "Task: Write a comprehensive...and sophisticated, Additional info: exactly 5 assertions".""" - ) - - async def run(self, sdk: ContinueSDK): - await sdk.ide.setFileOpen(getConfigFilePath()) diff --git a/docs/docs/customization/context-providers.md b/docs/docs/customization/context-providers.md index 3147f90e..e9f5e9cf 100644 --- a/docs/docs/customization/context-providers.md +++ b/docs/docs/customization/context-providers.md @@ -22,7 +22,7 @@ Setup instructions are below for each (showing the import and config object). Yo Type '@issue' to reference the title and contents of a GitHub issue. ```python -from continuedev.src.continuedev.plugins.context_providers.github import GitHubIssuesContextProvider +from continuedev.plugins.context_providers.github import GitHubIssuesContextProvider ``` ```python @@ -37,7 +37,7 @@ GitHubIssuesContextProvider( Type '@search' to reference the results of codebase search, just like the results you would get from VS Code search. ```python -from continuedev.src.continuedev.plugins.context_providers.search import SearchContextProvider +from continuedev.plugins.context_providers.search import SearchContextProvider ``` ```python @@ -49,7 +49,7 @@ SearchContextProvider() # No arguments necessary Type '@url' to reference the contents of a URL. You can either reference preset URLs, or reference one dynamically by typing '@url https://example.com'. The text contents of the page will be fetched and used as context. ```python -from continuedev.src.continuedev.plugins.context_providers.url import URLContextProvider +from continuedev.plugins.context_providers.url import URLContextProvider ``` ```python @@ -61,7 +61,7 @@ URLContextProvider(preset_urls=["https://continue.dev/docs/customization"]) Type '@diff' to reference all of the changes you've made to your current branch. 
This is useful if you want to summarize what you've done or ask for a general review of your work before committing. ```python -from continuedev.src.continuedev.plugins.context_providers.diff import DiffContextProvider +from continuedev.plugins.context_providers.diff import DiffContextProvider ``` ```python @@ -73,7 +73,7 @@ DiffContextProvider() Type '@tree' to reference the contents of your current workspace. The LLM will be able to see the nested directory structure of your project. ```python -from continuedev.src.continuedev.plugins.context_providers.filetree import FileTreeContextProvider +from continuedev.plugins.context_providers.filetree import FileTreeContextProvider ``` ```python @@ -85,7 +85,7 @@ FileTreeContextProvider() Type '@google' to reference the results of a Google search. For example, type "@google python tutorial" if you want to search and discuss ways of learning Python. ```python -from continuedev.src.continuedev.plugins.context_providers.google import GoogleContextProvider +from continuedev.plugins.context_providers.google import GoogleContextProvider ``` ```python @@ -101,7 +101,7 @@ Note: You can get an API key for free at [serper.dev](https://serper.dev). Type '@terminal' to reference the contents of your IDE's terminal. ```python -from continuedev.src.continuedev.plugins.context_providers.terminal import TerminalContextProvider +from continuedev.plugins.context_providers.terminal import TerminalContextProvider ``` ```python @@ -169,7 +169,7 @@ This example is a situation where you request all of the data (issues in this ca There are other scenarios where you might want to just get information on demand, for example by typing '@url https://continue.dev/docs/context-providers' and having the ContextProvider fetch the contents of that URL dynamically. For this case, you can implement the `DynamicContextProvider` class like this: ```python -from continuedev.src.continuedev.plugins.context_providers.dynamic import DynamicContextProvider +from continuedev.plugins.context_providers.dynamic import DynamicContextProvider class ExampleDynamicProvider(DynamicProvider): title = "example" diff --git a/docs/docs/customization/models.md b/docs/docs/customization/models.md index 8004130d..5bb8a860 100644 --- a/docs/docs/customization/models.md +++ b/docs/docs/customization/models.md @@ -27,7 +27,7 @@ Open-Source Models (not local) In `config.py`, you'll find the `models` property: ```python -from continuedev.src.continuedev.core.models import Models +from continuedev.core.models import Models config = ContinueConfig( ... @@ -53,7 +53,7 @@ If by chance the provider has the exact same API interface as OpenAI, the `OpenA If you'd like to use OpenAI models but are concerned about privacy, you can use the Azure OpenAI service, which is GDPR and HIPAA compliant. After applying for access [here](https://azure.microsoft.com/en-us/products/ai-services/openai-service), you will typically hear back within only a few days. Once you have access, instantiate the model like so: ```python -from continuedev.src.continuedev.libs.llm.openai import OpenAI +from continuedev.libs.llm.openai import OpenAI config = ContinueConfig( ... 
@@ -100,8 +100,8 @@ def template_alpaca_messages(msgs: List[Dict[str, str]]) -> str: It can then be used like this: ```python -from continuedev.src.continuedev.libs.llm.chat import template_alpaca_messages -from continuedev.src.continuedev.libs.llm.ollama import Ollama +from continuedev.libs.llm.chat import template_alpaca_messages +from continuedev.libs.llm.ollama import Ollama ... config=ContinueConfig( ... @@ -114,7 +114,7 @@ config=ContinueConfig( ) ``` -This exact function and a few other default implementations are available in [`continuedev.src.continuedev.libs.llm.prompts.chat`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/prompts/chat.py). +This exact function and a few other default implementations are available in [`continuedev.libs.llm.prompts.chat`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/prompts/chat.py). ## Customizing the /edit Prompt @@ -139,7 +139,7 @@ Output nothing except for the code. No code block, no English explanation, no st It can then be used like this: ```python -from continuedev.src.continuedev.libs.llm.ollama import Ollama +from continuedev.libs.llm.ollama import Ollama ... config=ContinueConfig( ... @@ -154,4 +154,4 @@ config=ContinueConfig( ) ``` -A few pre-made templates are available in [`continuedev.src.continuedev.libs.llm.prompts.edit`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/prompts/edit.py). +A few pre-made templates are available in [`continuedev.libs.llm.prompts.edit`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/prompts/edit.py). diff --git a/docs/docs/reference/Models/anthropicllm.md b/docs/docs/reference/Models/anthropicllm.md index b35761f0..68f8f9d3 100644 --- a/docs/docs/reference/Models/anthropicllm.md +++ b/docs/docs/reference/Models/anthropicllm.md @@ -5,7 +5,7 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; Import the `AnthropicLLM` class and set it as the default model: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM +from continuedev.libs.llm.anthropic import AnthropicLLM config = ContinueConfig( ... @@ -27,14 +27,19 @@ Claude 2 is not yet publicly released. You can request early access [here](https <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={true} default=""/> <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "default": "claude-2", "type": "string"}' required={false} default="claude-2"/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
+<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/>
<ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/>
<ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/>
<ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation.
See the documentation for more information.", "default": {}, "type": "object"}' required={false} default="{}"/> diff --git a/docs/docs/reference/Models/ggml.md b/docs/docs/reference/Models/ggml.md index 7fa2a3fc..6f214d27 100644 --- a/docs/docs/reference/Models/ggml.md +++ b/docs/docs/reference/Models/ggml.md @@ -7,7 +7,7 @@ See our [5 minute quickstart](https://github.com/continuedev/ggml-server-example Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.ggml import GGML +from continuedev.libs.llm.ggml import GGML config = ContinueConfig( ... @@ -24,20 +24,29 @@ config = ContinueConfig( ## Properties <ClassPropertyRef name='server_url' details='{"title": "Server Url", "description": "URL of the OpenAI-compatible server where the model is being served", "default": "http://localhost:8000", "type": "string"}' required={false} default="http://localhost:8000"/> +<ClassPropertyRef name='api_base' details='{"title": "Api Base", "description": "OpenAI API base URL.", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='api_type' details='{"title": "Api Type", "description": "OpenAI API type.", "enum": ["azure", "openai"], "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='api_version' details='{"title": "Api Version", "description": "OpenAI API version. For use with Azure OpenAI Service.", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='engine' details='{"title": "Engine", "description": "OpenAI engine. For use with Azure OpenAI Service.", "type": "string"}' required={false} default=""/> ### Inherited Properties <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to use (optional for the GGML class)", "default": "ggml", "type": "string"}' required={false} default="ggml"/> -<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/> +<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": 
"string"}}' required={false} default=""/> +<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/> +<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty Aof the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/> <ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. 
diff --git a/docs/docs/reference/Models/googlepalmapi.md b/docs/docs/reference/Models/googlepalmapi.md
index 4823dbd1..d9cb0cc2 100644
--- a/docs/docs/reference/Models/googlepalmapi.md
+++ b/docs/docs/reference/Models/googlepalmapi.md
@@ -5,8 +5,8 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:
```python title="~/.continue/config.py"
-from continuedev.src.continuedev.core.models import Models
-from continuedev.src.continuedev.libs.llm.hf_inference_api import GooglePaLMAPI
+from continuedev.core.models import Models
+from continuedev.libs.llm.hf_inference_api import GooglePaLMAPI
config = ContinueConfig(
...
@@ -28,14 +28,19 @@ config = ContinueConfig(
<ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "Google PaLM API key", "type": "string"}' required={true} default=""/>
<ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g.
gpt-4, codellama)", "default": "chat-bison-001", "type": "string"}' required={false} default="chat-bison-001"/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
+<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/>
<ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/>
<ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/>
<ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation.
See the documentation for more information.", "default": {}, "type": "object"}' required={false} default="{}"/> diff --git a/docs/docs/reference/Models/huggingfaceinferenceapi.md b/docs/docs/reference/Models/huggingfaceinferenceapi.md index 9dbf23ed..fee892cc 100644 --- a/docs/docs/reference/Models/huggingfaceinferenceapi.md +++ b/docs/docs/reference/Models/huggingfaceinferenceapi.md @@ -5,8 +5,8 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing), access the Inference Endpoints [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, and fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and then deploy your model by clicking “Create Endpoint”. Change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI +from continuedev.core.models import Models +from continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI config = ContinueConfig( ... @@ -29,15 +29,20 @@ config = ContinueConfig( ### Inherited Properties <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to use (optional for the HuggingFaceInferenceAPI class)", "default": "Hugging Face Inference API", "type": "string"}' required={false} default="Hugging Face Inference API"/> -<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/> +<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/> +<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_p' details='{"title": 
"Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/> +<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty Aof the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/> <ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. 
diff --git a/docs/docs/reference/Models/huggingfacetgi.md b/docs/docs/reference/Models/huggingfacetgi.md
index 1275c13f..176b0539 100644
--- a/docs/docs/reference/Models/huggingfacetgi.md
+++ b/docs/docs/reference/Models/huggingfacetgi.md
@@ -14,15 +14,20 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
### Inherited Properties
<ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. gpt-4, codellama)", "default": "huggingface-tgi", "type": "string"}' required={false} default="huggingface-tgi"/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
+<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM.
If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags.'}"/> <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/> diff --git a/docs/docs/reference/Models/llamacpp.md b/docs/docs/reference/Models/llamacpp.md index 362914f8..b20c7675 100644 --- a/docs/docs/reference/Models/llamacpp.md +++ b/docs/docs/reference/Models/llamacpp.md @@ -11,7 +11,7 @@ Run the llama.cpp server binary to start the API server. If running on a remote After it's up and running, change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp +from continuedev.libs.llm.llamacpp import LlamaCpp config = ContinueConfig( ... 
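Since the llama.cpp snippet above is cut off in this patch, here is a minimal sketch of a complete block. The `server_url` field is an assumption mirroring the `GGML` class documented earlier; point it at whatever host and port the llama.cpp server binary is listening on.

```python title="~/.continue/config.py"
# Sketch only: "server_url" is an assumed field name; adjust host/port
# to wherever the llama.cpp server is serving.
from continuedev.core.models import Models
from continuedev.libs.llm.llamacpp import LlamaCpp

config = ContinueConfig(
    ...,
    models=Models(default=LlamaCpp(server_url="http://localhost:8080")),
)
```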
@@ -34,15 +34,20 @@ config = ContinueConfig(
### Inherited Properties
<ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. gpt-4, codellama)", "default": "llamacpp", "type": "string"}' required={false} default="llamacpp"/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
+<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM.
If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags.'}"/> <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/> diff --git a/docs/docs/reference/Models/ollama.md b/docs/docs/reference/Models/ollama.md index 64a326b7..b6d418ce 100644 --- a/docs/docs/reference/Models/ollama.md +++ b/docs/docs/reference/Models/ollama.md @@ -5,7 +5,7 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; [Ollama](https://ollama.ai/) is an application for Mac and Linux that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.ollama import Ollama +from continuedev.libs.llm.ollama import Ollama config = ContinueConfig( ... 
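To round out the truncated snippet above, a minimal sketch; `llama2` is the documented default for the `model` property, so naming it explicitly is optional.

```python title="~/.continue/config.py"
from continuedev.core.models import Models
from continuedev.libs.llm.ollama import Ollama

config = ContinueConfig(
    ...,
    # "llama2" is the documented default model for the Ollama class.
    models=Models(default=Ollama(model="llama2")),
)
```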
@@ -25,15 +25,20 @@ config = ContinueConfig(
### Inherited Properties
<ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. gpt-4, codellama)", "default": "llama2", "type": "string"}' required={false} default="llama2"/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
+<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM.
If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags.'}"/> <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/> diff --git a/docs/docs/reference/Models/openai.md b/docs/docs/reference/Models/openai.md index 039c1bf7..81af404d 100644 --- a/docs/docs/reference/Models/openai.md +++ b/docs/docs/reference/Models/openai.md @@ -7,7 +7,7 @@ The OpenAI class can be used to access OpenAI models like gpt-4 and gpt-3.5-turb If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.openai import OpenAI +from continuedev.libs.llm.openai import OpenAI config = ContinueConfig( ... @@ -43,13 +43,18 @@ Options for serving models locally with an OpenAI-compatible server include: <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "type": "string"}' required={true} default=""/>
<ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "OpenAI API key", "type": "string"}' required={true} default=""/>
<ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
-<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM.
If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use for requests.", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {}, "type": "object"}' required={false} default="{}"/> diff --git a/docs/docs/reference/Models/openaifreetrial.md b/docs/docs/reference/Models/openaifreetrial.md index 8ebe92a7..1cf1154f 100644 --- a/docs/docs/reference/Models/openaifreetrial.md +++ b/docs/docs/reference/Models/openaifreetrial.md @@ -36,14 +36,19 @@ These classes support any models available through the OpenAI API, assuming your <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "type": "string"}' required={true} default=""/>
<ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
-<ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM.
If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {}, "type": "object"}' required={false} default="{}"/> <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/> diff --git a/docs/docs/reference/Models/queuedllm.md b/docs/docs/reference/Models/queuedllm.md index c9a0b4b1..9ea28c58 100644 --- a/docs/docs/reference/Models/queuedllm.md +++ b/docs/docs/reference/Models/queuedllm.md @@ -7,7 +7,7 @@ QueuedLLM exists to make up for LLM servers that cannot handle multiple requests If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.queued import QueuedLLM +from continuedev.libs.llm.queued import QueuedLLM config = ContinueConfig( ... @@ -27,15 +27,20 @@ config = ContinueConfig( ### Inherited Properties <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "default": "queued", "type": "string"}' required={false} default="queued"/>
-<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/>
+<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/>
<ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/>
+<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/>
+<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/>
+<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/>
<ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/>
<ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/>
<ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/>
<ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/>
+<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/>
<ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.", "default": {}, "type": "object"}' required={false} default="{}"/>
<ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/>
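Because the wrapping example above is truncated in this patch, here is a sketch of the idea: wrap a model whose server can only handle one request at a time so that calls are queued. The `llm` parameter name is an assumption, not confirmed by this page.

```python title="~/.continue/config.py"
# Sketch only: "llm" as the wrapped-model parameter is an assumed name.
from continuedev.core.models import Models
from continuedev.libs.llm.ollama import Ollama
from continuedev.libs.llm.queued import QueuedLLM

config = ContinueConfig(
    ...,
    models=Models(default=QueuedLLM(llm=Ollama(model="llama2"))),
)
```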
See the documentation for more information.", "default": {}, "type": "object"}' required={false} default="{}"/> <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/> diff --git a/docs/docs/reference/Models/replicatellm.md b/docs/docs/reference/Models/replicatellm.md index 0dc5f838..167db379 100644 --- a/docs/docs/reference/Models/replicatellm.md +++ b/docs/docs/reference/Models/replicatellm.md @@ -5,8 +5,8 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM +from continuedev.core.models import Models +from continuedev.libs.llm.replicate import ReplicateLLM config = ContinueConfig( ... @@ -30,14 +30,19 @@ If you don't specify the `model` parameter, it will default to `replicate/llama- <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "Replicate API key", "type": "string"}' required={true} default=""/> <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "default": "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781", "type": "string"}' required={false} default="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781"/> -<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/> +<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/> +<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/> +<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/> <ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. 
See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags.'}"/> diff --git a/docs/docs/reference/Models/textgenui.md b/docs/docs/reference/Models/textgenui.md index e0d757e4..680ff6c4 100644 --- a/docs/docs/reference/Models/textgenui.md +++ b/docs/docs/reference/Models/textgenui.md @@ -5,7 +5,7 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.libs.llm.text_gen_interface import TextGenUI +from continuedev.libs.llm.text_gen_interface import TextGenUI config = ContinueConfig( ... @@ -28,15 +28,20 @@ config = ContinueConfig( ### Inherited Properties <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "default": "text-gen-ui", "type": "string"}' required={false} default="text-gen-ui"/> -<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/> +<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/> +<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/> +<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/> <ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. 
See the documentation for more information.", "default": {"edit": "Here is the code before editing:\n```\n{{{code_to_edit}}}\n```\n\nHere is the edit requested:\n\"{{{user_input}}}\"\n\nHere is the code after editing:"}, "type": "object"}' required={false} default="{'edit': 'Here is the code before editing:\n```\n{{{code_to_edit}}}\n```\n\nHere is the edit requested:\n"{{{user_input}}}"\n\nHere is the code after editing:'}"/> <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "The API key for the LLM provider.", "type": "string"}' required={false} default=""/> diff --git a/docs/docs/reference/Models/togetherllm.md b/docs/docs/reference/Models/togetherllm.md index e0dc35de..a7eae025 100644 --- a/docs/docs/reference/Models/togetherllm.md +++ b/docs/docs/reference/Models/togetherllm.md @@ -5,8 +5,8 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.together import TogetherLLM +from continuedev.core.models import Models +from continuedev.libs.llm.together import TogetherLLM config = ContinueConfig( ... @@ -30,14 +30,19 @@ config = ContinueConfig( <ClassPropertyRef name='api_key' details='{"title": "Api Key", "description": "Together API key", "type": "string"}' required={true} default=""/> <ClassPropertyRef name='title' details='{"title": "Title", "description": "A title that will identify this model in the model selection dropdown", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> -<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='unique_id' details='{"title": "Unique Id", "description": "The unique ID of the user.", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='model' details='{"title": "Model", "description": "The name of the model to be used (e.g. 
gpt-4, codellama)", "default": "togethercomputer/RedPajama-INCITE-7B-Instruct", "type": "string"}' required={false} default="togethercomputer/RedPajama-INCITE-7B-Instruct"/> -<ClassPropertyRef name='max_tokens' details='{"title": "Max Tokens", "description": "The maximum number of tokens to generate.", "default": 1024, "type": "integer"}' required={false} default="1024"/> +<ClassPropertyRef name='system_message' details='{"title": "System Message", "description": "A system message that will always be followed by the LLM", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='context_length' details='{"title": "Context Length", "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.", "default": 2048, "type": "integer"}' required={false} default="2048"/> <ClassPropertyRef name='stop_tokens' details='{"title": "Stop Tokens", "description": "Tokens that will stop the completion.", "type": "array", "items": {"type": "string"}}' required={false} default=""/> +<ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_p' details='{"title": "Top P", "description": "The top_p of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='top_k' details='{"title": "Top K", "description": "The top_k of the completion.", "type": "integer"}' required={false} default=""/> +<ClassPropertyRef name='presence_penalty' details='{"title": "Presence Penalty", "description": "The presence penalty of the completion.", "type": "number"}' required={false} default=""/> +<ClassPropertyRef name='frequency_penalty' details='{"title": "Frequency Penalty", "description": "The frequency penalty of the completion.", "type": "number"}' required={false} default=""/> <ClassPropertyRef name='timeout' details='{"title": "Timeout", "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.", "default": 300, "type": "integer"}' required={false} default="300"/> <ClassPropertyRef name='verify_ssl' details='{"title": "Verify Ssl", "description": "Whether to verify SSL certificates for requests.", "type": "boolean"}' required={false} default=""/> <ClassPropertyRef name='ca_bundle_path' details='{"title": "Ca Bundle Path", "description": "Path to a custom CA bundle to use when making the HTTP request", "type": "string"}' required={false} default=""/> <ClassPropertyRef name='proxy' details='{"title": "Proxy", "description": "Proxy URL to use when making the HTTP request", "type": "string"}' required={false} default=""/> +<ClassPropertyRef name='headers' details='{"title": "Headers", "description": "Headers to use when making the HTTP request", "type": "object", "additionalProperties": {"type": "string"}}' required={false} default=""/> <ClassPropertyRef name='prompt_templates' details='{"title": "Prompt Templates", "description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. 
See the documentation for more information.", "default": {"edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."}, "type": "object"}' required={false} default="{'edit': 'Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags.'}"/> diff --git a/docs/docs/reference/config.md b/docs/docs/reference/config.md index a160a5c2..16d6bebe 100644 --- a/docs/docs/reference/config.md +++ b/docs/docs/reference/config.md @@ -11,7 +11,7 @@ Continue can be deeply customized by editing the `ContinueConfig` object in `~/. <ClassPropertyRef name='steps_on_startup' details='{"title": "Steps On Startup", "description": "Steps that will be automatically run at the beginning of a new session", "default": [], "type": "array", "items": {"$ref": "#/definitions/Step"}}' required={false} default="[]"/> <ClassPropertyRef name='disallowed_steps' details='{"title": "Disallowed Steps", "description": "Steps that are not allowed to be run, and will be skipped if attempted", "default": [], "type": "array", "items": {"type": "string"}}' required={false} default="[]"/> <ClassPropertyRef name='allow_anonymous_telemetry' details='{"title": "Allow Anonymous Telemetry", "description": "If this field is set to True, we will collect anonymous telemetry as described in the documentation page on telemetry. If set to False, we will not collect any data.", "default": true, "type": "boolean"}' required={false} default="True"/> -<ClassPropertyRef name='models' details='{"title": "Models", "description": "Configuration for the models used by Continue. Read more about how to configure models in the documentation.", "default": {"default": {"title": null, "system_message": null, "context_length": 2048, "model": "gpt-4", "max_tokens": 1024, "stop_tokens": null, "timeout": 300, "verify_ssl": null, "ca_bundle_path": null, "proxy": null, "prompt_templates": {}, "api_key": null, "llm": null, "class_name": "OpenAIFreeTrial"}, "summarize": {"title": null, "system_message": null, "context_length": 2048, "model": "gpt-3.5-turbo", "max_tokens": 1024, "stop_tokens": null, "timeout": 300, "verify_ssl": null, "ca_bundle_path": null, "proxy": null, "prompt_templates": {}, "api_key": null, "llm": null, "class_name": "OpenAIFreeTrial"}, "edit": null, "chat": null, "saved": []}, "allOf": [{"$ref": "#/definitions/Models"}]}' required={false} default="{'default': {'title': None, 'system_message': None, 'context_length': 2048, 'model': 'gpt-4', 'max_tokens': 1024, 'stop_tokens': None, 'timeout': 300, 'verify_ssl': None, 'ca_bundle_path': None, 'proxy': None, 'prompt_templates': {}, 'api_key': None, 'llm': None, 'class_name': 'OpenAIFreeTrial'}, 'summarize': {'title': None, 'system_message': None, 'context_length': 2048, 'model': 'gpt-3.5-turbo', 'max_tokens': 1024, 'stop_tokens': None, 'timeout': 300, 'verify_ssl': None, 'ca_bundle_path': None, 'proxy': None, 'prompt_templates': {}, 'api_key': None, 'llm': None, 'class_name': 'OpenAIFreeTrial'}, 'edit': None, 'chat': None, 'saved': []}"/> +<ClassPropertyRef name='models' details='{"title": "Models", "description": "Configuration for the models used by Continue. 
Read more about how to configure models in the documentation.", "default": {"default": {"title": null, "model": "gpt-4", "system_message": null, "context_length": 2048, "stop_tokens": null, "temperature": null, "top_p": null, "top_k": null, "presence_penalty": null, "frequency_penalty": null, "timeout": 300, "verify_ssl": null, "ca_bundle_path": null, "proxy": null, "headers": null, "prompt_templates": {}, "template_messages": null, "api_key": null, "llm": null, "class_name": "OpenAIFreeTrial"}, "summarize": {"title": null, "model": "gpt-3.5-turbo", "system_message": null, "context_length": 2048, "stop_tokens": null, "temperature": null, "top_p": null, "top_k": null, "presence_penalty": null, "frequency_penalty": null, "timeout": 300, "verify_ssl": null, "ca_bundle_path": null, "proxy": null, "headers": null, "prompt_templates": {}, "template_messages": null, "api_key": null, "llm": null, "class_name": "OpenAIFreeTrial"}, "edit": null, "chat": null, "saved": []}, "allOf": [{"$ref": "#/definitions/Models"}]}' required={false} default="{'default': {'title': None, 'model': 'gpt-4', 'system_message': None, 'context_length': 2048, 'stop_tokens': None, 'temperature': None, 'top_p': None, 'top_k': None, 'presence_penalty': None, 'frequency_penalty': None, 'timeout': 300, 'verify_ssl': None, 'ca_bundle_path': None, 'proxy': None, 'headers': None, 'prompt_templates': {}, 'template_messages': None, 'api_key': None, 'llm': None, 'class_name': 'OpenAIFreeTrial'}, 'summarize': {'title': None, 'model': 'gpt-3.5-turbo', 'system_message': None, 'context_length': 2048, 'stop_tokens': None, 'temperature': None, 'top_p': None, 'top_k': None, 'presence_penalty': None, 'frequency_penalty': None, 'timeout': 300, 'verify_ssl': None, 'ca_bundle_path': None, 'proxy': None, 'headers': None, 'prompt_templates': {}, 'template_messages': None, 'api_key': None, 'llm': None, 'class_name': 'OpenAIFreeTrial'}, 'edit': None, 'chat': None, 'saved': []}"/> <ClassPropertyRef name='temperature' details='{"title": "Temperature", "description": "The temperature parameter for sampling from the LLM. Higher temperatures will result in more random output, while lower temperatures will result in more predictable output. This value ranges from 0 to 1.", "default": 0.5, "type": "number"}' required={false} default="0.5"/> <ClassPropertyRef name='custom_commands' details='{"title": "Custom Commands", "description": "An array of custom commands that allow you to reuse prompts. Each has name, description, and prompt properties. When you enter /<name> in the text input, it will act as a shortcut to the prompt.", "default": [{"name": "test", "prompt": "Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.", "description": "This is an example custom command. Use /config to edit it and create more"}], "type": "array", "items": {"$ref": "#/definitions/CustomCommand"}}' required={false} default="[{'name': 'test', 'prompt': "Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.", 'description': 'This is an example custom command. 
Use /config to edit it and create more'}]"/> <ClassPropertyRef name='slash_commands' details='{"title": "Slash Commands", "description": "An array of slash commands that let you map custom Steps to a shortcut.", "default": [], "type": "array", "items": {"$ref": "#/definitions/SlashCommand"}}' required={false} default="[]"/> diff --git a/docs/docs/walkthroughs/codebase-embeddings.md b/docs/docs/walkthroughs/codebase-embeddings.md index 012395b6..66e7bc1b 100644 --- a/docs/docs/walkthroughs/codebase-embeddings.md +++ b/docs/docs/walkthroughs/codebase-embeddings.md @@ -46,8 +46,6 @@ config=ContinueConfig( 6. When you open a workspace, Continue will generate the embeddings. You can then enter '/codebase \<QUESTION\>' to ask a question with embeddings-based retrieval. 7. Please share feedback in [Discord](https://discord.gg/NWtdYexhMs)! -> Note: There is a known bug that requires different imports for the PyPI package. If you encounter "No module named 'continuedev.src', you should replace all instances of 'continuedev.src.continuedev' in `config.py` with just 'continuedev'. - ## Parameters After retrieving the top `n_retrieve` results from the vector database, an additional re-ranking step uses 2 LLM calls to select the top `n_final` results to use to answer the question. If you want to increase the speed of the query at the cost of relevancy, you can skip the re-ranking step by setting `use_reranking` to `False`. Then the top `n_final` results will just be directly calculated from the vector database. diff --git a/docs/docs/walkthroughs/codellama.md b/docs/docs/walkthroughs/codellama.md index 4bf18c57..298d996e 100644 --- a/docs/docs/walkthroughs/codellama.md +++ b/docs/docs/walkthroughs/codellama.md @@ -12,8 +12,8 @@ If you haven't already installed Continue, you can do that [here](https://market 4. Update your Continue config file to look like this: ```python -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.together import TogetherLLM +from continuedev.core.models import Models +from continuedev.libs.llm.together import TogetherLLM config = ContinueConfig( ... @@ -33,7 +33,7 @@ config = ContinueConfig( 3. Change your Continue config file to look like this: ```python -from continuedev.src.continuedev.libs.llm.ollama import Ollama +from continuedev.libs.llm.ollama import Ollama config = ContinueConfig( ... @@ -53,8 +53,8 @@ config = ContinueConfig( 2. Change your Continue config file to look like this: ```python -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM +from continuedev.core.models import Models +from continuedev.libs.llm.replicate import ReplicateLLM config = ContinueConfig( ... 
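Taken together, the documentation diffs above make two changes on every model page: import paths shorten from `continuedev.src.continuedev.*` to `continuedev.*`, and each LLM class now inherits the new completion parameters (`temperature`, `top_p`, `top_k`, `presence_penalty`, `frequency_penalty`) plus a `headers` option for custom HTTP headers. Below is a minimal sketch of a `~/.continue/config.py` exercising these additions, assuming `ContinueConfig` is already in scope as in the snippets above; the model choice and all values are illustrative placeholders, not recommended settings.

```python title="~/.continue/config.py"
from continuedev.core.models import Models
from continuedev.libs.llm.ollama import Ollama  # new, shorter import path

config = ContinueConfig(
    ...
    models=Models(
        default=Ollama(
            title="CodeLlama-7b-Instruct",
            model="codellama:7b-instruct",
            context_length=2048,
            # Sampling parameters newly surfaced on every LLM class (all optional)
            temperature=0.5,
            top_p=0.9,
            top_k=40,
            presence_penalty=0.0,
            frequency_penalty=0.0,
            # New `headers` option: extra HTTP headers sent with each request
            headers={"X-Example-Header": "value"},  # placeholder header
        )
    ),
)
```

Because these are inherited properties of the base LLM class, the same keyword arguments should apply equally to the QueuedLLM, ReplicateLLM, TextGenUI, and TogetherLLM classes documented above.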
diff --git a/extension/package-lock.json b/extension/package-lock.json index 9d9b3e19..31de1c7a 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.412", + "version": "0.1.14", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.412", + "version": "0.1.14", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 0e25fcca..6b7bee81 100644 --- a/extension/package.json +++ b/extension/package.json @@ -1,7 +1,7 @@ { "name": "continue", "icon": "media/icon.png", - "version": "0.0.412", + "version": "0.1.14", "repository": { "type": "git", "url": "https://github.com/continuedev/continue" @@ -213,7 +213,8 @@ "build-test": "tsc && node esbuild.test.mjs", "test": "npm run build-test && node ./out/test-runner/runTestOnVSCodeHost.js", "prepackage": "node scripts/prepackage.js", - "package": "node scripts/package.js" + "package": "node scripts/package.js", + "package:pre-release": "node scripts/package.js --pre-release" }, "devDependencies": { "@nestjs/common": "^8.4.7", diff --git a/extension/react-app/package-lock.json b/extension/react-app/package-lock.json index fb68081c..a52396ef 100644 --- a/extension/react-app/package-lock.json +++ b/extension/react-app/package-lock.json @@ -11,6 +11,7 @@ "@types/vscode-webview": "^1.57.1", "@uiw/react-markdown-preview": "^4.1.13", "downshift": "^7.6.0", + "lodash": "^4.17.21", "meilisearch": "^0.33.0", "posthog-js": "^1.58.0", "prismjs": "^1.29.0", diff --git a/extension/react-app/package.json b/extension/react-app/package.json index b9f70645..be23b34b 100644 --- a/extension/react-app/package.json +++ b/extension/react-app/package.json @@ -12,6 +12,7 @@ "@types/vscode-webview": "^1.57.1", "@uiw/react-markdown-preview": "^4.1.13", "downshift": "^7.6.0", + "lodash": "^4.17.21", "meilisearch": "^0.33.0", "posthog-js": "^1.58.0", "prismjs": "^1.29.0", diff --git a/extension/react-app/public/logos/mistral.png b/extension/react-app/public/logos/mistral.png Binary files differnew file mode 100644 index 00000000..0f535f84 --- /dev/null +++ b/extension/react-app/public/logos/mistral.png diff --git a/extension/react-app/public/logos/wizardlm.png b/extension/react-app/public/logos/wizardlm.png Binary files differnew file mode 100644 index 00000000..a420cf03 --- /dev/null +++ b/extension/react-app/public/logos/wizardlm.png diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index c08c05de..1d0ca1a5 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -285,15 +285,13 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { useEffect(() => { if (!inputRef.current) return; - if (inputRef.current.scrollHeight > inputRef.current.clientHeight) { - inputRef.current.style.height = "auto"; - inputRef.current.style.height = - Math.min(inputRef.current.scrollHeight, 300) + "px"; - } + inputRef.current.style.height = "auto"; + inputRef.current.style.height = + Math.min(inputRef.current.scrollHeight, 300) + "px"; }, [ inputRef.current?.scrollHeight, inputRef.current?.clientHeight, - props.value, + inputRef.current?.value, ]); // Whether the current input follows an '@' and should be treated as context query @@ -344,7 +342,6 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { useEffect(() => { if (!nestedContextProvider) { - 
dispatch(setTakenActionTrue(null)); setItems( contextProviders?.map((provider) => ({ name: provider.display_title, @@ -437,7 +434,6 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { setNestedContextProvider(undefined); // Handle slash commands - dispatch(setTakenActionTrue(null)); setItems( availableSlashCommands?.filter((slashCommand) => { const sc = slashCommand.name.toLowerCase(); @@ -445,6 +441,10 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { return sc.startsWith(iv) && sc !== iv; }) || [] ); + + if (inputValue.startsWith("/") || inputValue.startsWith("@")) { + dispatch(setTakenActionTrue(null)); + } }, [ availableSlashCommands, @@ -756,6 +756,8 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { props.index ); inputRef.current?.focus(); + setPreviewingContextItem(undefined); + setFocusedContextItem(undefined); }} onKeyDown={(e: any) => { if (e.key === "Backspace") { @@ -880,6 +882,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { paddingLeft: "12px", cursor: "default", paddingTop: getFontSize(), + width: "fit-content", }} > {props.active ? "Using" : "Used"} {selectedContextItems.length}{" "} @@ -937,17 +940,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { {...getInputProps({ onCompositionStart: () => setIsComposing(true), onCompositionEnd: () => setIsComposing(false), - onChange: (e) => { - const target = e.target as HTMLTextAreaElement; - // Update the height of the textarea to match the content, up to a max of 200px. - target.style.height = "auto"; - target.style.height = `${Math.min( - target.scrollHeight, - 300 - ).toString()}px`; - - // setShowContextDropdown(target.value.endsWith("@")); - }, + onChange: (e) => {}, onFocus: (e) => { setInputFocused(true); dispatch(setBottomMessage(undefined)); diff --git a/extension/react-app/src/components/ErrorStepContainer.tsx b/extension/react-app/src/components/ErrorStepContainer.tsx index 666780c5..07c0a046 100644 --- a/extension/react-app/src/components/ErrorStepContainer.tsx +++ b/extension/react-app/src/components/ErrorStepContainer.tsx @@ -42,7 +42,7 @@ function ErrorStepContainer(props: ErrorStepContainerProps) { </HeaderButtonWithText> </div> <Div> - <pre className="overflow-x-scroll"> + <pre style={{ whiteSpace: "pre-wrap", wordWrap: "break-word" }}> {props.historyNode.observation?.error as string} </pre> </Div> diff --git a/extension/react-app/src/components/Layout.tsx b/extension/react-app/src/components/Layout.tsx index a54c0ed4..db31c8db 100644 --- a/extension/react-app/src/components/Layout.tsx +++ b/extension/react-app/src/components/Layout.tsx @@ -30,6 +30,20 @@ const LayoutTopDiv = styled.div` border-radius: ${defaultBorderRadius}; scrollbar-base-color: transparent; scrollbar-width: thin; + + & * { + ::-webkit-scrollbar { + width: 4px; + } + + ::-webkit-scrollbar:horizontal { + height: 4px; + } + + ::-webkit-scrollbar-thumb { + border-radius: 2px; + } + } `; const BottomMessageDiv = styled.div<{ displayOnBottom: boolean }>` @@ -47,7 +61,6 @@ const BottomMessageDiv = styled.div<{ displayOnBottom: boolean }>` z-index: 100; box-shadow: 0px 0px 2px 0px ${vscForeground}; max-height: 35vh; - overflow: scroll; `; const Footer = styled.footer` @@ -131,6 +144,20 @@ const Layout = () => { }; }, [client, timeline]); + useEffect(() => { + const handler = (event: any) => { + if (event.data.type === "addModel") { + navigate("/models"); + } else if (event.data.type === "openSettings") { + navigate("/settings"); + } + }; + 
window.addEventListener("message", handler); + return () => { + window.removeEventListener("message", handler); + }; + }, []); + return ( <LayoutTopDiv> <div diff --git a/extension/react-app/src/components/ModelCard.tsx b/extension/react-app/src/components/ModelCard.tsx index d1cb3165..0ab6ac32 100644 --- a/extension/react-app/src/components/ModelCard.tsx +++ b/extension/react-app/src/components/ModelCard.tsx @@ -1,16 +1,16 @@ -import React, { useContext } from "react"; +import React, { useContext, useState } from "react"; import styled from "styled-components"; import { buttonColor, defaultBorderRadius, lightGray } from "."; import { useSelector } from "react-redux"; import { RootStore } from "../redux/store"; import { BookOpenIcon } from "@heroicons/react/24/outline"; import HeaderButtonWithText from "./HeaderButtonWithText"; -import { MODEL_PROVIDER_TAG_COLORS } from "../util/modelData"; +import { MODEL_PROVIDER_TAG_COLORS, PackageDimension } from "../util/modelData"; +import InfoHover from "./InfoHover"; -const Div = styled.div<{ color: string; disabled: boolean }>` +const Div = styled.div<{ color: string; disabled: boolean; hovered: boolean }>` border: 1px solid ${lightGray}; border-radius: ${defaultBorderRadius}; - padding: 4px 8px; position: relative; width: 100%; transition: all 0.5s; @@ -20,13 +20,45 @@ const Div = styled.div<{ color: string; disabled: boolean }>` ? ` opacity: 0.5; ` - : ` - &:hover { + : props.hovered + ? ` border: 1px solid ${props.color}; background-color: ${props.color}22; + cursor: pointer;` + : ""} +`; + +const DimensionsDiv = styled.div` + display: flex; + justify-content: flex-end; + margin-left: auto; + padding: 4px; + /* width: fit-content; */ + + border-top: 1px solid ${lightGray}; +`; + +const DimensionOptionDiv = styled.div<{ selected: boolean }>` + display: flex; + flex-direction: column; + align-items: center; + margin-right: 8px; + background-color: ${lightGray}; + padding: 4px; + border-radius: ${defaultBorderRadius}; + outline: 0.5px solid ${lightGray}; + + ${(props) => + props.selected && + ` + background-color: ${buttonColor}; + color: white; + `} + + &:hover { cursor: pointer; + outline: 1px solid ${buttonColor}; } - `} `; interface ModelCardProps { @@ -35,8 +67,12 @@ interface ModelCardProps { tags?: string[]; refUrl?: string; icon?: string; - onClick: (e: React.MouseEvent<HTMLDivElement, MouseEvent>) => void; + onClick: ( + e: React.MouseEvent<HTMLDivElement, MouseEvent>, + dimensionChoices?: string[] + ) => void; disabled?: boolean; + dimensions?: PackageDimension[]; } function ModelCard(props: ModelCardProps) { @@ -44,53 +80,103 @@ function ModelCard(props: ModelCardProps) { (state: RootStore) => state.config.vscMediaUrl ); + const [dimensionChoices, setDimensionChoices] = useState<string[]>( + props.dimensions?.map((d) => Object.keys(d.options)[0]) || [] + ); + + const [hovered, setHovered] = useState(false); + return ( <Div disabled={props.disabled || false} color={buttonColor} - onClick={props.disabled ? undefined : (e) => props.onClick(e)} + hovered={hovered} > - <div style={{ display: "flex", alignItems: "center" }}> - {vscMediaUrl && props.icon && ( - <img - src={`${vscMediaUrl}/logos/${props.icon}`} - height="24px" - style={{ marginRight: "10px" }} - /> - )} - <h3>{props.title}</h3> - </div> - {props.tags?.map((tag) => { - return ( - <span + <div + onMouseEnter={() => setHovered(true)} + onMouseLeave={() => setHovered(false)} + className="px-2 py-1" + onClick={ + props.disabled + ? 
undefined + : (e) => { + if ((e.target as any).closest("a")) { + return; + } + props.onClick(e, dimensionChoices); + } + } + > + <div style={{ display: "flex", alignItems: "center" }}> + {vscMediaUrl && props.icon && ( + <img + src={`${vscMediaUrl}/logos/${props.icon}`} + height="24px" + style={{ marginRight: "10px" }} + /> + )} + <h3>{props.title}</h3> + </div> + {props.tags?.map((tag) => { + return ( + <span + style={{ + backgroundColor: `${MODEL_PROVIDER_TAG_COLORS[tag]}55`, + color: "white", + padding: "2px 4px", + borderRadius: defaultBorderRadius, + marginRight: "4px", + }} + > + {tag} + </span> + ); + })} + <p>{props.description}</p> + + {props.refUrl && ( + <a style={{ - backgroundColor: `${MODEL_PROVIDER_TAG_COLORS[tag]}55`, - color: "white", - padding: "2px 4px", - borderRadius: defaultBorderRadius, - marginRight: "4px", + position: "absolute", + right: "8px", + top: "8px", }} + href={props.refUrl} + target="_blank" > - {tag} - </span> - ); - })} - <p>{props.description}</p> + <HeaderButtonWithText text="Read the docs"> + <BookOpenIcon width="1.6em" height="1.6em" /> + </HeaderButtonWithText> + </a> + )} + </div> - {props.refUrl && ( - <a - style={{ - position: "absolute", - right: "8px", - top: "8px", - }} - href={props.refUrl} - target="_blank" - > - <HeaderButtonWithText text="Read the docs"> - <BookOpenIcon width="1.6em" height="1.6em" /> - </HeaderButtonWithText> - </a> + {props.dimensions?.length && ( + <DimensionsDiv> + {props.dimensions?.map((dimension, i) => { + return ( + <div className="flex items-center"> + <InfoHover msg={dimension.description} /> + <p className="mx-2 text-sm my-0 py-0">{dimension.name}</p> + {Object.keys(dimension.options).map((key) => { + return ( + <DimensionOptionDiv + onClick={(e) => { + e.stopPropagation(); + const newChoices = [...dimensionChoices]; + newChoices[i] = key; + setDimensionChoices(newChoices); + }} + selected={dimensionChoices[i] === key} + > + {key} + </DimensionOptionDiv> + ); + })} + </div> + ); + })} + </DimensionsDiv> )} </Div> ); diff --git a/extension/react-app/src/components/ModelSettings.tsx b/extension/react-app/src/components/ModelSettings.tsx index 4b9d5e64..3f9414b1 100644 --- a/extension/react-app/src/components/ModelSettings.tsx +++ b/extension/react-app/src/components/ModelSettings.tsx @@ -3,7 +3,7 @@ import { LLM } from "../../../schema/LLM"; import { Label, Select, - TextInput, + Input, defaultBorderRadius, lightGray, vscForeground, @@ -58,7 +58,7 @@ function ModelSettings(props: { llm: any | undefined; role: string }) { {typeof modelOptions.api_key !== undefined && ( <> <Label fontSize={getFontSize()}>API Key</Label> - <TextInput + <Input type="text" defaultValue={props.llm.api_key} placeholder="API Key" @@ -69,7 +69,7 @@ function ModelSettings(props: { llm: any | undefined; role: string }) { {modelOptions.model && ( <> <Label fontSize={getFontSize()}>Model</Label> - <TextInput + <Input type="text" defaultValue={props.llm.model} placeholder="Model" diff --git a/extension/react-app/src/components/Suggestions.tsx b/extension/react-app/src/components/Suggestions.tsx index bdda7579..5779eea8 100644 --- a/extension/react-app/src/components/Suggestions.tsx +++ b/extension/react-app/src/components/Suggestions.tsx @@ -16,6 +16,7 @@ import { useSelector } from "react-redux"; import { RootStore } from "../redux/store"; import HeaderButtonWithText from "./HeaderButtonWithText"; import { getFontSize } from "../util"; +import { usePostHog } from "posthog-js/react"; const Div = styled.div<{ isDisabled: boolean }>` 
border-radius: ${defaultBorderRadius}; @@ -159,6 +160,7 @@ const TutorialDiv = styled.div` `; function SuggestionsArea(props: { onClick: (textInput: string) => void }) { + const posthog = usePostHog(); const [stage, setStage] = useState( parseInt(localStorage.getItem("stage") || "0") ); @@ -207,8 +209,18 @@ function SuggestionsArea(props: { onClick: (textInput: string) => void }) { className="absolute right-1 top-1 cursor-pointer" text="Close Tutorial" onClick={() => { - console.log("HIDE"); setHide(true); + const tutorialClosedCount = parseInt( + localStorage.getItem("tutorialClosedCount") || "0" + ); + localStorage.setItem( + "tutorialClosedCount", + (tutorialClosedCount + 1).toString() + ); + posthog?.capture("tutorial_closed", { + stage, + tutorialClosedCount, + }); }} > <XMarkIcon width="1.2em" height="1.2em" /> @@ -219,8 +231,9 @@ function SuggestionsArea(props: { onClick: (textInput: string) => void }) { disabled={!codeIsHighlighted} {...suggestion} onClick={() => { - if (stage > 0 && !codeIsHighlighted) return; + if (!codeIsHighlighted) return; props.onClick(suggestion.textInput); + posthog?.capture("tutorial_stage_complete", { stage }); setStage(stage + 1); localStorage.setItem("stage", (stage + 1).toString()); setHide(true); diff --git a/extension/react-app/src/components/dialogs/AddContextGroupDialog.tsx b/extension/react-app/src/components/dialogs/AddContextGroupDialog.tsx index 9cd0a95e..a6cf151c 100644 --- a/extension/react-app/src/components/dialogs/AddContextGroupDialog.tsx +++ b/extension/react-app/src/components/dialogs/AddContextGroupDialog.tsx @@ -1,5 +1,5 @@ import { useContext } from "react"; -import { Button, TextInput } from ".."; +import { Button, Input } from ".."; import { GUIClientContext } from "../../App"; import { useDispatch } from "react-redux"; import { @@ -27,7 +27,7 @@ function AddContextGroupDialog({ return ( <div className="p-4"> - <TextInput + <Input defaultValue="My Context Group" type="text" ref={(input) => { diff --git a/extension/react-app/src/components/dialogs/FTCDialog.tsx b/extension/react-app/src/components/dialogs/FTCDialog.tsx index 3ea753bc..5fa2d4e6 100644 --- a/extension/react-app/src/components/dialogs/FTCDialog.tsx +++ b/extension/react-app/src/components/dialogs/FTCDialog.tsx @@ -1,6 +1,6 @@ import React, { useContext } from "react"; import styled from "styled-components"; -import { Button, TextInput } from ".."; +import { Button, Input } from ".."; import { useNavigate } from "react-router-dom"; import { GUIClientContext } from "../../App"; import { useDispatch } from "react-redux"; @@ -37,7 +37,7 @@ function FTCDialog() { OpenAIFreeTrial object. 
</p> - <TextInput + <Input type="text" placeholder="Enter your OpenAI API key" value={apiKey} @@ -46,6 +46,7 @@ function FTCDialog() { <GridDiv> <Button onClick={() => { + dispatch(setShowDialog(false)); navigate("/models"); }} > diff --git a/extension/react-app/src/components/index.ts b/extension/react-app/src/components/index.ts index 9d9b7c40..12b84759 100644 --- a/extension/react-app/src/components/index.ts +++ b/extension/react-app/src/components/index.ts @@ -10,9 +10,10 @@ export const vscBackgroundTransparent = "#1e1e1ede"; export const buttonColor = "#1bbe84"; export const buttonColorHover = "#1bbe84a8"; -export const secondaryDark = "var(--vscode-list-hoverBackground)"; -export const vscBackground = "var(--vscode-editor-background)"; -export const vscForeground = "var(--vscode-editor-foreground)"; +export const secondaryDark = + "var(--vscode-list-hoverBackground, rgb(45 45 45))"; +export const vscBackground = "var(--vscode-editor-background, rgb(30 30 30))"; +export const vscForeground = "var(--vscode-editor-foreground, white)"; export const Button = styled.button` padding: 10px 12px; @@ -92,7 +93,7 @@ export const H3 = styled.h3` width: fit-content; `; -export const TextInput = styled.input.attrs({ type: "text" })` +export const Input = styled.input` width: 100%; padding: 8px 12px; margin: 8px 0; @@ -106,6 +107,10 @@ export const TextInput = styled.input.attrs({ type: "text" })` &:focus { background: ${secondaryDark}; } + + &:invalid { + outline: 1px solid red; + } `; export const NumberInput = styled.input.attrs({ type: "number" })` diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index b8199c19..637896c6 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -1,5 +1,5 @@ import styled from "styled-components"; -import { TextInput, defaultBorderRadius, lightGray } from "../components"; +import { Input, defaultBorderRadius, lightGray, vscBackground } from "../components"; import { FullState } from "../../../schema/FullState"; import { useEffect, @@ -58,7 +58,7 @@ const TopGuiDiv = styled.div` } `; -const TitleTextInput = styled(TextInput)` +const TitleTextInput = styled(Input)` border: none; outline: none; @@ -109,6 +109,10 @@ const GUIHeaderDiv = styled.div` padding-left: 8px; padding-right: 8px; border-bottom: 0.5px solid ${lightGray}; + position: sticky; + top: 0; + z-index: 100; + background-color: ${vscBackground}; `; interface GUIProps { @@ -480,7 +484,7 @@ function GUI(props: GUIProps) { useEffect(() => { const timeout = setTimeout(() => { setShowLoading(true); - }, 10000); + }, 15_000); return () => { clearTimeout(timeout); diff --git a/extension/react-app/src/pages/history.tsx b/extension/react-app/src/pages/history.tsx index 63024e36..7c76cb53 100644 --- a/extension/react-app/src/pages/history.tsx +++ b/extension/react-app/src/pages/history.tsx @@ -17,6 +17,9 @@ const Tr = styled.tr` } overflow-wrap: anywhere; + + border-bottom: 1px solid ${secondaryDark}; + border-top: 1px solid ${secondaryDark}; `; const parseDate = (date: string): Date => { @@ -44,7 +47,6 @@ const TdDiv = styled.div` padding-right: 1rem; padding-top: 0.5rem; padding-bottom: 0.5rem; - border-bottom: 1px solid ${secondaryDark}; `; function lastPartOfPath(path: string): string { @@ -155,7 +157,7 @@ function History() { )} <div> - <table className="w-full"> + <table className="w-full border-spacing-0 border-collapse"> <tbody> {filteredAndSortedSessions.map((session, index) => { const prevDate = diff --git 
a/extension/react-app/src/pages/modelconfig.tsx b/extension/react-app/src/pages/modelconfig.tsx index 97e2d76c..00d9d9bf 100644 --- a/extension/react-app/src/pages/modelconfig.tsx +++ b/extension/react-app/src/pages/modelconfig.tsx @@ -3,7 +3,7 @@ import ModelCard from "../components/ModelCard"; import styled from "styled-components"; import { ArrowLeftIcon } from "@heroicons/react/24/outline"; import { - TextInput, + Input, defaultBorderRadius, lightGray, vscBackground, @@ -22,6 +22,7 @@ import { RootStore } from "../redux/store"; import StyledMarkdownPreview from "../components/StyledMarkdownPreview"; import { getFontSize } from "../util"; import { FormProvider, useForm } from "react-hook-form"; +import _ from "lodash"; const GridDiv = styled.div` display: grid; @@ -151,22 +152,28 @@ function ModelConfig() { <> <h3 className="mb-2">Enter required parameters</h3> - {modelInfo?.collectInputFor?.map((d) => { - return ( - <div> - <label htmlFor={d.key}>{d.key}</label> - <TextInput - id={d.key} - className="border-2 border-gray-200 rounded-md p-2 m-2" - placeholder={d.key} - defaultValue={d.defaultValue} - {...formMethods.register(d.key, { - required: true, - })} - /> - </div> - ); - })} + {modelInfo?.collectInputFor + ?.filter((d) => d.required) + .map((d) => { + return ( + <div> + <label htmlFor={d.key}>{d.key}</label> + <Input + type={d.inputType} + id={d.key} + className="border-2 border-gray-200 rounded-md p-2 m-2" + placeholder={d.key} + defaultValue={d.defaultValue} + min={d.min} + max={d.max} + step={d.step} + {...formMethods.register(d.key, { + required: true, + })} + /> + </div> + ); + })} </> )} @@ -182,11 +189,15 @@ function ModelConfig() { return ( <div> <label htmlFor={d.key}>{d.key}</label> - <TextInput + <Input + type={d.inputType} id={d.key} className="border-2 border-gray-200 rounded-md p-2 m-2" placeholder={d.key} defaultValue={d.defaultValue} + min={d.min} + max={d.max} + step={d.step} {...formMethods.register(d.key, { required: false, })} @@ -209,19 +220,29 @@ function ModelConfig() { tags={pkg.tags} refUrl={pkg.refUrl} icon={pkg.icon || modelInfo.icon} - onClick={(e) => { + dimensions={pkg.dimensions} + onClick={(e, dimensionChoices) => { if (disableModelCards()) return; const formParams: any = {}; for (const d of modelInfo.collectInputFor || []) { formParams[d.key] = d.inputType === "text" ? formMethods.watch(d.key) - : parseInt(formMethods.watch(d.key)); + : parseFloat(formMethods.watch(d.key)); } client?.addModelForRole("*", modelInfo.class, { ...pkg.params, ...modelInfo.params, + ..._.merge( + {}, + ...(pkg.dimensions?.map((dimension, i) => { + if (!dimensionChoices?.[i]) return {}; + return { + ...dimension.options[dimensionChoices[i]], + }; + }) || []) + ), ...formParams, }); navigate("/"); @@ -239,7 +260,7 @@ function ModelConfig() { formParams[d.key] = d.inputType === "text" ? 
formMethods.watch(d.key) - : parseInt(formMethods.watch(d.key)); + : parseFloat(formMethods.watch(d.key)); } client?.addModelForRole("*", modelInfo.class, { diff --git a/extension/react-app/src/pages/models.tsx b/extension/react-app/src/pages/models.tsx index a9a97a13..75c76d67 100644 --- a/extension/react-app/src/pages/models.tsx +++ b/extension/react-app/src/pages/models.tsx @@ -51,9 +51,6 @@ function Models() { icon={modelInfo.icon} refUrl={`https://continue.dev/docs/reference/Models/${modelInfo.class.toLowerCase()}`} onClick={(e) => { - if ((e.target as any).closest("a")) { - return; - } navigate(`/modelconfig/${name}`); }} /> diff --git a/extension/react-app/src/pages/settings.tsx b/extension/react-app/src/pages/settings.tsx index cb269d7b..060a5b75 100644 --- a/extension/react-app/src/pages/settings.tsx +++ b/extension/react-app/src/pages/settings.tsx @@ -1,4 +1,4 @@ -import React, { useContext } from "react"; +import React, { useContext, useEffect } from "react"; import { GUIClientContext } from "../App"; import { useDispatch, useSelector } from "react-redux"; import { RootStore } from "../redux/store"; @@ -113,6 +113,13 @@ function Settings() { navigate("/"); }; + useEffect(() => { + if (!config) return; + + formMethods.setValue("system_message", config.system_message); + formMethods.setValue("temperature", config.temperature); + }, [config]); + return ( <FormProvider {...formMethods}> <div className="overflow-scroll"> @@ -145,7 +152,6 @@ function Settings() { <TextArea placeholder="Enter a system message (e.g. 'Always respond in German')" {...formMethods.register("system_message")} - defaultValue={config.system_message} /> <Hr /> @@ -164,7 +170,6 @@ function Settings() { min="0" max="1" step="0.01" - defaultValue={config.temperature} {...formMethods.register("temperature")} /> <p>1</p> diff --git a/extension/react-app/src/util/modelData.ts b/extension/react-app/src/util/modelData.ts index 91259446..035e4af2 100644 --- a/extension/react-app/src/util/modelData.ts +++ b/extension/react-app/src/util/modelData.ts @@ -1,3 +1,17 @@ +import _ from "lodash"; + +function updatedObj(old: any, pathToValue: { [key: string]: any }) { + const newObject = _.cloneDeep(old); + for (const key in pathToValue) { + if (typeof pathToValue[key] === "function") { + _.updateWith(newObject, key, pathToValue[key]); + } else { + _.updateWith(newObject, key, (__) => pathToValue[key]); + } + } + return newObject; +} + export enum ModelProviderTag { "Requires API Key" = "Requires API Key", "Local" = "Local", @@ -14,6 +28,7 @@ MODEL_PROVIDER_TAG_COLORS[ModelProviderTag["Free"]] = "#ffff00"; export enum CollectInputType { "text" = "text", "number" = "number", + "range" = "range", } export interface InputDescriptor { @@ -38,6 +53,64 @@ const contextLengthInput: InputDescriptor = { defaultValue: 2048, required: false, }; +const temperatureInput: InputDescriptor = { + inputType: CollectInputType.number, + key: "temperature", + label: "Temperature", + defaultValue: undefined, + required: false, + min: 0.0, + max: 1.0, + step: 0.01, +}; +const topPInput: InputDescriptor = { + inputType: CollectInputType.number, + key: "top_p", + label: "Top-P", + defaultValue: undefined, + required: false, + min: 0, + max: 1, + step: 0.01, +}; +const topKInput: InputDescriptor = { + inputType: CollectInputType.number, + key: "top_k", + label: "Top-K", + defaultValue: undefined, + required: false, + min: 0, + max: 1, + step: 0.01, +}; +const presencePenaltyInput: InputDescriptor = { + inputType: CollectInputType.number, + key: 
"presence_penalty", + label: "Presence Penalty", + defaultValue: undefined, + required: false, + min: 0, + max: 1, + step: 0.01, +}; +const FrequencyPenaltyInput: InputDescriptor = { + inputType: CollectInputType.number, + key: "frequency_penalty", + label: "Frequency Penalty", + defaultValue: undefined, + required: false, + min: 0, + max: 1, + step: 0.01, +}; +const completionParamsInputs = [ + contextLengthInput, + temperatureInput, + topKInput, + topPInput, + presencePenaltyInput, + FrequencyPenaltyInput, +]; const serverUrlInput = { inputType: CollectInputType.text, @@ -59,6 +132,14 @@ export interface ModelInfo { collectInputFor?: InputDescriptor[]; } +// A dimension is like parameter count - 7b, 13b, 34b, etc. +// You would set options to the field that should be changed for that option in the params field of ModelPackage +export interface PackageDimension { + name: string; + description: string; + options: { [key: string]: { [key: string]: any } }; +} + export interface ModelPackage { collectInputFor?: InputDescriptor[]; description: string; @@ -75,100 +156,189 @@ export interface ModelPackage { replace?: [string, string][]; [key: string]: any; }; + dimensions?: PackageDimension[]; } -const codeLlama7bInstruct: ModelPackage = { - title: "CodeLlama-7b-Instruct", - description: "A 7b parameter model tuned for code generation", +enum ChatTemplates { + "alpaca" = "template_alpaca_messages", + "llama2" = "llama2_template_messages", + "sqlcoder" = "sqlcoder_template_messages", +} + +const codeLlamaInstruct: ModelPackage = { + title: "CodeLlama Instruct", + description: + "A model from Meta, fine-tuned for code generation and conversation", refUrl: "", params: { title: "CodeLlama-7b-Instruct", model: "codellama:7b-instruct", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.llama2, }, icon: "meta.svg", + dimensions: [ + { + name: "Parameter Count", + description: "The number of parameters in the model", + options: { + "7b": { + model: "codellama:7b-instruct", + title: "CodeLlama-7b-Instruct", + }, + "13b": { + model: "codellama:13b-instruct", + title: "CodeLlama-13b-Instruct", + }, + "34b": { + model: "codellama:34b-instruct", + title: "CodeLlama-34b-Instruct", + }, + }, + }, + ], }; -const codeLlama13bInstruct: ModelPackage = { - title: "CodeLlama-13b-Instruct", - description: "A 13b parameter model tuned for code generation", + +const llama2Chat: ModelPackage = { + title: "Llama2 Chat", + description: "The latest Llama model from Meta, fine-tuned for chat", refUrl: "", params: { - title: "CodeLlama13b-Instruct", - model: "codellama13b-instruct", + title: "Llama2-7b-Chat", + model: "llama2:7b-chat", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.llama2, }, icon: "meta.svg", + dimensions: [ + { + name: "Parameter Count", + description: "The number of parameters in the model", + options: { + "7b": { + model: "llama2:7b-chat", + title: "Llama2-7b-Chat", + }, + "13b": { + model: "llama2:13b-chat", + title: "Llama2-13b-Chat", + }, + "34b": { + model: "llama2:34b-chat", + title: "Llama2-34b-Chat", + }, + }, + }, + ], }; -const codeLlama34bInstruct: ModelPackage = { - title: "CodeLlama-34b-Instruct", - description: "A 34b parameter model tuned for code generation", + +const wizardCoder: ModelPackage = { + title: "WizardCoder", + description: + "A CodeLlama-based code generation model from WizardLM, focused on Python", refUrl: "", params: { - title: "CodeLlama-34b-Instruct", - model: 
"codellama:34b-instruct", + title: "WizardCoder-7b-Python", + model: "wizardcoder:7b-python", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.alpaca, }, - icon: "meta.svg", + icon: "wizardlm.png", + dimensions: [ + { + name: "Parameter Count", + description: "The number of parameters in the model", + options: { + "7b": { + model: "wizardcoder:7b-python", + title: "WizardCoder-7b-Python", + }, + "13b": { + model: "wizardcoder:13b-python", + title: "WizardCoder-13b-Python", + }, + "34b": { + model: "wizardcoder:34b-python", + title: "WizardCoder-34b-Python", + }, + }, + }, + ], }; -const llama2Chat7b: ModelPackage = { - title: "Llama2-7b-Chat", - description: "A 7b parameter model fine-tuned for chat", - refUrl: "", +const phindCodeLlama: ModelPackage = { + title: "Phind CodeLlama (34b)", + description: "A finetune of CodeLlama by Phind", params: { - title: "Llama2-7b-Chat", - model: "llama2:7b-chat", + title: "Phind CodeLlama", + model: "phind-codellama", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.llama2, }, - icon: "meta.svg", }; -const llama2Chat13b: ModelPackage = { - title: "Llama2-13b-Chat", - description: "A 13b parameter model fine-tuned for chat", - refUrl: "", + +const mistral: ModelPackage = { + title: "Mistral (7b)", + description: + "A 7b parameter base model created by Mistral AI, very competent for code generation and other tasks", params: { - title: "Llama2-13b-Chat", - model: "llama2:13b-chat", + title: "Mistral", + model: "mistral", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.llama2, }, - icon: "meta.svg", + icon: "mistral.png", }; -const llama2Chat34b: ModelPackage = { - title: "Llama2-34b-Chat", - description: "A 34b parameter model fine-tuned for chat", - refUrl: "", + +const sqlCoder: ModelPackage = { + title: "SQLCoder", + description: + "A finetune of StarCoder by Defog.ai, focused specifically on SQL", params: { - title: "Llama2-34b-Chat", - model: "llama2:34b-chat", + title: "SQLCoder", + model: "sqlcoder", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.sqlcoder, }, - icon: "meta.svg", + dimensions: [ + { + name: "Parameter Count", + description: "The number of parameters in the model", + options: { + "7b": { + model: "sqlcoder:7b", + title: "SQLCoder-7b", + }, + "13b": { + model: "sqlcoder:15b", + title: "SQLCoder-15b", + }, + }, + }, + ], }; -const codeLlamaPackages = [ - codeLlama7bInstruct, - codeLlama13bInstruct, - codeLlama34bInstruct, -]; - -const llama2Packages = [llama2Chat7b, llama2Chat13b, llama2Chat34b]; -const llama2FamilyPackage = { - title: "Llama2 or CodeLlama", - description: "Any model using the Llama2 or CodeLlama chat template", +const codeup: ModelPackage = { + title: "CodeUp (13b)", + description: "An open-source coding model based on Llama2", params: { - model: "llama2", + title: "CodeUp", + model: "codeup", context_length: 2048, - template_messages: "llama2_template_messages", + template_messages: ChatTemplates.llama2, }, - icon: "meta.svg", }; +const osModels = [ + codeLlamaInstruct, + llama2Chat, + wizardCoder, + phindCodeLlama, + sqlCoder, + mistral, + codeup, +]; + const gpt4: ModelPackage = { title: "GPT-4", description: "The latest model from OpenAI", @@ -192,6 +362,23 @@ const gpt35turbo: ModelPackage = { }, }; +const OLLAMA_TO_REPLICATE_MODEL_NAMES: { [key: string]: string } = { + 
"codellama:7b-instruct": + "meta/codellama-7b-instruct:6527b83e01e41412db37de5110a8670e3701ee95872697481a355e05ce12af0e", + "codellama:13b-instruct": + "meta/codellama-13b-instruct:1f01a52ff933873dff339d5fb5e1fd6f24f77456836f514fa05e91c1a42699c7", + "codellama:34b-instruct": + "meta/codellama-34b-instruct:8281a5c610f6e88237ff3ddaf3c33b56f60809e2bdd19fbec2fda742aa18167e", + "llama2:7b-chat": + "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", + "llama2:13b-chat": + "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d", +}; + +function replicateConvertModelName(model: string): string { + return OLLAMA_TO_REPLICATE_MODEL_NAMES[model] || model; +} + export const MODEL_INFO: { [key: string]: ModelInfo } = { openai: { title: "OpenAI", @@ -210,6 +397,7 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { placeholder: "Enter your OpenAI API key", required: true, }, + ...completionParamsInputs, ], }, anthropic: { @@ -229,6 +417,7 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { placeholder: "Enter your Anthropic API key", required: true, }, + ...completionParamsInputs, ], packages: [ { @@ -251,17 +440,8 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { 'To get started with Ollama, follow these steps:\n1. Download from [ollama.ai](https://ollama.ai/) and open the application\n2. Open a terminal and run `ollama pull <MODEL_NAME>`. Example model names are `codellama:7b-instruct` or `llama2:7b-text`. You can find the full list [here](https://ollama.ai/library).\n3. Make sure that the model name used in step 2 is the same as the one in config.py (e.g. `model="codellama:7b-instruct"`)\n4. Once the model has finished downloading, you can start asking questions through Continue.', icon: "ollama.png", tags: [ModelProviderTag["Local"], ModelProviderTag["Open-Source"]], - packages: [ - ...codeLlamaPackages.map((p) => ({ - ...p, - refUrl: "https://ollama.ai/library/codellama", - })), - ...llama2Packages.map((p) => ({ - ...p, - refUrl: "https://ollama.ai/library/llama2", - })), - ], - collectInputFor: [contextLengthInput], + packages: osModels, + collectInputFor: [...completionParamsInputs], }, together: { title: "TogetherAI", @@ -285,32 +465,51 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { placeholder: "Enter your TogetherAI API key", required: true, }, + ...completionParamsInputs, ], packages: [ - ...codeLlamaPackages.map((p) => { - return { - ...p, - params: { - ...p.params, - model: - "togethercomputer/" + - p.params.model.replace("llama2", "llama-2").replace(":", "-"), - }, - }; + updatedObj(llama2Chat, { + "dimensions[0].options": (options: any) => + _.mapValues(options, (option) => { + return _.assign({}, option, { + model: + "togethercomputer/" + + option.model.replace("llama2", "llama-2").replace(":", "-"), + }); + }), + }), + updatedObj(codeLlamaInstruct, { + "dimensions[0].options": (options: any) => + _.mapValues(options, (option) => { + return _.assign({}, option, { + model: + "togethercomputer/" + + option.model + .replace("codellama", "CodeLlama") + .replace(":", "-") + .replace("instruct", "Instruct"), + }); + }), }), - ...llama2Packages.map((p) => { - return { - ...p, - params: { - ...p.params, - model: - "togethercomputer/" + - p.params.model - .replace("codellama", "CodeLlama") - .replace(":", "-") - .replace("instruct", "Instruct"), + updatedObj(wizardCoder, { + "params.model": "WizardLM/WizardCoder-15B-V1.0", + "params.title": "WizardCoder-15b", + "dimensions[0].options": 
{ + "15b": { + model: "WizardLM/WizardCoder-15B-V1.0", + title: "WizardCoder-15b", }, - }; + "34b (Python)": { + model: "WizardLM/WizardCoder-Python-34B-V1.0", + title: "WizardCoder-34b-Python", + }, + }, + }), + updatedObj(phindCodeLlama, { + "params.model": "Phind/Phind-CodeLlama-34B-Python-v1", + }), + updatedObj(mistral, { + "params.model": "mistralai/Mistral-7B-Instruct-v0.1", }), ].map((p) => { p.params.context_length = 4096; @@ -329,8 +528,8 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { params: { server_url: "http://localhost:1234", }, - packages: [llama2FamilyPackage], - collectInputFor: [contextLengthInput], + packages: osModels, + collectInputFor: [...completionParamsInputs], }, replicate: { title: "Replicate", @@ -348,23 +547,62 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { placeholder: "Enter your Replicate API key", required: true, }, + ...completionParamsInputs, ], icon: "replicate.png", tags: [ ModelProviderTag["Requires API Key"], ModelProviderTag["Open-Source"], ], - packages: [...codeLlamaPackages, ...llama2Packages].map((p) => { - return { - ...p, - params: { - ...p.params, - model: - "meta/" + - p.params.model.replace(":", "-").replace("llama2", "llama-2"), - }, - }; - }), + packages: [ + ...[codeLlamaInstruct, llama2Chat] + .map((p: ModelPackage) => { + if (p.title === "Llama2 Chat") { + return updatedObj(p, { + "dimensions[0].options.34b": undefined, + "dimensions[0].options.70b": { + model: + "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3", + title: "Llama2-70b-Chat", + }, + }); + } + return p; + }) + .map((p) => { + return updatedObj(p, { + "params.model": (model: string) => { + return replicateConvertModelName(model); + }, + "dimensions[0].options": (options: any) => { + const newOptions: any = {}; + for (const key in options) { + newOptions[key] = { + ...options[key], + model: replicateConvertModelName(options[key]?.model), + }; + } + return newOptions; + }, + }); + }), + updatedObj(wizardCoder, { + title: "WizardCoder (15b)", + "params.model": + "andreasjansson/wizardcoder-python-34b-v1-gguf:67eed332a5389263b8ede41be3ee7dc119fa984e2bde287814c4abed19a45e54", + dimensions: undefined, + }), + updatedObj(sqlCoder, { + dimensions: undefined, + title: "SQLCoder (15b)", + "params.model": + "gregwdata/defog-sqlcoder-q8:0a9abc0d143072fd5d8920ad90b8fbaafaf16b10ffdad24bd897b5bffacfce0b", + }), + updatedObj(mistral, { + "params.model": + "a16z-infra/mistral-7b-instruct-v0.1:83b6a56e7c828e667f21fd596c338fd4f0039b46bcfa18d973e8e70e455fda70", + }), + ], }, llamacpp: { title: "llama.cpp", @@ -384,8 +622,8 @@ export const MODEL_INFO: { [key: string]: ModelInfo } = { After it's up and running, you can start using Continue.`, icon: "llamacpp.png", tags: [ModelProviderTag.Local, ModelProviderTag["Open-Source"]], - packages: [llama2FamilyPackage], - collectInputFor: [contextLengthInput], + packages: osModels, + collectInputFor: [...completionParamsInputs], }, palm: { title: "Google PaLM API", @@ -426,9 +664,9 @@ After it's up and running, you can start using Continue.`, "HuggingFace Text Generation Inference is an advanced, highly-performant option for serving open-source models to multiple people. To get started, follow the [Quick Tour](https://huggingface.co/docs/text-generation-inference/quicktour) on their website to set up the Docker container. 
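For illustration, the config.py entry this flow produces might look like the sketch below — `HuggingFaceInferenceAPI` and the `server_url` key both appear elsewhere in this diff, but the exact constructor fields and the `default` slot name are assumptions:

```python
from continuedev.core.config import ContinueConfig
from continuedev.core.models import Models
from continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI

config = ContinueConfig(
    models=Models(
        # "default" is an assumed field name; model/server_url mirror the
        # inputs this provider collects in the UI
        default=HuggingFaceInferenceAPI(
            model="codellama",
            server_url="http://localhost:8080",
        ),
    ),
)
```
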
Make sure to enter the server URL below that corresponds to the host and port you set up for the Docker container.", icon: "hf.png", tags: [ModelProviderTag.Local, ModelProviderTag["Open-Source"]], - packages: [llama2FamilyPackage], + packages: osModels, collectInputFor: [ - contextLengthInput, + ...completionParamsInputs, { ...serverUrlInput, defaultValue: "http://localhost:8080" }, ], }, @@ -451,11 +689,11 @@ After it's up and running, you can start using Continue.`, ...serverUrlInput, defaultValue: "http://localhost:8000", }, - contextLengthInput, + ...completionParamsInputs, ], icon: "openai.svg", tags: [ModelProviderTag.Local, ModelProviderTag["Open-Source"]], - packages: [llama2FamilyPackage], + packages: osModels, }, freetrial: { title: "GPT-4 limited free trial", @@ -467,5 +705,6 @@ After it's up and running, you can start using Continue.`, icon: "openai.svg", tags: [ModelProviderTag.Free], packages: [gpt4, gpt35turbo], + collectInputFor: [...completionParamsInputs], }, }; diff --git a/extension/schema/ContinueConfig.d.ts b/extension/schema/ContinueConfig.d.ts index 64aa5c02..b9eb92ff 100644 --- a/extension/schema/ContinueConfig.d.ts +++ b/extension/schema/ContinueConfig.d.ts @@ -56,6 +56,10 @@ export type UniqueId = string; */ export type Model = string; /** + * The maximum number of tokens to generate. + */ +export type MaxTokens = number; +/** * Tokens that will stop the completion. */ export type StopTokens = string[]; @@ -222,6 +226,7 @@ export interface LLM { context_length?: ContextLength; unique_id?: UniqueId; model: Model; + max_tokens?: MaxTokens; stop_tokens?: StopTokens; timeout?: Timeout; verify_ssl?: VerifySsl; diff --git a/extension/schema/LLM.d.ts b/extension/schema/LLM.d.ts index 2c1ced29..fbd5aa41 100644 --- a/extension/schema/LLM.d.ts +++ b/extension/schema/LLM.d.ts @@ -27,6 +27,10 @@ export type UniqueId = string; */ export type Model = string; /** + * The maximum number of tokens to generate. + */ +export type MaxTokens = number; +/** * Tokens that will stop the completion. */ export type StopTokens = string[]; @@ -57,6 +61,7 @@ export interface LLM1 { context_length?: ContextLength; unique_id?: UniqueId; model: Model; + max_tokens?: MaxTokens; stop_tokens?: StopTokens; timeout?: Timeout; verify_ssl?: VerifySsl; diff --git a/extension/schema/Models.d.ts b/extension/schema/Models.d.ts index 67d73cfc..9ce79b60 100644 --- a/extension/schema/Models.d.ts +++ b/extension/schema/Models.d.ts @@ -27,6 +27,10 @@ export type UniqueId = string; */ export type Model = string; /** + * The maximum number of tokens to generate. + */ +export type MaxTokens = number; +/** * Tokens that will stop the completion. */ export type StopTokens = string[]; @@ -70,6 +74,7 @@ export interface LLM { context_length?: ContextLength; unique_id?: UniqueId; model: Model; + max_tokens?: MaxTokens; stop_tokens?: StopTokens; timeout?: Timeout; verify_ssl?: VerifySsl; diff --git a/extension/scripts/package.js b/extension/scripts/package.js index 8f1e68fd..c55ec42a 100644 --- a/extension/scripts/package.js +++ b/extension/scripts/package.js @@ -1,11 +1,18 @@ const { exec } = require("child_process"); const fs = require("fs"); +const args = process.argv.slice(2); +const isPreRelease = args.includes("--pre-release"); + if (!fs.existsSync("build")) { fs.mkdirSync("build"); } -exec("vsce package --out ./build patch", (error) => { +const command = isPreRelease + ? 
"vsce package --out ./build patch --pre-release" + : "vsce package --out ./build patch"; + +exec(command, (error) => { if (error) throw error; console.log("vsce package completed"); }); diff --git a/extension/src/activation/activate.ts b/extension/src/activation/activate.ts index 9fcaf685..edd13137 100644 --- a/extension/src/activation/activate.ts +++ b/extension/src/activation/activate.ts @@ -22,6 +22,39 @@ function getExtensionVersionInt(versionString: string): number { return parseInt(versionString.replace(/\./g, "")); } +function addPythonPathForConfig() { + // Add to python.analysis.extraPaths global setting so config.py gets LSP + + if ( + vscode.workspace.workspaceFolders?.some((folder) => + folder.uri.fsPath.endsWith("continue") + ) + ) { + // Not for the Continue repo + return; + } + + const pythonConfig = vscode.workspace.getConfiguration("python"); + const analysisPaths = pythonConfig.get<string[]>("analysis.extraPaths"); + const autoCompletePaths = pythonConfig.get<string[]>( + "autoComplete.extraPaths" + ); + const pathToAdd = extensionContext?.extensionPath; + if (analysisPaths && pathToAdd && !analysisPaths.includes(pathToAdd)) { + analysisPaths.push(pathToAdd); + pythonConfig.update("analysis.extraPaths", analysisPaths); + } + + if ( + autoCompletePaths && + pathToAdd && + !autoCompletePaths.includes(pathToAdd) + ) { + autoCompletePaths.push(pathToAdd); + pythonConfig.update("autoComplete.extraPaths", autoCompletePaths); + } +} + export async function activateExtension(context: vscode.ExtensionContext) { extensionContext = context; console.log("Using Continue version: ", getExtensionVersion()); @@ -33,39 +66,12 @@ export async function activateExtension(context: vscode.ExtensionContext) { } catch (e) { console.log("Error getting workspace folder: ", e); } - // Before anything else, check whether this is an out-of-date version of the extension - // Do so by grabbing the package.json off of the GitHub repository for now. - fetch(PACKAGE_JSON_RAW_GITHUB_URL) - .then(async (res) => res.json()) - .then((packageJson) => { - const n1 = getExtensionVersionInt(packageJson.version); - const n2 = getExtensionVersionInt(getExtensionVersion()); - if (Math.abs(n1 - n2) > 1) { - // Accept up to 1 version difference - vscode.window.showInformationMessage( - `You are using an out-of-date version of the Continue extension. 
Please update to the latest version.` - ); - } - }) - .catch((e) => console.log("Error checking for extension updates: ", e)); - - // Add to python.analysis.extraPaths global setting - // const pythonConfig = vscode.workspace.getConfiguration("python"); - // const extraPaths = pythonConfig.get<string[]>("analysis.extraPaths"); - // const pathToAdd = path.join(os.homedir(), ".continue", "server"); - // if (extraPaths) { - // if (!extraPaths.includes(pathToAdd)) { - // extraPaths.push(pathToAdd); - // pythonConfig.update("analysis.extraPaths", extraPaths); - // } - // } else { - // pythonConfig.update("analysis.extraPaths", [pathToAdd]); - // } // Register commands and providers registerAllCodeLensProviders(context); registerAllCommands(context); registerQuickFixProvider(); + addPythonPathForConfig(); // Start the server const sessionIdPromise = (async () => { diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts index 3f82631f..4c47fba6 100644 --- a/extension/src/activation/environmentSetup.ts +++ b/extension/src/activation/environmentSetup.ts @@ -89,7 +89,10 @@ export function getExtensionVersion() { } // Returns whether a server of the current version is already running -async function checkOrKillRunningServer(serverUrl: string): Promise<boolean> { +async function checkOrKillRunningServer( + serverUrl: string, + deleteBinary: boolean +): Promise<boolean> { const serverRunning = await checkServerRunning(serverUrl); let shouldKillAndReplace = true; @@ -127,10 +130,13 @@ async function checkOrKillRunningServer(serverUrl: string): Promise<boolean> { if (fs.existsSync(serverVersionPath())) { fs.unlinkSync(serverVersionPath()); } - // Also delete the server binary - const serverBinary = serverBinaryPath(); - if (fs.existsSync(serverBinary)) { - fs.unlinkSync(serverBinary); + + if (deleteBinary) { + // Optionally, delete the server binary + const serverBinary = serverBinaryPath(); + if (fs.existsSync(serverBinary)) { + fs.unlinkSync(serverBinary); + } } } @@ -146,6 +152,17 @@ function ensureDirectoryExistence(filePath: string) { fs.mkdirSync(dirname); } +function isPreviewExtension() { + // If the extension minor version is odd, it is a preview version + const extensionVersion = getExtensionVersion(); + if (!extensionVersion || extensionVersion === "") { + return false; + } + const extensionVersionSplit = extensionVersion.split("."); + const extensionMinorVersion = extensionVersionSplit[1]; + return parseInt(extensionMinorVersion) % 2 === 1; +} + export async function downloadFromS3( bucket: string, fileName: string, @@ -187,24 +204,84 @@ export async function downloadFromS3( }); } -export async function startContinuePythonServer(redownload: boolean = true) { - // Check vscode settings - const manuallyRunningServer = - vscode.workspace - .getConfiguration("continue") - .get<boolean>("manuallyRunningServer") || false; - const serverUrl = getContinueServerUrl(); - if ( - (serverUrl !== "http://localhost:65432" && - serverUrl !== "http://127.0.0.1:65432") || - manuallyRunningServer - ) { - console.log("Continue server is being run manually, skipping start"); - return; - } +function includedBinaryPath(): string { + const extensionPath = getExtensionUri().fsPath; + return path.join( + extensionPath, + "exe", + `run${os.platform() === "win32" ? 
".exe" : ""}` + ); +} + +function runExecutable(path: string) { + console.log("---- Starting Continue server ----"); + let attempts = 0; + let maxAttempts = 5; + let delay = 1000; // Delay between each attempt in milliseconds + + const spawnChild = () => { + const retry = (e: any) => { + attempts++; + console.log(`Error caught: ${e}.\n\nRetrying attempt ${attempts}...`); + setTimeout(spawnChild, delay); + }; + try { + // NodeJS bug requires not using detached on Windows, otherwise windowsHide is ineffective + // Otherwise, detach is preferable + const windowsSettings = { + windowsHide: true, + }; + const macLinuxSettings = { + detached: true, + stdio: "ignore", + }; + const settings: any = + os.platform() === "win32" ? windowsSettings : macLinuxSettings; + // Spawn the server + const child = spawn(path, settings); + + // Either unref to avoid zombie process, or listen to events because you can + if (os.platform() === "win32") { + child.stdout.on("data", (data: any) => { + console.log(`stdout: ${data}`); + }); + child.stderr.on("data", (data: any) => { + console.log(`stderr: ${data}`); + }); + child.on("error", (err: any) => { + if (attempts < maxAttempts) { + retry(err); + } else { + console.error("Failed to start subprocess.", err); + } + }); + child.on("exit", (code: any, signal: any) => { + console.log("Subprocess exited with code", code, signal); + }); + child.on("close", (code: any, signal: any) => { + console.log("Subprocess closed with code", code, signal); + }); + } else { + child.unref(); + } + } catch (e: any) { + if (attempts < maxAttempts) { + retry(e); + } else { + throw e; + } + } + }; + + spawnChild(); + + // Write the current version of vscode extension to a file called server_version.txt + fs.writeFileSync(serverVersionPath(), getExtensionVersion()); +} +async function setupWithS3Download(redownload: boolean, serverUrl: string) { // Check if server is already running - if (redownload && (await checkOrKillRunningServer(serverUrl))) { + if (redownload && (await checkOrKillRunningServer(serverUrl, true))) { console.log("Continue server already running"); return; } @@ -263,7 +340,7 @@ export async function startContinuePythonServer(redownload: boolean = true) { try { await downloadFromS3( bucket, - fileName, + `${isPreviewExtension() ? "preview/" : ""}${fileName}`, destination, "us-west-1", false @@ -285,7 +362,7 @@ export async function startContinuePythonServer(redownload: boolean = true) { try { await downloadFromS3( bucket, - fileName, + `${isPreviewExtension() ? "preview/" : ""}${fileName}`, destination, "us-west-1", true @@ -342,67 +419,44 @@ export async function startContinuePythonServer(redownload: boolean = true) { } // Run the executable - console.log("---- Starting Continue server ----"); - let attempts = 0; - let maxAttempts = 5; - let delay = 1000; // Delay between each attempt in milliseconds + runExecutable(destination); +} - const spawnChild = () => { - const retry = (e: any) => { - attempts++; - console.log(`Error caught: ${e}.\n\nRetrying attempt ${attempts}...`); - setTimeout(spawnChild, delay); - }; - try { - // NodeJS bug requires not using detached on Windows, otherwise windowsHide is ineffective - // Otherwise, detach is preferable - const windowsSettings = { - windowsHide: true, - }; - const macLinuxSettings = { - detached: true, - stdio: "ignore", - }; - const settings: any = - os.platform() === "win32" ? 
windowsSettings : macLinuxSettings; - // Spawn the server - const child = spawn(destination, settings); +export async function startContinuePythonServer(redownload: boolean = true) { + // Check vscode settings for whether server is being run manually + const manuallyRunningServer = + vscode.workspace + .getConfiguration("continue") + .get<boolean>("manuallyRunningServer") || false; + const serverUrl = getContinueServerUrl(); + if ( + (serverUrl !== "http://localhost:65432" && + serverUrl !== "http://127.0.0.1:65432") || + manuallyRunningServer + ) { + console.log("Continue server is being run manually, skipping start"); + return; + } - // Either unref to avoid zombie process, or listen to events because you can - if (os.platform() === "win32") { - child.stdout.on("data", (data: any) => { - console.log(`stdout: ${data}`); - }); - child.stderr.on("data", (data: any) => { - console.log(`stderr: ${data}`); - }); - child.on("error", (err: any) => { - if (attempts < maxAttempts) { - retry(err); - } else { - console.error("Failed to start subprocess.", err); - } - }); - child.on("exit", (code: any, signal: any) => { - console.log("Subprocess exited with code", code, signal); - }); - child.on("close", (code: any, signal: any) => { - console.log("Subprocess closed with code", code, signal); - }); - } else { - child.unref(); - } - } catch (e: any) { - if (attempts < maxAttempts) { - retry(e); - } else { - throw e; - } - } - }; + // If on Apple Silicon, download binary from S3 + // const isAppleSilicon = os.platform() === "darwin" && os.arch() === "arm64"; + // if (isAppleSilicon) { + // await setupWithS3Download(redownload, serverUrl); + // return; + // } - spawnChild(); + // Check if current server version is already running + if (redownload && (await checkOrKillRunningServer(serverUrl, false))) { + console.log("Continue server already running"); + return; + } - // Write the current version of vscode extension to a file called server_version.txt - fs.writeFileSync(serverVersionPath(), getExtensionVersion()); + // Otherwise, use the binary installed with the extension + if (!fs.existsSync(includedBinaryPath())) { + throw new Error( + `Continue server binary not found at ${includedBinaryPath()}` + ); + } + + runExecutable(includedBinaryPath()); } diff --git a/extension/src/commands.ts b/extension/src/commands.ts index 4e2f4571..267f7e30 100644 --- a/extension/src/commands.ts +++ b/extension/src/commands.ts @@ -76,6 +76,7 @@ const commandsMap: { [command: string]: (...args: any) => any } = { vscode.commands.executeCommand("workbench.action.toggleAuxiliaryBar"); }, "continue.quickTextEntry": async () => { + addHighlightedCodeToContext(true); const text = await vscode.window.showInputBox({ placeHolder: "Ask a question or enter a slash command", title: "Continue Quick Input", @@ -100,6 +101,20 @@ const commandsMap: { [command: string]: (...args: any) => any } = { vscode.commands.executeCommand("continue.continueGUIView.focus"); await ideProtocolClient.debugTerminal(); }, + + // Commands without keyboard shortcuts + "continue.addModel": () => { + vscode.commands.executeCommand("continue.continueGUIView.focus"); + debugPanelWebview?.postMessage({ + type: "addModel", + }); + }, + "continue.openSettingsUI": () => { + vscode.commands.executeCommand("continue.continueGUIView.focus"); + debugPanelWebview?.postMessage({ + type: "openSettings", + }); + }, }; export function registerAllCommands(context: vscode.ExtensionContext) { diff --git a/extension/src/continueIdeClient.ts b/extension/src/continueIdeClient.ts 
index c25dff50..342de769 100644 --- a/extension/src/continueIdeClient.ts +++ b/extension/src/continueIdeClient.ts @@ -148,32 +148,6 @@ class IdeProtocolClient { const filepath = event.uri.fsPath; const contents = event.getText(); this.messenger?.send("fileSaved", { filepath, contents }); - - if (event.fileName.endsWith("config.py")) { - if ( - this.context.globalState.get<boolean>( - "continue.showConfigInfoMessage" - ) !== false - ) { - vscode.window - .showInformationMessage( - "Reload the VS Code window for your changes to the Continue config to take effect.", - "Reload", - "Don't show again" - ) - .then((selection) => { - if (selection === "Don't show again") { - // Get the global state - context.globalState.update( - "continue.showConfigInfoMessage", - false - ); - } else if (selection === "Reload") { - vscode.commands.executeCommand("workbench.action.reloadWindow"); - } - }); - } - } }); // Setup listeners for any selection changes in open editors diff --git a/extension/src/lang-server/codeLens.ts b/extension/src/lang-server/codeLens.ts index ec03f73e..6f36a6bd 100644 --- a/extension/src/lang-server/codeLens.ts +++ b/extension/src/lang-server/codeLens.ts @@ -4,6 +4,7 @@ import * as path from "path"; import * as os from "os"; import { DIFF_DIRECTORY, diffManager } from "../diffs"; import { getMetaKeyLabel } from "../util/util"; +import { debugPanelWebview } from "../debugPanel"; class SuggestionsCodeLensProvider implements vscode.CodeLensProvider { public provideCodeLenses( document: vscode.TextDocument, @@ -82,8 +83,61 @@ class DiffViewerCodeLensProvider implements vscode.CodeLensProvider { } } +class ConfigPyCodeLensProvider implements vscode.CodeLensProvider { + public provideCodeLenses( + document: vscode.TextDocument, + _: vscode.CancellationToken + ): vscode.CodeLens[] | Thenable<vscode.CodeLens[]> { + const codeLenses: vscode.CodeLens[] = []; + + if ( + !document.uri.fsPath.endsWith(".continue/config.py") && + !document.uri.fsPath.endsWith(".continue\\config.py") + ) { + return codeLenses; + } + + const lines = document.getText().split(os.EOL); + const lineOfModels = lines.findIndex((line) => + line.includes("models=Models(") + ); + + if (lineOfModels >= 0) { + const range = new vscode.Range(lineOfModels, 0, lineOfModels + 1, 0); + codeLenses.push( + new vscode.CodeLens(range, { + title: `+ Add a Model`, + command: "continue.addModel", + }) + ); + } + + const lineOfSystemMessage = lines.findIndex((line) => + line.replace(" ", "").includes("config=ContinueConfig(") + ); + + if (lineOfSystemMessage >= 0) { + const range = new vscode.Range( + lineOfSystemMessage, + 0, + lineOfSystemMessage + 1, + 0 + ); + codeLenses.push( + new vscode.CodeLens(range, { + title: `✏️ Edit in UI`, + command: "continue.openSettingsUI", + }) + ); + } + + return codeLenses; + } +} + let diffsCodeLensDisposable: vscode.Disposable | undefined = undefined; let suggestionsCodeLensDisposable: vscode.Disposable | undefined = undefined; +let configPyCodeLensDisposable: vscode.Disposable | undefined = undefined; export function registerAllCodeLensProviders(context: vscode.ExtensionContext) { if (suggestionsCodeLensDisposable) { @@ -92,6 +146,9 @@ export function registerAllCodeLensProviders(context: vscode.ExtensionContext) { if (diffsCodeLensDisposable) { diffsCodeLensDisposable.dispose(); } + if (configPyCodeLensDisposable) { + configPyCodeLensDisposable.dispose(); + } suggestionsCodeLensDisposable = vscode.languages.registerCodeLensProvider( "*", new SuggestionsCodeLensProvider() @@ -100,6 +157,11 @@ export 
function registerAllCodeLensProviders(context: vscode.ExtensionContext) { "*", new DiffViewerCodeLensProvider() ); + configPyCodeLensDisposable = vscode.languages.registerCodeLensProvider( + "*", + new ConfigPyCodeLensProvider() + ); context.subscriptions.push(suggestionsCodeLensDisposable); context.subscriptions.push(diffsCodeLensDisposable); + context.subscriptions.push(configPyCodeLensDisposable); } diff --git a/extension/src/test-suite/environmentSetup.test.ts b/extension/src/test-suite/environmentSetup.test.ts index 1c5fe97e..dd371ab1 100644 --- a/extension/src/test-suite/environmentSetup.test.ts +++ b/extension/src/test-suite/environmentSetup.test.ts @@ -6,8 +6,8 @@ import { startContinuePythonServer } from "../activation/environmentSetup"; import fetch from "node-fetch"; describe("Can start python server", () => { - test("Can start python server in under 35 seconds", async function () { - const allowedTime = 60_000; + test("Can start python server in under 80 seconds", async function () { + const allowedTime = 80_000; this.timeout(allowedTime + 10_000); console.log("Starting server in test..."); diff --git a/extension/src/util/messenger.ts b/extension/src/util/messenger.ts index 152d4a1f..29b38497 100644 --- a/extension/src/util/messenger.ts +++ b/extension/src/util/messenger.ts @@ -55,6 +55,15 @@ export class WebsocketMessenger extends Messenger { this.onMessageType(messageType, listener); } } + + newWebsocket.addEventListener("open", () => console.log("Websocket connection opened")); + newWebsocket.addEventListener("error", (error: any) => { + console.error("Websocket error occurred: ", error); + }); + newWebsocket.addEventListener("close", (error: any) => { + console.log("Websocket connection closed: ", error); + }); + return newWebsocket; } diff --git a/run.m1.spec b/run.m1.spec index bbd7bbc1..a1aa6161 100644 --- a/run.m1.spec +++ b/run.m1.spec @@ -10,7 +10,7 @@ a = Analysis( pathex=[], binaries=[], datas=[ - ('continuedev', 'continuedev'), + ('server/continuedev', 'continuedev'), (certifi.where(), 'ca_bundle'), ] + copy_metadata('replicate'), hiddenimports=['anthropic', 'github', 'ripgrepy', 'bs4', 'redbaron'], @@ -1,7 +1,7 @@ import os import sys -from continuedev.main import main +from server.main import main if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"): ca_bundle_path = os.path.join(sys._MEIPASS, "ca_bundle", "cacert.pem") @@ -10,7 +10,7 @@ a = Analysis( pathex=[], binaries=[], datas=[ - ('continuedev', 'continuedev'), + ('server/continuedev', 'continuedev'), (certifi.where(), 'ca_bundle') ] + copy_metadata('replicate'), hiddenimports=['anthropic', 'github', 'ripgrepy', 'bs4', 'redbaron'], diff --git a/schema/json/ContextItem.json b/schema/json/ContextItem.json index 32a214d3..e68f61d3 100644 --- a/schema/json/ContextItem.json +++ b/schema/json/ContextItem.json @@ -1,6 +1,6 @@ { "title": "ContextItem", - "$ref": "#/definitions/src__continuedev__core__main__ContextItem", + "$ref": "#/definitions/continuedev__core__main__ContextItem", "definitions": { "ContextItemId": { "title": "ContextItemId", @@ -44,7 +44,7 @@ "id" ] }, - "src__continuedev__core__main__ContextItem": { + "continuedev__core__main__ContextItem": { "title": "ContextItem", "description": "A ContextItem is a single item that is stored in the ContextManager.", "type": "object", diff --git a/schema/json/ContinueConfig.json b/schema/json/ContinueConfig.json index e78bb3c9..74d24e2f 100644 --- a/schema/json/ContinueConfig.json +++ b/schema/json/ContinueConfig.json @@ -1,6 +1,6 @@ { "title": 
"ContinueConfig", - "$ref": "#/definitions/src__continuedev__core__config__ContinueConfig", + "$ref": "#/definitions/continuedev__core__config__ContinueConfig", "definitions": { "FunctionCall": { "title": "FunctionCall", @@ -126,6 +126,12 @@ "description": "The name of the model to be used (e.g. gpt-4, codellama)", "type": "string" }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of tokens to generate.", + "default": 1024, + "type": "integer" + }, "stop_tokens": { "title": "Stop Tokens", "description": "Tokens that will stop the completion.", @@ -171,7 +177,7 @@ "model" ] }, - "src__continuedev__core__models__ContinueSDK": { + "continuedev__core__models__ContinueSDK": { "title": "ContinueSDK", "type": "object", "properties": {} @@ -202,7 +208,7 @@ } }, "sdk": { - "$ref": "#/definitions/src__continuedev__core__models__ContinueSDK" + "$ref": "#/definitions/continuedev__core__models__ContinueSDK" } }, "required": [ @@ -265,7 +271,7 @@ "type": "object", "properties": {} }, - "src__continuedev__core__context__ContinueSDK": { + "continuedev__core__context__ContinueSDK": { "title": "ContinueSDK", "description": "To avoid circular imports", "type": "object", @@ -356,7 +362,7 @@ "description": "The ContinueSDK instance accessible by the ContextProvider", "allOf": [ { - "$ref": "#/definitions/src__continuedev__core__context__ContinueSDK" + "$ref": "#/definitions/continuedev__core__context__ContinueSDK" } ] }, @@ -398,7 +404,7 @@ "dynamic" ] }, - "src__continuedev__core__config__ContinueConfig": { + "continuedev__core__config__ContinueConfig": { "title": "ContinueConfig", "description": "Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\\.continue\\config.py` for Windows) on your machine. 
This class is instantiated from the config file for every new session.", "type": "object", @@ -436,6 +442,7 @@ "system_message": null, "context_length": 2048, "model": "gpt-4", + "max_tokens": 1024, "stop_tokens": null, "timeout": 300, "verify_ssl": null, @@ -451,6 +458,7 @@ "system_message": null, "context_length": 2048, "model": "gpt-3.5-turbo", + "max_tokens": 1024, "stop_tokens": null, "timeout": 300, "verify_ssl": null, diff --git a/schema/json/FileEdit.json b/schema/json/FileEdit.json index 011e0462..b963f9b0 100644 --- a/schema/json/FileEdit.json +++ b/schema/json/FileEdit.json @@ -1,6 +1,6 @@ { "title": "FileEdit", - "$ref": "#/definitions/src__continuedev__models__filesystem_edit__FileEdit", + "$ref": "#/definitions/continuedev__models__filesystem_edit__FileEdit", "definitions": { "Position": { "title": "Position", @@ -37,7 +37,7 @@ "end" ] }, - "src__continuedev__models__filesystem_edit__FileEdit": { + "continuedev__models__filesystem_edit__FileEdit": { "title": "FileEdit", "type": "object", "properties": { diff --git a/schema/json/FileEditWithFullContents.json b/schema/json/FileEditWithFullContents.json index 2ea75bab..ad136388 100644 --- a/schema/json/FileEditWithFullContents.json +++ b/schema/json/FileEditWithFullContents.json @@ -1,6 +1,6 @@ { "title": "FileEditWithFullContents", - "$ref": "#/definitions/src__continuedev__models__filesystem_edit__FileEditWithFullContents", + "$ref": "#/definitions/continuedev__models__filesystem_edit__FileEditWithFullContents", "definitions": { "Position": { "title": "Position", @@ -59,7 +59,7 @@ "replacement" ] }, - "src__continuedev__models__filesystem_edit__FileEditWithFullContents": { + "continuedev__models__filesystem_edit__FileEditWithFullContents": { "title": "FileEditWithFullContents", "type": "object", "properties": { diff --git a/schema/json/FullState.json b/schema/json/FullState.json index aebe4b21..33cf4ad7 100644 --- a/schema/json/FullState.json +++ b/schema/json/FullState.json @@ -1,6 +1,6 @@ { "title": "FullState", - "$ref": "#/definitions/src__continuedev__core__main__FullState", + "$ref": "#/definitions/continuedev__core__main__FullState", "definitions": { "FunctionCall": { "title": "FunctionCall", @@ -332,7 +332,7 @@ "requires_query" ] }, - "src__continuedev__core__main__FullState": { + "continuedev__core__main__FullState": { "title": "FullState", "description": "A full state of the program, including the history", "type": "object", diff --git a/schema/json/History.json b/schema/json/History.json index 9575b8c3..c73a8685 100644 --- a/schema/json/History.json +++ b/schema/json/History.json @@ -1,6 +1,6 @@ { "title": "History", - "$ref": "#/definitions/src__continuedev__core__main__History", + "$ref": "#/definitions/continuedev__core__main__History", "definitions": { "FunctionCall": { "title": "FunctionCall", @@ -218,7 +218,7 @@ "depth" ] }, - "src__continuedev__core__main__History": { + "continuedev__core__main__History": { "title": "History", "description": "A history of steps taken and their results", "type": "object", diff --git a/schema/json/HistoryNode.json b/schema/json/HistoryNode.json index f9004a43..26287573 100644 --- a/schema/json/HistoryNode.json +++ b/schema/json/HistoryNode.json @@ -1,6 +1,6 @@ { "title": "HistoryNode", - "$ref": "#/definitions/src__continuedev__core__main__HistoryNode", + "$ref": "#/definitions/continuedev__core__main__HistoryNode", "definitions": { "FunctionCall": { "title": "FunctionCall", @@ -171,7 +171,7 @@ "content" ] }, - "src__continuedev__core__main__HistoryNode": { + 
"continuedev__core__main__HistoryNode": { "title": "HistoryNode", "description": "A point in history, a list of which make up History", "type": "object", diff --git a/schema/json/LLM.json b/schema/json/LLM.json index b5b48d6b..30d4d684 100644 --- a/schema/json/LLM.json +++ b/schema/json/LLM.json @@ -1,8 +1,8 @@ { "title": "LLM", - "$ref": "#/definitions/src__continuedev__libs__llm__LLM", + "$ref": "#/definitions/continuedev__libs__llm__LLM", "definitions": { - "src__continuedev__libs__llm__LLM": { + "continuedev__libs__llm__LLM": { "title": "LLM", "type": "object", "properties": { @@ -32,6 +32,12 @@ "description": "The name of the model to be used (e.g. gpt-4, codellama)", "type": "string" }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of tokens to generate.", + "default": 1024, + "type": "integer" + }, "stop_tokens": { "title": "Stop Tokens", "description": "Tokens that will stop the completion.", diff --git a/schema/json/Models.json b/schema/json/Models.json index 9a7bd310..19044d93 100644 --- a/schema/json/Models.json +++ b/schema/json/Models.json @@ -1,6 +1,6 @@ { "title": "Models", - "$ref": "#/definitions/src__continuedev__core__models__Models", + "$ref": "#/definitions/continuedev__core__models__Models", "definitions": { "LLM": { "title": "LLM", @@ -32,6 +32,12 @@ "description": "The name of the model to be used (e.g. gpt-4, codellama)", "type": "string" }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of tokens to generate.", + "default": 1024, + "type": "integer" + }, "stop_tokens": { "title": "Stop Tokens", "description": "Tokens that will stop the completion.", @@ -82,7 +88,7 @@ "type": "object", "properties": {} }, - "src__continuedev__core__models__Models": { + "continuedev__core__models__Models": { "title": "Models", "description": "Main class that holds the current model configuration", "type": "object", diff --git a/schema/json/Position.json b/schema/json/Position.json index 6b272ce7..6fc31c7d 100644 --- a/schema/json/Position.json +++ b/schema/json/Position.json @@ -1,8 +1,8 @@ { "title": "Position", - "$ref": "#/definitions/src__continuedev__models__main__Position", + "$ref": "#/definitions/continuedev__models__main__Position", "definitions": { - "src__continuedev__models__main__Position": { + "continuedev__models__main__Position": { "title": "Position", "type": "object", "properties": { diff --git a/schema/json/Range.json b/schema/json/Range.json index 75675183..8d62b622 100644 --- a/schema/json/Range.json +++ b/schema/json/Range.json @@ -1,6 +1,6 @@ { "title": "Range", - "$ref": "#/definitions/src__continuedev__models__main__Range", + "$ref": "#/definitions/continuedev__models__main__Range", "definitions": { "Position": { "title": "Position", @@ -20,7 +20,7 @@ "character" ] }, - "src__continuedev__models__main__Range": { + "continuedev__models__main__Range": { "title": "Range", "description": "A range in a file. 
0-indexed.",
       "type": "object",
diff --git a/schema/json/RangeInFile.json b/schema/json/RangeInFile.json
index 1f5afaa3..243fa610 100644
--- a/schema/json/RangeInFile.json
+++ b/schema/json/RangeInFile.json
@@ -1,6 +1,6 @@
 {
   "title": "RangeInFile",
-  "$ref": "#/definitions/src__continuedev__models__filesystem__RangeInFile",
+  "$ref": "#/definitions/continuedev__models__filesystem__RangeInFile",
   "definitions": {
     "Position": {
       "title": "Position",
@@ -37,7 +37,7 @@
         "end"
       ]
     },
-    "src__continuedev__models__filesystem__RangeInFile": {
+    "continuedev__models__filesystem__RangeInFile": {
       "title": "RangeInFile",
       "type": "object",
       "properties": {
diff --git a/schema/json/SessionInfo.json b/schema/json/SessionInfo.json
index 6bccfb2f..3957a40f 100644
--- a/schema/json/SessionInfo.json
+++ b/schema/json/SessionInfo.json
@@ -1,8 +1,8 @@
 {
   "title": "SessionInfo",
-  "$ref": "#/definitions/src__continuedev__core__main__SessionInfo",
+  "$ref": "#/definitions/continuedev__core__main__SessionInfo",
   "definitions": {
-    "src__continuedev__core__main__SessionInfo": {
+    "continuedev__core__main__SessionInfo": {
       "title": "SessionInfo",
       "type": "object",
       "properties": {
diff --git a/schema/json/Traceback.json b/schema/json/Traceback.json
index 45606a2b..0b12dced 100644
--- a/schema/json/Traceback.json
+++ b/schema/json/Traceback.json
@@ -1,6 +1,6 @@
 {
   "title": "Traceback",
-  "$ref": "#/definitions/src__continuedev__models__main__Traceback",
+  "$ref": "#/definitions/continuedev__models__main__Traceback",
   "definitions": {
     "TracebackFrame": {
       "title": "TracebackFrame",
@@ -29,7 +29,7 @@
         "function"
       ]
     },
-    "src__continuedev__models__main__Traceback": {
+    "continuedev__models__main__Traceback": {
       "title": "Traceback",
       "type": "object",
       "properties": {
diff --git a/schema/json/TracebackFrame.json b/schema/json/TracebackFrame.json
index 1907430a..d7925c6f 100644
--- a/schema/json/TracebackFrame.json
+++ b/schema/json/TracebackFrame.json
@@ -1,8 +1,8 @@
 {
   "title": "TracebackFrame",
-  "$ref": "#/definitions/src__continuedev__models__main__TracebackFrame",
+  "$ref": "#/definitions/continuedev__models__main__TracebackFrame",
   "definitions": {
-    "src__continuedev__models__main__TracebackFrame": {
+    "continuedev__models__main__TracebackFrame": {
       "title": "TracebackFrame",
       "type": "object",
       "properties": {
diff --git a/continuedev/README.md b/server/README.md
index b6f87cb0..25fb640e 100644
--- a/continuedev/README.md
+++ b/server/README.md
@@ -10,7 +10,7 @@ The Continue server acts as a bridge between the Continue React app and your IDE
 Start it by running the following commands:
 
-1. `cd continuedev`
+1. `cd server`
 2. Make sure packages are installed with `poetry install`
    - If poetry is not installed, you can install with
      ```bash
      curl -sSL https://install.python-poetry.org | python3 -
      ```
      (official instructions [here](https://python-poetry.org/docs/#installing-with-the-official-installer))
 3. `poetry shell` to activate the virtual environment
-4. `cd ..`
-5. `python3 -m continuedev.src.continuedev.server.main` to start the server
+4. `python3 -m continuedev.server.main` to start the server
 
 Once you've validated that this works, you'll often want to use a debugger, in which case we've provided a launch configuration for VS Code in `.vscode/launch.json`. To start the debugger in VS Code, ensure that the workspace directory is the root of the `continue` repo, then press F5.
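For reference, step 4 can also be reproduced programmatically from inside the poetry shell — a minimal sketch that assumes `continuedev.server.main` exposes a no-argument `main()`, which this diff does not show directly:

```python
# Equivalent of `python3 -m continuedev.server.main`, run from the server/ directory
from continuedev.server.main import main

main()  # assumed to take no arguments (signature not shown in this diff)
```
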
@@ -37,7 +36,7 @@ Once you've validated that this works, you'll often want to use a debugger, in w
 
 ## Writing Steps
 
-See the `src/continuedev/libs/steps` folder for examples of writing a Continue step. See our documentation for tutorials.
+See the `continuedev/libs/steps` folder for examples of writing a Continue step. See our documentation for tutorials.
 
 ## How to contribute
 
@@ -71,9 +70,10 @@ cd continue/extension/scripts && python3 install_from_source.py
 > [!IMPORTANT]
 > Ensure you have a Java Runtime Environment (JRE) installed. Verify this by typing `java
-> -version` in your command prompt or terminal. If a version number appears, you're set.
+-version` in your command prompt or terminal. If a version number appears, you're set.
 > If not, download and install a JRE from Oracle's website or through a package manager,
 > for example Homebrew.
+>
 > ```sh
 > brew install openjdk@11
 > ```
@@ -83,6 +83,6 @@ cd continue/extension/scripts && python3 install_from_source.py
 
 - [Continue Server README](./README.md): learn about the core of Continue, which can be downloaded as a [PyPI package](https://pypi.org/project/continuedev/)
 - [VS Code Extension README](../extension/README.md): learn about the capabilities of our extension—the first implementation of Continue's IDE Protocol—which makes it possible to use Continue in VS Code and GitHub Codespaces
 - [Continue GUI README](../extension/react-app/): learn about the React app that lets users interact with the server and is placed adjacent to the text editor in any supported IDE
-- [Schema README](../schema/README.md): learn about the JSON Schema types generated from Pydantic models, which we use across the `continuedev/` and `extension/` directories
+- [Schema README](../schema/README.md): learn about the JSON Schema types generated from Pydantic models, which we use across the `server/` and `extension/` directories
 - [Continue Docs README](../docs/README.md): learn how our [docs](https://continue.dev/docs) are written and built
 - [How to debug the VS Code Extension README](../extension/src/README.md): learn how to set up the VS Code extension, so you can debug it
diff --git a/continuedev/src/continuedev/__init__.py b/server/continuedev/__init__.py
index 1b4776a8..1b4776a8 100644
--- a/continuedev/src/continuedev/__init__.py
+++ b/server/continuedev/__init__.py
diff --git a/continuedev/src/continuedev/__main__.py b/server/continuedev/__main__.py
index caaba117..caaba117 100644
--- a/continuedev/src/continuedev/__main__.py
+++ b/server/continuedev/__main__.py
diff --git a/continuedev/src/continuedev/core/abstract_sdk.py b/server/continuedev/core/abstract_sdk.py
index fdb99d47..fdb99d47 100644
--- a/continuedev/src/continuedev/core/abstract_sdk.py
+++ b/server/continuedev/core/abstract_sdk.py
diff --git a/continuedev/src/continuedev/core/autopilot.py b/server/continuedev/core/autopilot.py
index 15dd20d4..11c05378 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/server/continuedev/core/autopilot.py
@@ -30,12 +30,6 @@
 from ..models.main import ContinueBaseModel
 from ..plugins.context_providers.file import FileContextProvider
 from ..plugins.context_providers.highlighted_code import HighlightedCodeContextProvider
 from ..plugins.policies.default import DefaultPolicy
-from ..plugins.steps.core.core import (
-    DisplayErrorStep,
-    ManualEditStep,
-    ReversibleStep,
-    UserInputStep,
-)
 from ..plugins.steps.on_traceback import DefaultOnTracebackStep
 from ..server.ide_protocol import AbstractIdeProtocolServer
 from ..server.meilisearch_server
import get_meilisearch_url, stop_meilisearch @@ -54,6 +48,7 @@ from .main import ( ) from .observation import InternalErrorObservation, Observation from .sdk import ContinueSDK +from .steps import DisplayErrorStep, ManualEditStep, ReversibleStep, UserInputStep def get_error_title(e: Exception) -> str: @@ -105,10 +100,8 @@ class Autopilot(ContinueBaseModel): started: bool = False - async def start( - self, - full_state: Optional[FullState] = None, - config: Optional[ContinueConfig] = None, + async def load( + self, config: Optional[ContinueConfig] = None, only_reloading: bool = False ): self.continue_sdk = await ContinueSDK.create(self, config=config) if override_policy := self.continue_sdk.config.policy_override: @@ -123,8 +116,16 @@ class Autopilot(ContinueBaseModel): FileContextProvider(workspace_dir=self.ide.workspace_directory), ], self.continue_sdk, + only_reloading=only_reloading, ) + async def start( + self, + full_state: Optional[FullState] = None, + config: Optional[ContinueConfig] = None, + ): + await self.load(config=config, only_reloading=False) + if full_state is not None: self.history = full_state.history self.session_info = full_state.session_info @@ -146,6 +147,10 @@ class Autopilot(ContinueBaseModel): self.started = True + async def reload_config(self): + await self.load(config=None, only_reloading=True) + await self.update_subscribers() + async def cleanup(self): stop_meilisearch() @@ -440,8 +445,9 @@ class Autopilot(ContinueBaseModel): caught_error = True - is_continue_custom_exception = issubclass( - e.__class__, ContinueCustomException + is_continue_custom_exception = ( + issubclass(e.__class__, ContinueCustomException) + or e.__class__.__name__ == ContinueCustomException.__name__ ) error_string = ( diff --git a/continuedev/src/continuedev/core/config.py b/server/continuedev/core/config.py index 2bbb42cc..2bbb42cc 100644 --- a/continuedev/src/continuedev/core/config.py +++ b/server/continuedev/core/config.py diff --git a/continuedev/src/continuedev/core/context.py b/server/continuedev/core/context.py index dfc8d449..547a1593 100644 --- a/continuedev/src/continuedev/core/context.py +++ b/server/continuedev/core/context.py @@ -1,7 +1,7 @@ import asyncio import time from abc import abstractmethod -from typing import Awaitable, Callable, Dict, List +from typing import Awaitable, Callable, Dict, List, Optional from meilisearch_python_async import Client from pydantic import BaseModel, Field @@ -251,11 +251,21 @@ class ContextManager: self.context_providers = {} self.provider_titles = set() - async def start(self, context_providers: List[ContextProvider], sdk: ContinueSDK): + async def start( + self, + context_providers: List[ContextProvider], + sdk: ContinueSDK, + only_reloading: bool = False, + ): """ Starts the context manager. 
""" - # Use only non-meilisearch-dependent providers until it is loaded + new_context_providers = { + provider.title: provider + for provider in context_providers + if provider.title not in self.provider_titles + } + self.context_providers = { provider.title: provider for provider in context_providers } @@ -272,7 +282,7 @@ class ContextManager: logger.warning(f"Error loading meilisearch index: {e}") # Start MeiliSearch in the background without blocking - async def load_index(context_providers): + async def load_index(providers_to_load: List[ContextProvider]): running = await check_meilisearch_running() if not running: await start_meilisearch() @@ -285,10 +295,15 @@ class ContextManager: return logger.debug("Loading Meilisearch index...") - await self.load_index(sdk.ide.workspace_directory) + await self.load_index( + sdk.ide.workspace_directory, providers_to_load=providers_to_load + ) logger.debug("Loaded Meilisearch index") - create_async_task(load_index(context_providers), on_err) + providers_to_load = ( + new_context_providers if only_reloading else context_providers + ) + create_async_task(load_index(providers_to_load), on_err) @staticmethod async def update_documents(context_items: List[ContextItem], workspace_dir: str): @@ -325,12 +340,24 @@ class ContextManager: Deletes the documents in the search index. """ async with Client(get_meilisearch_url()) as search_client: - await asyncio.wait_for( - search_client.index(SEARCH_INDEX_NAME).delete_documents(ids), - timeout=20, - ) - - async def load_index(self, workspace_dir: str, should_retry: bool = True): + try: + await asyncio.wait_for( + search_client.index(SEARCH_INDEX_NAME).delete_documents(ids), + timeout=20, + ) + except asyncio.TimeoutError: + logger.warning( + "Failed to delete document from meilisearch in 20 seconds" + ) + except Exception as e: + logger.warning(f"Error deleting document from meilisearch: {e}") + + async def load_index( + self, + workspace_dir: str, + should_retry: bool = True, + providers_to_load: Optional[List[ContextProvider]] = None, + ): try: async with Client(get_meilisearch_url()) as search_client: # First, create the index if it doesn't exist @@ -389,7 +416,9 @@ class ContextManager: tasks = [ safe_load(provider) - for _, provider in self.context_providers.items() + for _, provider in ( + providers_to_load or self.context_providers + ).items() ] await asyncio.wait_for(asyncio.gather(*tasks), timeout=20) diff --git a/continuedev/src/continuedev/core/env.py b/server/continuedev/core/env.py index 60b86538..60b86538 100644 --- a/continuedev/src/continuedev/core/env.py +++ b/server/continuedev/core/env.py diff --git a/continuedev/src/continuedev/core/lsp.py b/server/continuedev/core/lsp.py index fc26c85c..fc26c85c 100644 --- a/continuedev/src/continuedev/core/lsp.py +++ b/server/continuedev/core/lsp.py diff --git a/continuedev/src/continuedev/core/main.py b/server/continuedev/core/main.py index 617a5aaa..617a5aaa 100644 --- a/continuedev/src/continuedev/core/main.py +++ b/server/continuedev/core/main.py diff --git a/continuedev/src/continuedev/core/models.py b/server/continuedev/core/models.py index 9b8d26d5..21ebd8f6 100644 --- a/continuedev/src/continuedev/core/models.py +++ b/server/continuedev/core/models.py @@ -2,8 +2,8 @@ from typing import List, Optional from pydantic import BaseModel -from ..libs.llm import LLM from ..libs.llm.anthropic import AnthropicLLM +from ..libs.llm.base import LLM from ..libs.llm.ggml import GGML from ..libs.llm.google_palm_api import GooglePaLMAPI from 
..libs.llm.hf_inference_api import HuggingFaceInferenceAPI diff --git a/continuedev/src/continuedev/core/observation.py b/server/continuedev/core/observation.py index 8a5e454e..8a5e454e 100644 --- a/continuedev/src/continuedev/core/observation.py +++ b/server/continuedev/core/observation.py diff --git a/continuedev/src/continuedev/core/sdk.py b/server/continuedev/core/sdk.py index 7090283f..408168f6 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/server/continuedev/core/sdk.py @@ -2,7 +2,7 @@ import os import traceback from typing import Coroutine, List, Optional, Union -from ..libs.llm import LLM +from ..libs.llm.base import LLM from ..libs.util.devdata import dev_data_logger from ..libs.util.logging import logger from ..libs.util.paths import ( @@ -21,14 +21,6 @@ from ..models.filesystem_edit import ( FileSystemEdit, ) from ..models.main import Range -from ..plugins.steps.core.core import ( - DefaultModelEditCodeStep, - FileSystemEditStep, - MessageStep, - RangeInFileWithContents, - ShellCommandsStep, - WaitForUserConfirmationStep, -) from ..server.ide_protocol import AbstractIdeProtocolServer from .abstract_sdk import AbstractContinueSDK from .config import ContinueConfig @@ -43,6 +35,14 @@ from .main import ( ) from .models import Models from .observation import Observation +from .steps import ( + DefaultModelEditCodeStep, + FileSystemEditStep, + MessageStep, + RangeInFileWithContents, + ShellCommandsStep, + WaitForUserConfirmationStep, +) class Autopilot: @@ -64,25 +64,19 @@ class ContinueSDK(AbstractContinueSDK): self.__autopilot = autopilot self.context = autopilot.context - @classmethod - async def create( - cls, autopilot: Autopilot, config: Optional[ContinueConfig] = None - ) -> "ContinueSDK": - sdk = ContinueSDK(autopilot) - autopilot.continue_sdk = sdk - + async def load(self, config: Optional[ContinueConfig] = None): # Create necessary directories getDiffsFolderPath() try: - sdk.config = config or sdk._load_config_dot_py() + self.config = config or self._load_config_dot_py() except Exception as e: logger.error(f"Failed to load config.py: {traceback.format_exception(e)}") - sdk.config = ( + self.config = ( ContinueConfig() - if sdk._last_valid_config is None - else sdk._last_valid_config + if self._last_valid_config is None + else self._last_valid_config ) formatted_err = "\n".join(traceback.format_exception(e)) @@ -90,14 +84,14 @@ class ContinueSDK(AbstractContinueSDK): name="Invalid Continue Config File", message=formatted_err ) msg_step.description = f"Falling back to default config settings due to the following error in `~/.continue/config.py`.\n```\n{formatted_err}\n```\n\nIt's possible this was caused by an update to the Continue config format. If you'd like to see the new recommended default `config.py`, check [here](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/constants/default_config.py).\n\nIf the error is related to OpenAIServerInfo, see the updated way of using these parameters [here](https://continue.dev/docs/customization#azure-openai-service)." 
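# In outline, the load() flow introduced above makes config loading re-runnable:
#   1. try self._load_config_dot_py()
#   2. on failure, fall back to the last valid config (or ContinueConfig()
#      defaults), surface the formatted traceback as a MessageStep, and
#      open ~/.continue/config.py in the IDE (continued below)
#   3. (re)start the models and telemetry from whatever config was loaded
# create() now simply constructs the SDK and awaits load(), which is what
# Autopilot.reload_config() ultimately re-runs when config.py changes.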
- sdk.history.add_node( + self.history.add_node( HistoryNode(step=msg_step, observation=None, depth=0, active=False) ) - await sdk.ide.setFileOpen(getConfigFilePath()) + await self.ide.setFileOpen(getConfigFilePath()) # Start models - sdk.models = sdk.config.models - await sdk.models.start(sdk) + self.models = self.config.models + await self.models.start(self) # Start LSP # async def start_lsp(): @@ -116,9 +110,18 @@ class ContinueSDK(AbstractContinueSDK): # When the config is loaded, setup posthog logger posthog_logger.setup( - sdk.ide.unique_id, sdk.config.allow_anonymous_telemetry, sdk.ide.ide_info + self.ide.unique_id, self.config.allow_anonymous_telemetry, self.ide.ide_info ) - dev_data_logger.setup(sdk.config.user_token, sdk.config.data_server_url) + dev_data_logger.setup(self.config.user_token, self.config.data_server_url) + + @classmethod + async def create( + cls, autopilot: Autopilot, config: Optional[ContinueConfig] = None + ) -> "ContinueSDK": + sdk = ContinueSDK(autopilot) + autopilot.continue_sdk = sdk + + await sdk.load(config=config) return sdk @@ -258,9 +261,6 @@ class ContinueSDK(AbstractContinueSDK): if e.name == "continuedev.src": convertConfigImports(shorten=True) return self._load_config_dot_py(retry=False) - elif e.name.startswith("continuedev."): - convertConfigImports(shorten=False) - return self._load_config_dot_py(retry=False) else: raise e diff --git a/continuedev/src/continuedev/plugins/steps/core/core.py b/server/continuedev/core/steps.py index ad2e88e2..5c20dd15 100644 --- a/continuedev/src/continuedev/plugins/steps/core/core.py +++ b/server/continuedev/core/steps.py @@ -4,20 +4,18 @@ import subprocess from textwrap import dedent from typing import Coroutine, List, Optional, Union -from ....core.main import ChatMessage, ContinueCustomException, Step -from ....core.observation import Observation, TextObservation, UserInputObservation -from ....libs.llm import LLM -from ....libs.llm.openai_free_trial import OpenAIFreeTrial -from ....libs.util.count_tokens import DEFAULT_MAX_TOKENS -from ....libs.util.devdata import dev_data_logger -from ....libs.util.strings import ( +from ..libs.llm.base import LLM +from ..libs.llm.openai_free_trial import OpenAIFreeTrial +from ..libs.util.count_tokens import DEFAULT_MAX_TOKENS +from ..libs.util.devdata import dev_data_logger +from ..libs.util.strings import ( dedent_and_get_common_whitespace, remove_quotes_and_escapes, ) -from ....libs.util.telemetry import posthog_logger -from ....libs.util.templating import render_prompt_template -from ....models.filesystem import FileSystem, RangeInFile, RangeInFileWithContents -from ....models.filesystem_edit import ( +from ..libs.util.telemetry import posthog_logger +from ..libs.util.templating import render_prompt_template +from ..models.filesystem import FileSystem, RangeInFile, RangeInFileWithContents +from ..models.filesystem_edit import ( EditDiff, FileEdit, FileEditWithFullContents, @@ -25,7 +23,9 @@ from ....models.filesystem_edit import ( ) # from ....libs.llm.replicate import ReplicateLLM -from ....models.main import Range +from ..models.main import Range +from .main import ChatMessage, ContinueCustomException, Step +from .observation import Observation, TextObservation, UserInputObservation class ContinueSDK: diff --git a/continuedev/src/continuedev/headless/__init__.py b/server/continuedev/headless/__init__.py index 2ecdcce6..2ecdcce6 100644 --- a/continuedev/src/continuedev/headless/__init__.py +++ b/server/continuedev/headless/__init__.py diff --git 
a/continuedev/src/continuedev/headless/headless_ide.py b/server/continuedev/headless/headless_ide.py
index 088da2c9..088da2c9 100644
--- a/continuedev/src/continuedev/headless/headless_ide.py
+++ b/server/continuedev/headless/headless_ide.py
diff --git a/continuedev/__init__.py b/server/continuedev/libs/__init__.py
index e69de29b..e69de29b 100644
--- a/continuedev/__init__.py
+++ b/server/continuedev/libs/__init__.py
diff --git a/continuedev/src/continuedev/libs/chroma/.gitignore b/server/continuedev/libs/chroma/.gitignore
index 6320cd24..6320cd24 100644
--- a/continuedev/src/continuedev/libs/chroma/.gitignore
+++ b/server/continuedev/libs/chroma/.gitignore
diff --git a/continuedev/src/continuedev/libs/chroma/query.py b/server/continuedev/libs/chroma/query.py
index d77cce49..d77cce49 100644
--- a/continuedev/src/continuedev/libs/chroma/query.py
+++ b/server/continuedev/libs/chroma/query.py
diff --git a/continuedev/src/continuedev/libs/chroma/update.py b/server/continuedev/libs/chroma/update.py
index 7a1217f9..7a1217f9 100644
--- a/continuedev/src/continuedev/libs/chroma/update.py
+++ b/server/continuedev/libs/chroma/update.py
diff --git a/continuedev/src/continuedev/libs/constants/default_config.py b/server/continuedev/libs/constants/default_config.py
index 92913001..a007eef1 100644
--- a/continuedev/src/continuedev/libs/constants/default_config.py
+++ b/server/continuedev/libs/constants/default_config.py
@@ -5,20 +5,24 @@
 This is the Continue configuration file.
 See https://continue.dev/docs/customization for documentation of the available options.
 \"\"\"

-from continuedev.src.continuedev.core.models import Models
-from continuedev.src.continuedev.core.config import CustomCommand, SlashCommand, ContinueConfig
-from continuedev.src.continuedev.plugins.context_providers.github import GitHubIssuesContextProvider
-from continuedev.src.continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial
+from continuedev.core.models import Models
+from continuedev.core.config import CustomCommand, SlashCommand, ContinueConfig
+from continuedev.libs.llm import OpenAIFreeTrial

-from continuedev.src.continuedev.plugins.steps.open_config import OpenConfigStep
-from continuedev.src.continuedev.plugins.steps.clear_history import ClearHistoryStep
-from continuedev.src.continuedev.plugins.steps.comment_code import CommentCodeStep
-from continuedev.src.continuedev.plugins.steps.share_session import ShareSessionStep
-from continuedev.src.continuedev.plugins.steps.main import EditHighlightedCodeStep
-from continuedev.src.continuedev.plugins.steps.cmd import GenerateShellCommandStep
-from continuedev.src.continuedev.plugins.context_providers.diff import DiffContextProvider
-from continuedev.src.continuedev.plugins.context_providers.url import URLContextProvider
-from continuedev.src.continuedev.plugins.context_providers.terminal import TerminalContextProvider
+from continuedev.plugins.context_providers import (
+    DiffContextProvider,
+    TerminalContextProvider,
+    URLContextProvider,
+    GitHubIssuesContextProvider
+)
+from continuedev.plugins.steps import (
+    ClearHistoryStep,
+    CommentCodeStep,
+    EditHighlightedCodeStep,
+    GenerateShellCommandStep,
+    OpenConfigStep,
+)
+from continuedev.plugins.steps.share_session import ShareSessionStep

 config = ContinueConfig(
     allow_anonymous_telemetry=True,
diff --git a/continuedev/src/continuedev/libs/constants/main.py b/server/continuedev/libs/constants/main.py
index f5964df6..f5964df6 100644
--- a/continuedev/src/continuedev/libs/constants/main.py
+++ b/server/continuedev/libs/constants/main.py
diff --git a/server/continuedev/libs/llm/__init__.py b/server/continuedev/libs/llm/__init__.py
new file mode 100644
index 00000000..829ffede
--- /dev/null
+++ b/server/continuedev/libs/llm/__init__.py
@@ -0,0 +1,14 @@
+from .anthropic import AnthropicLLM  # noqa: F401
+from .ggml import GGML  # noqa: F401
+from .google_palm_api import GooglePaLMAPI  # noqa: F401
+from .hf_inference_api import HuggingFaceInferenceAPI  # noqa: F401
+from .hf_tgi import HuggingFaceTGI  # noqa: F401
+from .llamacpp import LlamaCpp  # noqa: F401
+from .ollama import Ollama  # noqa: F401
+from .openai import OpenAI  # noqa: F401
+from .openai_free_trial import OpenAIFreeTrial  # noqa: F401
+from .proxy_server import ProxyServer  # noqa: F401
+from .queued import QueuedLLM  # noqa: F401
+from .replicate import ReplicateLLM  # noqa: F401
+from .text_gen_interface import TextGenUI  # noqa: F401
+from .together import TogetherLLM  # noqa: F401
diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/server/continuedev/libs/llm/anthropic.py
index 2430e786..7d0708f1 100644
--- a/continuedev/src/continuedev/libs/llm/anthropic.py
+++ b/server/continuedev/libs/llm/anthropic.py
@@ -2,7 +2,7 @@ from typing import Any, Callable, Coroutine

 from anthropic import AI_PROMPT, HUMAN_PROMPT, AsyncAnthropic

-from ..llm import LLM, CompletionOptions
+from .base import LLM, CompletionOptions
 from .prompts.chat import anthropic_template_messages

@@ -11,7 +11,7 @@ class AnthropicLLM(LLM):
     Import the `AnthropicLLM` class and set it as the default model:

     ```python title="~/.continue/config.py"
-    from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM
+    from continuedev.libs.llm.anthropic import AnthropicLLM

     config = ContinueConfig(
         ...
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/server/continuedev/libs/llm/base.py
index e6a90ef7..d77cb9fc 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/server/continuedev/libs/llm/base.py
@@ -6,7 +6,6 @@ import certifi
 from pydantic import Field, validator

 from ...core.main import ChatMessage
-from ...libs.util.devdata import dev_data_logger
 from ...models.main import ContinueBaseModel
 from ..util.count_tokens import (
     DEFAULT_ARGS,
@@ -16,6 +15,8 @@ from ..util.count_tokens import (
     format_chat_messages,
     prune_raw_prompt_from_top,
 )
+from ..util.devdata import dev_data_logger
+from ..util.telemetry import posthog_logger


 class CompletionOptions(ContinueBaseModel):
@@ -36,7 +37,7 @@ class CompletionOptions(ContinueBaseModel):
     top_p: Optional[float] = Field(None, description="The top_p of the completion.")
     top_k: Optional[int] = Field(None, description="The top_k of the completion.")
     presence_penalty: Optional[float] = Field(
-        None, description="The presence penalty of the completion."
+        None, description="The presence penalty of the completion."
     )
     frequency_penalty: Optional[float] = Field(
         None, description="The frequency penalty of the completion."
@@ -57,6 +58,12 @@ class LLM(ContinueBaseModel):
         None,
         description="A title that will identify this model in the model selection dropdown",
     )
+
+    unique_id: Optional[str] = Field(None, description="The unique ID of the user.")
+    model: str = Field(
+        ..., description="The name of the model to be used (e.g. gpt-4, codellama)"
+    )
+
     system_message: Optional[str] = Field(
         None, description="A system message that will always be followed by the LLM"
     )
@@ -66,18 +73,20 @@ class LLM(ContinueBaseModel):
         description="The maximum context length of the LLM in tokens, as counted by count_tokens.",
     )

-    unique_id: Optional[str] = Field(None, description="The unique ID of the user.")
-    model: str = Field(
-        ..., description="The name of the model to be used (e.g. gpt-4, codellama)"
-    )
-
-    max_tokens: int = Field(
-        DEFAULT_MAX_TOKENS, description="The maximum number of tokens to generate."
-    )
-
     stop_tokens: Optional[List[str]] = Field(
         None, description="Tokens that will stop the completion."
     )
+    temperature: Optional[float] = Field(
+        None, description="The temperature of the completion."
+    )
+    top_p: Optional[float] = Field(None, description="The top_p of the completion.")
+    top_k: Optional[int] = Field(None, description="The top_k of the completion.")
+    presence_penalty: Optional[float] = Field(
+        None, description="The presence penalty of the completion."
+    )
+    frequency_penalty: Optional[float] = Field(
+        None, description="The frequency penalty of the completion."
+    )

     timeout: Optional[int] = Field(
         300,
@@ -94,6 +103,10 @@ class LLM(ContinueBaseModel):
         None,
         description="Proxy URL to use when making the HTTP request",
     )
+    headers: Optional[Dict[str, str]] = Field(
+        None,
+        description="Headers to use when making the HTTP request",
+    )
     prompt_templates: dict = Field(
         {},
         description='A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.',
@@ -148,13 +161,33 @@
             "ca_bundle_path": {
                 "description": "Path to a custom CA bundle to use when making the HTTP request"
             },
+            "headers": {
+                "description": "Headers to use when making the HTTP request"
+            },
             "proxy": {"description": "Proxy URL to use when making the HTTP request"},
+            "stop_tokens": {"description": "Tokens that will stop the completion."},
+            "temperature": {
+                "description": "The sampling temperature used for generation."
+            },
+            "top_p": {
+                "description": "The top_p sampling parameter used for generation."
+            },
+            "top_k": {
+                "description": "The top_k sampling parameter used for generation."
+            },
+            "presence_penalty": {
+                "description": "The presence penalty used for completions."
+            },
+            "frequency_penalty": {
+                "description": "The frequency penalty used for completions."
+ }, } def dict(self, **kwargs): original_dict = super().dict(**kwargs) original_dict.pop("write_log") - original_dict.pop("template_messages") + if self.template_messages is not None: + original_dict["template_messages"] = self.template_messages.__name__ original_dict.pop("unique_id") original_dict["class_name"] = self.__class__.__name__ return original_dict @@ -175,6 +208,7 @@ class LLM(ContinueBaseModel): return aiohttp.ClientSession( connector=aiohttp.TCPConnector(verify_ssl=False), timeout=aiohttp.ClientTimeout(total=self.timeout), + headers=self.headers ) else: ca_bundle_path = ( @@ -184,6 +218,7 @@ class LLM(ContinueBaseModel): return aiohttp.ClientSession( connector=aiohttp.TCPConnector(ssl_context=ssl_context), timeout=aiohttp.ClientTimeout(total=self.timeout), + headers=self.headers, ) def collect_args(self, options: CompletionOptions) -> Dict[str, Any]: @@ -235,13 +270,13 @@ class LLM(ContinueBaseModel): """Yield completion response, either streamed or not.""" options = CompletionOptions( model=model or self.model, - temperature=temperature, - top_p=top_p, - top_k=top_k, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, + temperature=temperature or self.temperature, + top_p=top_p or self.top_p, + top_k=top_k or self.top_k, + presence_penalty=presence_penalty or self.presence_penalty, + frequency_penalty=frequency_penalty or self.frequency_penalty, stop=stop or self.stop_tokens, - max_tokens=max_tokens or self.max_tokens, + max_tokens=max_tokens, functions=functions, ) @@ -267,6 +302,10 @@ class LLM(ContinueBaseModel): "tokens_generated", {"model": self.model, "tokens": self.count_tokens(completion)}, ) + posthog_logger.capture_event( + "tokens_generated", + {"model": self.model, "tokens": self.count_tokens(completion)}, + ) async def complete( self, @@ -286,13 +325,13 @@ class LLM(ContinueBaseModel): """Yield completion response, either streamed or not.""" options = CompletionOptions( model=model or self.model, - temperature=temperature, - top_p=top_p, - top_k=top_k, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, + temperature=temperature or self.temperature, + top_p=top_p or self.top_p, + top_k=top_k or self.top_k, + presence_penalty=presence_penalty or self.presence_penalty, + frequency_penalty=frequency_penalty or self.frequency_penalty, stop=stop or self.stop_tokens, - max_tokens=max_tokens or self.max_tokens, + max_tokens=max_tokens, functions=functions, ) @@ -315,6 +354,10 @@ class LLM(ContinueBaseModel): "tokens_generated", {"model": self.model, "tokens": self.count_tokens(completion)}, ) + posthog_logger.capture_event( + "tokens_generated", + {"model": self.model, "tokens": self.count_tokens(completion)}, + ) return completion @@ -335,13 +378,13 @@ class LLM(ContinueBaseModel): """Yield completion response, either streamed or not.""" options = CompletionOptions( model=model or self.model, - temperature=temperature, - top_p=top_p, - top_k=top_k, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, + temperature=temperature or self.temperature, + top_p=top_p or self.top_p, + top_k=top_k or self.top_k, + presence_penalty=presence_penalty or self.presence_penalty, + frequency_penalty=frequency_penalty or self.frequency_penalty, stop=stop or self.stop_tokens, - max_tokens=max_tokens or self.max_tokens, + max_tokens=max_tokens, functions=functions, ) @@ -376,6 +419,10 @@ class LLM(ContinueBaseModel): "tokens_generated", {"model": self.model, "tokens": self.count_tokens(completion)}, ) + 
posthog_logger.capture_event( + "tokens_generated", + {"model": self.model, "tokens": self.count_tokens(completion)}, + ) def _stream_complete( self, prompt, options: CompletionOptions diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/server/continuedev/libs/llm/ggml.py index ae185b28..55d580a8 100644 --- a/continuedev/src/continuedev/libs/llm/ggml.py +++ b/server/continuedev/libs/llm/ggml.py @@ -1,12 +1,11 @@ import json -from typing import Any, Callable, Coroutine, Dict, List, Optional +from typing import Any, Callable, Coroutine, Dict, List, Literal, Optional from pydantic import Field from ...core.main import ChatMessage -from ..llm import LLM from ..util.logging import logger -from . import CompletionOptions +from .base import LLM, CompletionOptions from .openai import CHAT_MODELS from .prompts.chat import llama2_template_messages from .prompts.edit import simplified_edit_prompt @@ -19,7 +18,7 @@ class GGML(LLM): Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.libs.llm.ggml import GGML + from continuedev.libs.llm.ggml import GGML config = ContinueConfig( ... @@ -39,6 +38,20 @@ class GGML(LLM): model: str = Field( "ggml", description="The name of the model to use (optional for the GGML class)" ) + + api_base: Optional[str] = Field(None, description="OpenAI API base URL.") + + api_type: Optional[Literal["azure", "openai"]] = Field( + None, description="OpenAI API type." + ) + + api_version: Optional[str] = Field( + None, description="OpenAI API version. For use with Azure OpenAI Service." + ) + + engine: Optional[str] = Field( + None, description="OpenAI engine. For use with Azure OpenAI Service." + ) template_messages: Optional[ Callable[[List[Dict[str, str]]], str] @@ -56,16 +69,32 @@ class GGML(LLM): "Content-Type": "application/json", } if self.api_key is not None: - headers["Authorization"] = f"Bearer {self.api_key}" + if self.api_type == "azure": + headers["api-key"] = self.api_key + else: + headers["Authorization"] = f"Bearer {self.api_key}" return headers + + def get_full_server_url(self, endpoint: str): + endpoint = endpoint.lstrip("/").rstrip("/") + + if self.api_type == "azure": + if self.engine is None or self.api_version is None or self.api_base is None: + raise Exception( + "For Azure OpenAI Service, you must specify engine, api_version, and api_base." 
+            )
+
+            return f"{self.api_base}/openai/deployments/{self.engine}/{endpoint}?api-version={self.api_version}"
+        else:
+            return f"{self.server_url}/v1/{endpoint}"

     async def _raw_stream_complete(self, prompt, options):
         args = self.collect_args(options)

         async with self.create_client_session() as client_session:
             async with client_session.post(
-                f"{self.server_url}/v1/completions",
+                self.get_full_server_url(endpoint="completions"),
                 json={
                     "prompt": prompt,
                     "stream": True,
@@ -74,6 +103,11 @@
                 headers=self.get_headers(),
                 proxy=self.proxy,
             ) as resp:
+                if resp.status != 200:
+                    raise Exception(
+                        f"Error calling /completions endpoint: {resp.status}"
+                    )
+
                 async for line in resp.content.iter_any():
                     if line:
                         chunks = line.decode("utf-8")
@@ -103,11 +137,16 @@
         async def generator():
             async with self.create_client_session() as client_session:
                 async with client_session.post(
-                    f"{self.server_url}/v1/chat/completions",
+                    self.get_full_server_url(endpoint="chat/completions"),
                     json={"messages": messages, "stream": True, **args},
                     headers=self.get_headers(),
                     proxy=self.proxy,
                 ) as resp:
+                    if resp.status != 200:
+                        raise Exception(
+                            f"Error calling /chat/completions endpoint: {resp.status}"
+                        )
+
                     async for line, end in resp.content.iter_chunks():
                         json_chunk = line.decode("utf-8")
                         chunks = json_chunk.split("\n")
@@ -137,7 +176,7 @@
         async with self.create_client_session() as client_session:
             async with client_session.post(
-                f"{self.server_url}/v1/completions",
+                self.get_full_server_url(endpoint="completions"),
                 json={
                     "prompt": prompt,
                     **args,
@@ -145,6 +184,11 @@
                 headers=self.get_headers(),
                 proxy=self.proxy,
             ) as resp:
+                if resp.status != 200:
+                    raise Exception(
+                        f"Error calling /completions endpoint: {resp.status}"
+                    )
+
                 text = await resp.text()
                 try:
                     completion = json.loads(text)["choices"][0]["text"]
diff --git a/continuedev/src/continuedev/libs/llm/google_palm_api.py b/server/continuedev/libs/llm/google_palm_api.py
index 8c0b30d1..3379fefe 100644
--- a/continuedev/src/continuedev/libs/llm/google_palm_api.py
+++ b/server/continuedev/libs/llm/google_palm_api.py
@@ -4,7 +4,7 @@ import requests

 from pydantic import Field

 from ...core.main import ChatMessage
-from ..llm import LLM
+from .base import LLM


 class GooglePaLMAPI(LLM):
@@ -12,8 +12,8 @@ class GooglePaLMAPI(LLM):
     The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:

     ```python title="~/.continue/config.py"
-    from continuedev.src.continuedev.core.models import Models
-    from continuedev.src.continuedev.libs.llm.hf_inference_api import GooglePaLMAPI
+    from continuedev.core.models import Models
+    from continuedev.libs.llm.google_palm_api import GooglePaLMAPI

     config = ContinueConfig(
         ...
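Taken together, the new `api_type`, `api_base`, `api_version`, and `engine` fields on `GGML` mean an Azure OpenAI Service setup can be written roughly like the sketch below. The resource URL, deployment name, and API version are placeholder values, and `api_key` is assumed to be the base `LLM` field that `get_headers` reads:

```python title="~/.continue/config.py"
from continuedev.core.config import ContinueConfig
from continuedev.core.models import Models
from continuedev.libs.llm.ggml import GGML

config = ContinueConfig(
    models=Models(
        default=GGML(
            api_type="azure",
            # Placeholder Azure resource details -- substitute your own
            api_base="https://<MY_RESOURCE>.openai.azure.com",
            engine="<MY_DEPLOYMENT_NAME>",
            api_version="2023-05-15",
            api_key="<MY_API_KEY>",
        )
    )
)
```

With `api_type="azure"`, `get_full_server_url` above routes requests to `{api_base}/openai/deployments/{engine}/...` and `get_headers` switches from a `Bearer` token to the `api-key` header.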
diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/server/continuedev/libs/llm/hf_inference_api.py index 6c8c55a9..990ec7c8 100644 --- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py +++ b/server/continuedev/libs/llm/hf_inference_api.py @@ -3,7 +3,7 @@ from typing import Callable, Dict, List, Union from huggingface_hub import InferenceClient from pydantic import Field -from ..llm import LLM, CompletionOptions +from .base import LLM, CompletionOptions from .prompts.chat import llama2_template_messages from .prompts.edit import simplified_edit_prompt @@ -13,8 +13,8 @@ class HuggingFaceInferenceAPI(LLM): Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing), access the Inference Endpoints [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, and fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and then deploy your model by clicking “Create Endpoint”. Change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.core.models import Models - from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI + from continuedev.core.models import Models + from continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI config = ContinueConfig( ... diff --git a/continuedev/src/continuedev/libs/llm/hf_tgi.py b/server/continuedev/libs/llm/hf_tgi.py index 27d71cb4..62458db4 100644 --- a/continuedev/src/continuedev/libs/llm/hf_tgi.py +++ b/server/continuedev/libs/llm/hf_tgi.py @@ -4,7 +4,7 @@ from typing import Any, Callable, List from pydantic import Field from ...core.main import ChatMessage -from ..llm import LLM, CompletionOptions +from .base import LLM, CompletionOptions from .prompts.chat import llama2_template_messages from .prompts.edit import simplified_edit_prompt diff --git a/continuedev/src/continuedev/libs/llm/hugging_face.py b/server/continuedev/libs/llm/hugging_face.py index c2e934c0..c2e934c0 100644 --- a/continuedev/src/continuedev/libs/llm/hugging_face.py +++ b/server/continuedev/libs/llm/hugging_face.py diff --git a/continuedev/src/continuedev/libs/llm/llamacpp.py b/server/continuedev/libs/llm/llamacpp.py index c7144745..bc856a52 100644 --- a/continuedev/src/continuedev/libs/llm/llamacpp.py +++ b/server/continuedev/libs/llm/llamacpp.py @@ -3,7 +3,7 @@ from typing import Any, Callable, Dict from pydantic import Field -from ..llm import LLM +from .base import LLM from .prompts.chat import llama2_template_messages from .prompts.edit import simplified_edit_prompt @@ -19,7 +19,7 @@ class LlamaCpp(LLM): After it's up and running, change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp + from continuedev.libs.llm.llamacpp import LlamaCpp config = ContinueConfig( ... 
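The Hugging Face and llama.cpp classes above all default `template_messages` to `llama2_template_messages`, which is added in `prompts/chat.py` later in this patch. A rough sketch of what that hook receives and returns, assuming the `server/` layout from this patch is importable as the `continuedev` package:

```python
from continuedev.libs.llm.prompts.chat import llama2_template_messages

msgs = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Summarize this diff."},
]

# Renders the chat history into a single Llama-2 prompt string: the system
# message is wrapped in <<SYS>> tags inside the first [INST] block.
print(llama2_template_messages(msgs))
```

Swapping in a different callable (for example `template_alpaca_messages` from the same module) is how a model's prompt format is customized without touching the LLM class itself.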
diff --git a/continuedev/src/continuedev/libs/llm/ollama.py b/server/continuedev/libs/llm/ollama.py index e9b421d5..82cbc852 100644 --- a/continuedev/src/continuedev/libs/llm/ollama.py +++ b/server/continuedev/libs/llm/ollama.py @@ -4,8 +4,9 @@ from typing import Callable import aiohttp from pydantic import Field -from ..llm import LLM +from ...core.main import ContinueCustomException from ..util.logging import logger +from .base import LLM from .prompts.chat import llama2_template_messages from .prompts.edit import simplified_edit_prompt @@ -15,7 +16,7 @@ class Ollama(LLM): [Ollama](https://ollama.ai/) is an application for Mac and Linux that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.libs.llm.ollama import Ollama + from continuedev.libs.llm.ollama import Ollama config = ContinueConfig( ... @@ -61,6 +62,14 @@ class Ollama(LLM): async def stop(self): await self._client_session.close() + async def get_downloaded_models(self): + async with self._client_session.get( + f"{self.server_url}/api/tags", + proxy=self.proxy, + ) as resp: + js_data = await resp.json() + return list(map(lambda x: x["name"], js_data["models"])) + async def _stream_complete(self, prompt, options): async with self._client_session.post( f"{self.server_url}/api/generate", @@ -72,6 +81,20 @@ class Ollama(LLM): }, proxy=self.proxy, ) as resp: + if resp.status == 400: + txt = await resp.text() + extra_msg = "" + if "no such file" in txt: + extra_msg = f"\n\nThis means that the model '{self.model}' is not downloaded.\n\nYou have the following models downloaded: {', '.join(await self.get_downloaded_models())}.\n\nTo download this model, run `ollama run {self.model}` in your terminal." + raise ContinueCustomException( + f"Ollama returned an error: {txt}{extra_msg}", + "Invalid request to Ollama", + ) + elif resp.status != 200: + raise ContinueCustomException( + f"Ollama returned an error: {await resp.text()}", + "Invalid request to Ollama", + ) async for line in resp.content.iter_any(): if line: json_chunk = line.decode("utf-8") diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/server/continuedev/libs/llm/openai.py index 7541d92e..ba29279b 100644 --- a/continuedev/src/continuedev/libs/llm/openai.py +++ b/server/continuedev/libs/llm/openai.py @@ -5,7 +5,7 @@ import openai from pydantic import Field from ...core.main import ChatMessage -from ..llm import LLM +from .base import LLM CHAT_MODELS = { "gpt-3.5-turbo", @@ -33,7 +33,7 @@ class OpenAI(LLM): If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.libs.llm.openai import OpenAI + from continuedev.libs.llm.openai import OpenAI config = ContinueConfig( ... 
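The Ollama changes above call `/api/tags` so that a 400 "no such file" response can be reported as a missing model, together with the models that are already downloaded. A standalone sketch of the same lookup, assuming a local Ollama server on its default port (11434):

```python
import asyncio

import aiohttp


async def downloaded_models(server_url: str = "http://localhost:11434") -> list:
    # Mirrors Ollama.get_downloaded_models: GET /api/tags returns
    # {"models": [{"name": "codellama:7b", ...}, ...]}
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{server_url}/api/tags") as resp:
            data = await resp.json()
            return [model["name"] for model in data["models"]]


print(asyncio.run(downloaded_models()))
```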
@@ -118,11 +118,12 @@ class OpenAI(LLM):
             async for chunk in await openai.ChatCompletion.acreate(
                 messages=[{"role": "user", "content": prompt}],
                 **args,
+                headers=self.headers,
             ):
                 if len(chunk.choices) > 0 and "content" in chunk.choices[0].delta:
                     yield chunk.choices[0].delta.content
         else:
-            async for chunk in await openai.Completion.acreate(prompt=prompt, **args):
+            async for chunk in await openai.Completion.acreate(prompt=prompt, **args, headers=self.headers):
                 if len(chunk.choices) > 0:
                     yield chunk.choices[0].text
@@ -133,6 +134,7 @@
             messages=messages,
             stream=True,
             **args,
+            headers=self.headers,
         ):
             if not hasattr(chunk, "choices") or len(chunk.choices) == 0:
                 continue
@@ -145,9 +147,10 @@
             resp = await openai.ChatCompletion.acreate(
                 messages=[{"role": "user", "content": prompt}],
                 **args,
+                headers=self.headers,
             )
             return resp.choices[0].message.content
         else:
             return (
-                (await openai.Completion.acreate(prompt=prompt, **args)).choices[0].text
+                (await openai.Completion.acreate(prompt=prompt, **args, headers=self.headers)).choices[0].text
             )
diff --git a/continuedev/src/continuedev/libs/llm/openai_free_trial.py b/server/continuedev/libs/llm/openai_free_trial.py
index 6d96f8bb..b6e707f9 100644
--- a/continuedev/src/continuedev/libs/llm/openai_free_trial.py
+++ b/server/continuedev/libs/llm/openai_free_trial.py
@@ -1,7 +1,7 @@
 from typing import Callable, List, Optional

 from ...core.main import ChatMessage
-from . import LLM
+from .base import LLM
 from .openai import OpenAI
 from .proxy_server import ProxyServer
diff --git a/continuedev/src/continuedev/libs/llm/prompt_utils.py b/server/continuedev/libs/llm/prompt_utils.py
index 930b5220..930b5220 100644
--- a/continuedev/src/continuedev/libs/llm/prompt_utils.py
+++ b/server/continuedev/libs/llm/prompt_utils.py
diff --git a/server/continuedev/libs/llm/prompts/chat.py b/server/continuedev/libs/llm/prompts/chat.py
new file mode 100644
index 00000000..036f1b1a
--- /dev/null
+++ b/server/continuedev/libs/llm/prompts/chat.py
@@ -0,0 +1,174 @@
+from textwrap import dedent
+from typing import Dict, List
+
+from anthropic import AI_PROMPT, HUMAN_PROMPT
+
+
+def anthropic_template_messages(messages: List[Dict[str, str]]) -> str:
+    prompt = ""
+
+    # Anthropic prompt must start with a Human turn
+    if (
+        len(messages) > 0
+        and messages[0]["role"] != "user"
+        and messages[0]["role"] != "system"
+    ):
+        prompt += f"{HUMAN_PROMPT} Hello."
+    for msg in messages:
+        prompt += f"{HUMAN_PROMPT if (msg['role'] == 'user' or msg['role'] == 'system') else AI_PROMPT} {msg['content']} "
+
+    prompt += AI_PROMPT
+    return prompt
+
+
+def template_alpaca_messages(msgs: List[Dict[str, str]]) -> str:
+    prompt = ""
+
+    if msgs[0]["role"] == "system":
+        prompt += f"{msgs[0]['content']}\n"
+        msgs.pop(0)
+
+    for msg in msgs:
+        prompt += "### Instruction:\n" if msg["role"] == "user" else "### Response:\n"
+        prompt += f"{msg['content']}\n"
+
+    prompt += "### Response:\n"
+
+    return prompt
+
+
+def raw_input_template(msgs: List[Dict[str, str]]) -> str:
+    return msgs[-1]["content"]
+
+
+SQL_CODER_DEFAULT_SCHEMA = """\
+CREATE TABLE products (
+  product_id INTEGER PRIMARY KEY, -- Unique ID for each product
+  name VARCHAR(50), -- Name of the product
+  price DECIMAL(10,2), -- Price of each unit of the product
+  quantity INTEGER -- Current quantity in stock
+);
+
+CREATE TABLE customers (
+  customer_id INTEGER PRIMARY KEY, -- Unique ID for each customer
+  name VARCHAR(50), -- Name of the customer
+  address VARCHAR(100) -- Mailing address of the customer
+);
+
+CREATE TABLE salespeople (
+  salesperson_id INTEGER PRIMARY KEY, -- Unique ID for each salesperson
+  name VARCHAR(50), -- Name of the salesperson
+  region VARCHAR(50) -- Geographic sales region
+);
+
+CREATE TABLE sales (
+  sale_id INTEGER PRIMARY KEY, -- Unique ID for each sale
+  product_id INTEGER, -- ID of product sold
+  customer_id INTEGER, -- ID of customer who made purchase
+  salesperson_id INTEGER, -- ID of salesperson who made the sale
+  sale_date DATE, -- Date the sale occurred
+  quantity INTEGER -- Quantity of product sold
+);
+
+CREATE TABLE product_suppliers (
+  supplier_id INTEGER PRIMARY KEY, -- Unique ID for each supplier
+  product_id INTEGER, -- Product ID supplied
+  supply_price DECIMAL(10,2) -- Unit price charged by supplier
+);
+
+-- sales.product_id can be joined with products.product_id
+-- sales.customer_id can be joined with customers.customer_id
+-- sales.salesperson_id can be joined with salespeople.salesperson_id
+-- product_suppliers.product_id can be joined with products.product_id
+"""
+
+
+def _sqlcoder_template_messages(
+    msgs: List[Dict[str, str]], schema: str = SQL_CODER_DEFAULT_SCHEMA
+) -> str:
+    question = msgs[-1]["content"]
+    return f"""\
+Your task is to convert a question into a SQL query, given a Postgres database schema.
+Adhere to these rules:
+- **Deliberately go through the question and database schema word by word** to appropriately answer the question
+- **Use Table Aliases** to prevent ambiguity. For example, `SELECT table1.col1, table2.col1 FROM table1 JOIN table2 ON table1.id = table2.id`.
+- When creating a ratio, always cast the numerator as float
+
+### Input:
+Generate a SQL query that answers the question `{question}`.
+This query will run on a database whose schema is represented in this string:
+{schema}
+
+### Response:
+Based on your instructions, here is the SQL query I have generated to answer the question `{question}`:
+```sql
+"""
+
+
+def sqlcoder_template_messages(schema: str = SQL_CODER_DEFAULT_SCHEMA):
+    if schema == "<MY_DATABASE_SCHEMA>" or schema == "":
+        schema = SQL_CODER_DEFAULT_SCHEMA
+
+    def fn(msgs):
+        return _sqlcoder_template_messages(msgs, schema=schema)
+
+    fn.__name__ = "sqlcoder_template_messages"
+    return fn
+
+
+def llama2_template_messages(msgs: List[Dict[str, str]]) -> str:
+    if len(msgs) == 0:
+        return ""
+
+    if msgs[0]["role"] == "assistant":
+        # These models aren't trained to handle assistant message coming first,
+        # and typically these are just introduction messages from Continue
+        msgs.pop(0)
+
+    prompt = ""
+    has_system = msgs[0]["role"] == "system"
+
+    if has_system and msgs[0]["content"].strip() == "":
+        has_system = False
+        msgs = msgs[1:]
+
+    if has_system:
+        system_message = dedent(
+            f"""\
+            <<SYS>>
+            {msgs[0]["content"]}
+            <</SYS>>
+
+            """
+        )
+        if len(msgs) > 1:
+            prompt += f"[INST] {system_message}{msgs[1]['content']} [/INST]"
+        else:
+            prompt += f"[INST] {system_message} [/INST]"
+            return prompt
+
+    for i in range(2 if has_system else 0, len(msgs)):
+        if msgs[i]["role"] == "user":
+            prompt += f"[INST] {msgs[i]['content']} [/INST]"
+        else:
+            prompt += msgs[i]["content"] + " "
+
+    return prompt
+
+
+def code_llama_template_messages(msgs: List[Dict[str, str]]) -> str:
+    return f"[INST] {msgs[-1]['content']}\n[/INST]"
+
+
+def extra_space_template_messages(msgs: List[Dict[str, str]]) -> str:
+    return f" {msgs[-1]['content']}"
+
+
+def code_llama_python_template_messages(msgs: List[Dict[str, str]]) -> str:
+    return dedent(
+        f"""\
+        [INST]
+        You are an expert Python programmer and personal assistant, here is your task: {msgs[-1]['content']}
+        Your answer should start with a [PYTHON] tag and end with a [/PYTHON] tag.
+        [/INST]"""
+    )
diff --git a/continuedev/src/continuedev/libs/llm/prompts/edit.py b/server/continuedev/libs/llm/prompts/edit.py
index eaa694c5..eaa694c5 100644
--- a/continuedev/src/continuedev/libs/llm/prompts/edit.py
+++ b/server/continuedev/libs/llm/prompts/edit.py
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/server/continuedev/libs/llm/proxy_server.py
index d741fee4..7c3462eb 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/server/continuedev/libs/llm/proxy_server.py
@@ -5,8 +5,8 @@ from typing import List

 import aiohttp

 from ...core.main import ChatMessage
-from ..llm import LLM
 from ..util.telemetry import posthog_logger
+from .base import LLM

 # SERVER_URL = "http://127.0.0.1:8080"
 SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
diff --git a/continuedev/src/continuedev/libs/llm/queued.py b/server/continuedev/libs/llm/queued.py
index 785c5dc0..2db749eb 100644
--- a/continuedev/src/continuedev/libs/llm/queued.py
+++ b/server/continuedev/libs/llm/queued.py
@@ -4,7 +4,7 @@ from typing import Any, List, Union

 from pydantic import Field

 from ...core.main import ChatMessage
-from .
import LLM, CompletionOptions +from .base import LLM, CompletionOptions class QueuedLLM(LLM): @@ -14,7 +14,7 @@ class QueuedLLM(LLM): If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.libs.llm.queued import QueuedLLM + from continuedev.libs.llm.queued import QueuedLLM config = ContinueConfig( ... diff --git a/continuedev/src/continuedev/libs/llm/replicate.py b/server/continuedev/libs/llm/replicate.py index 86840572..3423193b 100644 --- a/continuedev/src/continuedev/libs/llm/replicate.py +++ b/server/continuedev/libs/llm/replicate.py @@ -5,7 +5,7 @@ import replicate from pydantic import Field from ...core.main import ChatMessage -from . import LLM +from .base import LLM from .prompts.edit import simplified_edit_prompt @@ -14,8 +14,8 @@ class ReplicateLLM(LLM): Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.core.models import Models - from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM + from continuedev.core.models import Models + from continuedev.libs.llm.replicate import ReplicateLLM config = ContinueConfig( ... diff --git a/continuedev/src/continuedev/libs/llm/text_gen_interface.py b/server/continuedev/libs/llm/text_gen_interface.py index f726f516..225fd3b6 100644 --- a/continuedev/src/continuedev/libs/llm/text_gen_interface.py +++ b/server/continuedev/libs/llm/text_gen_interface.py @@ -5,7 +5,7 @@ import websockets from pydantic import Field from ...core.main import ChatMessage -from . import LLM +from .base import LLM from .prompts.chat import llama2_template_messages from .prompts.edit import simplest_edit_prompt @@ -15,7 +15,7 @@ class TextGenUI(LLM): TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.libs.llm.text_gen_interface import TextGenUI + from continuedev.libs.llm.text_gen_interface import TextGenUI config = ContinueConfig( ... diff --git a/continuedev/src/continuedev/libs/llm/together.py b/server/continuedev/libs/llm/together.py index 0274bb14..35b3a424 100644 --- a/continuedev/src/continuedev/libs/llm/together.py +++ b/server/continuedev/libs/llm/together.py @@ -5,8 +5,8 @@ import aiohttp from pydantic import Field from ...core.main import ContinueCustomException -from ..llm import LLM from ..util.logging import logger +from .base import LLM from .prompts.chat import llama2_template_messages from .prompts.edit import simplified_edit_prompt @@ -16,8 +16,8 @@ class TogetherLLM(LLM): The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). 
Change `~/.continue/config.py` to look like this: ```python title="~/.continue/config.py" - from continuedev.src.continuedev.core.models import Models - from continuedev.src.continuedev.libs.llm.together import TogetherLLM + from continuedev.core.models import Models + from continuedev.libs.llm.together import TogetherLLM config = ContinueConfig( ... diff --git a/continuedev/src/continuedev/libs/util/calculate_diff.py b/server/continuedev/libs/util/calculate_diff.py index 99301ae7..99301ae7 100644 --- a/continuedev/src/continuedev/libs/util/calculate_diff.py +++ b/server/continuedev/libs/util/calculate_diff.py diff --git a/continuedev/src/continuedev/libs/util/commonregex.py b/server/continuedev/libs/util/commonregex.py index c2f6bb82..c2f6bb82 100644 --- a/continuedev/src/continuedev/libs/util/commonregex.py +++ b/server/continuedev/libs/util/commonregex.py diff --git a/continuedev/src/continuedev/libs/util/copy_codebase.py b/server/continuedev/libs/util/copy_codebase.py index 78f38148..78f38148 100644 --- a/continuedev/src/continuedev/libs/util/copy_codebase.py +++ b/server/continuedev/libs/util/copy_codebase.py diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/server/continuedev/libs/util/count_tokens.py index d895a2cf..d895a2cf 100644 --- a/continuedev/src/continuedev/libs/util/count_tokens.py +++ b/server/continuedev/libs/util/count_tokens.py diff --git a/continuedev/src/continuedev/libs/util/create_async_task.py b/server/continuedev/libs/util/create_async_task.py index 232d3fa1..232d3fa1 100644 --- a/continuedev/src/continuedev/libs/util/create_async_task.py +++ b/server/continuedev/libs/util/create_async_task.py diff --git a/continuedev/src/continuedev/libs/util/devdata.py b/server/continuedev/libs/util/devdata.py index 61b4351d..61b4351d 100644 --- a/continuedev/src/continuedev/libs/util/devdata.py +++ b/server/continuedev/libs/util/devdata.py diff --git a/continuedev/src/continuedev/libs/util/edit_config.py b/server/continuedev/libs/util/edit_config.py index c77eb2e3..4dc427d2 100644 --- a/continuedev/src/continuedev/libs/util/edit_config.py +++ b/server/continuedev/libs/util/edit_config.py @@ -86,21 +86,28 @@ def escape_string(string: str) -> str: return string.replace('"', '\\"').replace("'", "\\'") -def display_val(v: Any): +def display_val(v: Any, k: str = None): + if k == "template_messages": + return v + if isinstance(v, str): return f'"{escape_string(v)}"' return str(v) +def is_default(llm, k, v): + if k == "template_messages" and llm.__fields__[k].default is not None: + return llm.__fields__[k].default.__name__ == v + return v == llm.__fields__[k].default + + def display_llm_class(llm, new: bool = False): sep = ",\n\t\t\t" args = sep.join( [ - f"{k}={display_val(v)}" + f"{k}={display_val(v, k)}" for k, v in llm.dict().items() - if k not in filtered_attrs - and v is not None - and not v == llm.__fields__[k].default + if k not in filtered_attrs and v is not None and not is_default(llm, k, v) ] ) return f"{llm.__class__.__name__}(\n\t\t\t{args}\n\t\t)" @@ -124,6 +131,10 @@ def create_string_node(string: str) -> redbaron.RedBaron: return redbaron.RedBaron(f'"{string}"')[0] +def create_literal_node(literal: str) -> redbaron.RedBaron: + return redbaron.RedBaron(literal)[0] + + def create_float_node(float: float) -> redbaron.RedBaron: return redbaron.RedBaron(f"{float}")[0] diff --git a/continuedev/src/continuedev/libs/util/errors.py b/server/continuedev/libs/util/errors.py index 46074cfc..46074cfc 100644 --- a/continuedev/src/continuedev/libs/util/errors.py 
+++ b/server/continuedev/libs/util/errors.py diff --git a/continuedev/src/continuedev/libs/util/filter_files.py b/server/continuedev/libs/util/filter_files.py index 6ebaa274..6ebaa274 100644 --- a/continuedev/src/continuedev/libs/util/filter_files.py +++ b/server/continuedev/libs/util/filter_files.py diff --git a/continuedev/src/continuedev/libs/util/logging.py b/server/continuedev/libs/util/logging.py index a4dc3562..a4dc3562 100644 --- a/continuedev/src/continuedev/libs/util/logging.py +++ b/server/continuedev/libs/util/logging.py diff --git a/continuedev/src/continuedev/libs/util/map_path.py b/server/continuedev/libs/util/map_path.py index 1dddc2e9..1dddc2e9 100644 --- a/continuedev/src/continuedev/libs/util/map_path.py +++ b/server/continuedev/libs/util/map_path.py diff --git a/continuedev/src/continuedev/libs/util/paths.py b/server/continuedev/libs/util/paths.py index 88c25aff..22e4b5b9 100644 --- a/continuedev/src/continuedev/libs/util/paths.py +++ b/server/continuedev/libs/util/paths.py @@ -125,7 +125,7 @@ def convertConfigImports(shorten: bool) -> str: else: migrated = re.sub( r"(?<!src\.)continuedev\.(?!src)", - "continuedev.src.continuedev.", + "continuedev.", existing_content, ) diff --git a/continuedev/src/continuedev/libs/util/queue.py b/server/continuedev/libs/util/queue.py index e1f98cc6..e1f98cc6 100644 --- a/continuedev/src/continuedev/libs/util/queue.py +++ b/server/continuedev/libs/util/queue.py diff --git a/continuedev/src/continuedev/libs/util/ripgrep.py b/server/continuedev/libs/util/ripgrep.py index f7e0af9a..f7e0af9a 100644 --- a/continuedev/src/continuedev/libs/util/ripgrep.py +++ b/server/continuedev/libs/util/ripgrep.py diff --git a/continuedev/src/continuedev/libs/util/step_name_to_steps.py b/server/continuedev/libs/util/step_name_to_steps.py index 0cca261f..25fd8ba3 100644 --- a/continuedev/src/continuedev/libs/util/step_name_to_steps.py +++ b/server/continuedev/libs/util/step_name_to_steps.py @@ -1,6 +1,7 @@ from typing import Dict from ...core.main import Step +from ...core.steps import UserInputStep from ...libs.util.logging import logger from ...plugins.recipes.AddTransformRecipe.main import AddTransformRecipe from ...plugins.recipes.CreatePipelineRecipe.main import CreatePipelineRecipe @@ -11,7 +12,6 @@ from ...plugins.recipes.DeployPipelineAirflowRecipe.main import ( from ...plugins.steps.chat import SimpleChatStep from ...plugins.steps.clear_history import ClearHistoryStep from ...plugins.steps.comment_code import CommentCodeStep -from ...plugins.steps.core.core import UserInputStep from ...plugins.steps.feedback import FeedbackStep from ...plugins.steps.help import HelpStep from ...plugins.steps.main import EditHighlightedCodeStep diff --git a/continuedev/src/continuedev/libs/util/strings.py b/server/continuedev/libs/util/strings.py index f2b6035f..f2b6035f 100644 --- a/continuedev/src/continuedev/libs/util/strings.py +++ b/server/continuedev/libs/util/strings.py diff --git a/continuedev/src/continuedev/libs/util/telemetry.py b/server/continuedev/libs/util/telemetry.py index 1772fe20..1772fe20 100644 --- a/continuedev/src/continuedev/libs/util/telemetry.py +++ b/server/continuedev/libs/util/telemetry.py diff --git a/continuedev/src/continuedev/libs/util/templating.py b/server/continuedev/libs/util/templating.py index 8d6a32fc..8d6a32fc 100644 --- a/continuedev/src/continuedev/libs/util/templating.py +++ b/server/continuedev/libs/util/templating.py diff --git a/continuedev/src/continuedev/libs/util/traceback/traceback_parsers.py 
b/server/continuedev/libs/util/traceback/traceback_parsers.py index 58a4f728..58a4f728 100644 --- a/continuedev/src/continuedev/libs/util/traceback/traceback_parsers.py +++ b/server/continuedev/libs/util/traceback/traceback_parsers.py diff --git a/continuedev/src/__init__.py b/server/continuedev/models/__init__.py index e69de29b..e69de29b 100644 --- a/continuedev/src/__init__.py +++ b/server/continuedev/models/__init__.py diff --git a/continuedev/src/continuedev/models/filesystem.py b/server/continuedev/models/filesystem.py index 27244c4b..27244c4b 100644 --- a/continuedev/src/continuedev/models/filesystem.py +++ b/server/continuedev/models/filesystem.py diff --git a/continuedev/src/continuedev/models/filesystem_edit.py b/server/continuedev/models/filesystem_edit.py index 9316ff46..9316ff46 100644 --- a/continuedev/src/continuedev/models/filesystem_edit.py +++ b/server/continuedev/models/filesystem_edit.py diff --git a/continuedev/src/continuedev/models/generate_json_schema.py b/server/continuedev/models/generate_json_schema.py index ad727f06..88a1db68 100644 --- a/continuedev/src/continuedev/models/generate_json_schema.py +++ b/server/continuedev/models/generate_json_schema.py @@ -6,7 +6,7 @@ from ..core.config import ContinueConfig from ..core.context import ContextItem from ..core.main import FullState, History, HistoryNode, SessionInfo from ..core.models import Models -from ..libs.llm import LLM +from ..libs.llm.base import LLM from .filesystem import FileEdit, RangeInFile from .filesystem_edit import FileEditWithFullContents from .main import Position, Range, Traceback, TracebackFrame diff --git a/continuedev/src/continuedev/models/main.py b/server/continuedev/models/main.py index 5519d718..5519d718 100644 --- a/continuedev/src/continuedev/models/main.py +++ b/server/continuedev/models/main.py diff --git a/continuedev/src/continuedev/models/reference/generate.py b/server/continuedev/models/reference/generate.py index 0ab9ba85..74912f75 100644 --- a/continuedev/src/continuedev/models/reference/generate.py +++ b/server/continuedev/models/reference/generate.py @@ -32,14 +32,14 @@ CONTEXT_PROVIDER_MODULES = [ def import_llm_module(module_name, module_title): - module_name = f"continuedev.src.continuedev.libs.llm.{module_name}" + module_name = f"continuedev.libs.llm.{module_name}" module = importlib.import_module(module_name) obj = getattr(module, module_title) return obj def import_context_provider_module(module_name, module_title): - module_name = f"continuedev.src.continuedev.plugins.context_providers.{module_name}" + module_name = f"continuedev.plugins.context_providers.{module_name}" module = importlib.import_module(module_name) obj = getattr(module, module_title) return obj @@ -91,7 +91,7 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; ) -llm_module = importlib.import_module("continuedev.src.continuedev.libs.llm") +llm_module = importlib.import_module("continuedev.libs.llm.base") ctx_obj = getattr(llm_module, "LLM") schema = ctx_obj.schema() ctx_properties = schema["properties"].keys() @@ -105,14 +105,14 @@ for module_name, module_title in LLM_MODULES: with open(f"docs/docs/reference/Models/{module_title.lower()}.md", "w") as f: f.write(markdown_docs) -config_module = importlib.import_module("continuedev.src.continuedev.core.config") +config_module = importlib.import_module("continuedev.core.config") config_obj = getattr(config_module, "ContinueConfig") schema = config_obj.schema() markdown_docs = docs_from_schema(schema, "core/config.py") with 
open("docs/docs/reference/config.md", "w") as f: f.write(markdown_docs) -ctx_module = importlib.import_module("continuedev.src.continuedev.core.context") +ctx_module = importlib.import_module("continuedev.core.context") ctx_obj = getattr(ctx_module, "ContextProvider") schema = ctx_obj.schema() ctx_properties = schema["properties"].keys() @@ -136,7 +136,7 @@ for module_name, module_title in CONTEXT_PROVIDER_MODULES: ) as f: f.write(markdown_docs) -# sdk_module = importlib.import_module("continuedev.src.continuedev.core.sdk") +# sdk_module = importlib.import_module("continuedev.core.sdk") # sdk_obj = getattr(sdk_module, "ContinueSDK") # schema = sdk_obj.schema() # markdown_docs = docs_from_schema(schema, "sdk", ignore_properties=[]) diff --git a/server/continuedev/plugins/context_providers/__init__.py b/server/continuedev/plugins/context_providers/__init__.py new file mode 100644 index 00000000..0123bb7b --- /dev/null +++ b/server/continuedev/plugins/context_providers/__init__.py @@ -0,0 +1,7 @@ +from .diff import DiffContextProvider # noqa: F401 +from .filetree import FileTreeContextProvider # noqa: F401 +from .github import GitHubIssuesContextProvider # noqa: F401 +from .google import GoogleContextProvider # noqa: F401 +from .search import SearchContextProvider # noqa: F401 +from .terminal import TerminalContextProvider # noqa: F401 +from .url import URLContextProvider # noqa: F401 diff --git a/continuedev/src/continuedev/plugins/context_providers/diff.py b/server/continuedev/plugins/context_providers/diff.py index 05da3547..05da3547 100644 --- a/continuedev/src/continuedev/plugins/context_providers/diff.py +++ b/server/continuedev/plugins/context_providers/diff.py diff --git a/continuedev/src/continuedev/plugins/context_providers/dynamic.py b/server/continuedev/plugins/context_providers/dynamic.py index 50567621..50567621 100644 --- a/continuedev/src/continuedev/plugins/context_providers/dynamic.py +++ b/server/continuedev/plugins/context_providers/dynamic.py diff --git a/continuedev/src/continuedev/plugins/context_providers/embeddings.py b/server/continuedev/plugins/context_providers/embeddings.py index 86cba311..86cba311 100644 --- a/continuedev/src/continuedev/plugins/context_providers/embeddings.py +++ b/server/continuedev/plugins/context_providers/embeddings.py diff --git a/continuedev/src/continuedev/plugins/context_providers/file.py b/server/continuedev/plugins/context_providers/file.py index 4cfbcfdb..4cfbcfdb 100644 --- a/continuedev/src/continuedev/plugins/context_providers/file.py +++ b/server/continuedev/plugins/context_providers/file.py diff --git a/continuedev/src/continuedev/plugins/context_providers/filetree.py b/server/continuedev/plugins/context_providers/filetree.py index 5b3d3a50..5b3d3a50 100644 --- a/continuedev/src/continuedev/plugins/context_providers/filetree.py +++ b/server/continuedev/plugins/context_providers/filetree.py diff --git a/continuedev/src/continuedev/plugins/context_providers/github.py b/server/continuedev/plugins/context_providers/github.py index c031f310..c031f310 100644 --- a/continuedev/src/continuedev/plugins/context_providers/github.py +++ b/server/continuedev/plugins/context_providers/github.py diff --git a/continuedev/src/continuedev/plugins/context_providers/google.py b/server/continuedev/plugins/context_providers/google.py index 852f4e9a..852f4e9a 100644 --- a/continuedev/src/continuedev/plugins/context_providers/google.py +++ b/server/continuedev/plugins/context_providers/google.py diff --git 
a/continuedev/src/continuedev/plugins/context_providers/highlighted_code.py b/server/continuedev/plugins/context_providers/highlighted_code.py index 3304a71d..3304a71d 100644 --- a/continuedev/src/continuedev/plugins/context_providers/highlighted_code.py +++ b/server/continuedev/plugins/context_providers/highlighted_code.py diff --git a/continuedev/src/continuedev/plugins/context_providers/search.py b/server/continuedev/plugins/context_providers/search.py index a36b2a0a..a36b2a0a 100644 --- a/continuedev/src/continuedev/plugins/context_providers/search.py +++ b/server/continuedev/plugins/context_providers/search.py diff --git a/continuedev/src/continuedev/plugins/context_providers/terminal.py b/server/continuedev/plugins/context_providers/terminal.py index c63239e4..c63239e4 100644 --- a/continuedev/src/continuedev/plugins/context_providers/terminal.py +++ b/server/continuedev/plugins/context_providers/terminal.py diff --git a/continuedev/src/continuedev/plugins/context_providers/url.py b/server/continuedev/plugins/context_providers/url.py index 1ed7c18e..1ed7c18e 100644 --- a/continuedev/src/continuedev/plugins/context_providers/url.py +++ b/server/continuedev/plugins/context_providers/url.py diff --git a/continuedev/src/continuedev/plugins/context_providers/util.py b/server/continuedev/plugins/context_providers/util.py index 61bea8aa..61bea8aa 100644 --- a/continuedev/src/continuedev/plugins/context_providers/util.py +++ b/server/continuedev/plugins/context_providers/util.py diff --git a/continuedev/src/continuedev/plugins/policies/commit.py b/server/continuedev/plugins/policies/commit.py index 2fa43676..2fa43676 100644 --- a/continuedev/src/continuedev/plugins/policies/commit.py +++ b/server/continuedev/plugins/policies/commit.py diff --git a/continuedev/src/continuedev/plugins/policies/default.py b/server/continuedev/plugins/policies/default.py index 574d2a1c..574d2a1c 100644 --- a/continuedev/src/continuedev/plugins/policies/default.py +++ b/server/continuedev/plugins/policies/default.py diff --git a/continuedev/src/continuedev/plugins/policies/headless.py b/server/continuedev/plugins/policies/headless.py index 56ebe31f..9fa0f3f2 100644 --- a/continuedev/src/continuedev/plugins/policies/headless.py +++ b/server/continuedev/plugins/policies/headless.py @@ -1,7 +1,7 @@ from ...core.config import ContinueConfig from ...core.main import History, Policy, Step from ...core.observation import TextObservation -from ...plugins.steps.core.core import ShellCommandsStep +from ...core.steps import ShellCommandsStep from ...plugins.steps.on_traceback import DefaultOnTracebackStep diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/README.md b/server/continuedev/plugins/recipes/AddTransformRecipe/README.md index 78d603a2..78d603a2 100644 --- a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/README.md +++ b/server/continuedev/plugins/recipes/AddTransformRecipe/README.md diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md b/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md index 864aea87..864aea87 100644 --- a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md +++ b/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/main.py b/server/continuedev/plugins/recipes/AddTransformRecipe/main.py index 54207399..583cef1a 100644 --- 
diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/README.md b/server/continuedev/plugins/recipes/AddTransformRecipe/README.md
index 78d603a2..78d603a2 100644
--- a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/README.md
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md b/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md
index 864aea87..864aea87 100644
--- a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md
diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/main.py b/server/continuedev/plugins/recipes/AddTransformRecipe/main.py
index 54207399..583cef1a 100644
--- a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/main.py
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/main.py
@@ -2,7 +2,7 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK
-from ....plugins.steps.core.core import MessageStep, WaitForUserInputStep
+from ....core.steps import MessageStep, WaitForUserInputStep
 from .steps import AddTransformStep, SetUpChessPipelineStep
diff --git a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/steps.py b/server/continuedev/plugins/recipes/AddTransformRecipe/steps.py
index c7e60f12..61638374 100644
--- a/continuedev/src/continuedev/plugins/recipes/AddTransformRecipe/steps.py
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/steps.py
@@ -3,8 +3,8 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
 from ....libs.util.paths import find_data_file
-from ....plugins.steps.core.core import MessageStep

 AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
diff --git a/continuedev/src/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md
index df66104f..df66104f 100644
--- a/continuedev/src/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md
+++ b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py
index 3dff2e15..3dff2e15 100644
--- a/continuedev/src/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py
+++ b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py
diff --git a/continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/README.md b/server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md
index e69de29b..e69de29b 100644
--- a/continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/README.md
+++ b/server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/main.py b/server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py
index 4b259769..56e6f055 100644
--- a/continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/main.py
+++ b/server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py
@@ -2,7 +2,7 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK
-from ....plugins.steps.core.core import MessageStep, WaitForUserInputStep
+from ....core.steps import MessageStep, WaitForUserInputStep
 from .steps import RunQueryStep, SetupPipelineStep, ValidatePipelineStep
diff --git a/continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py b/server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py
index 9a5ca2bb..65e7182d 100644
--- a/continuedev/src/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py
+++ b/server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py
@@ -4,10 +4,10 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
 from ....models.filesystem import RangeInFile
 from ....models.filesystem_edit import AddFile, FileEdit
 from ....models.main import Range
-from ....plugins.steps.core.core import MessageStep

 AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
diff --git a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/README.md b/server/continuedev/plugins/recipes/DDtoBQRecipe/README.md
index d50324f7..d50324f7 100644
--- a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/README.md
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md b/server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md
index eb68e117..eb68e117 100644
--- a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md
diff --git a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/main.py b/server/continuedev/plugins/recipes/DDtoBQRecipe/main.py
index 5348321d..65149500 100644
--- a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/main.py
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/main.py
@@ -2,7 +2,7 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK
-from ....plugins.steps.core.core import MessageStep
+from ....core.steps import MessageStep
 from .steps import LoadDataStep, SetUpChessPipelineStep, SwitchDestinationStep

 # Based on the following guide:
diff --git a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/steps.py b/server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py
index d6769148..dfe25d9e 100644
--- a/continuedev/src/continuedev/plugins/recipes/DDtoBQRecipe/steps.py
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py
@@ -3,8 +3,8 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
 from ....libs.util.paths import find_data_file
-from ....plugins.steps.core.core import MessageStep
 from ....plugins.steps.find_and_replace import FindAndReplaceStep

 AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
diff --git a/continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md
index e69de29b..e69de29b 100644
--- a/continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md
+++ b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py
index 8f16cb34..5b0bd320 100644
--- a/continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py
+++ b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py
@@ -2,7 +2,7 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK
-from ....plugins.steps.core.core import MessageStep
+from ....core.steps import MessageStep
 from ....plugins.steps.input.nl_multiselect import NLMultiselectStep
 from .steps import DeployAirflowStep, RunPipelineStep, SetupPipelineStep
diff --git a/continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py
index d09cf8bb..e4a932af 100644
--- a/continuedev/src/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py
+++ b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py
@@ -3,7 +3,7 @@ from textwrap import dedent

 from ....core.main import Step
 from ....core.sdk import ContinueSDK, Models
-from ....plugins.steps.core.core import MessageStep
+from ....core.steps import MessageStep
 from ....plugins.steps.find_and_replace import FindAndReplaceStep

 AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
diff --git a/continuedev/src/continuedev/plugins/recipes/README.md b/server/continuedev/plugins/recipes/README.md
index 9860b0e2..9860b0e2 100644
--- a/continuedev/src/continuedev/plugins/recipes/README.md
+++ b/server/continuedev/plugins/recipes/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/TemplateRecipe/README.md b/server/continuedev/plugins/recipes/TemplateRecipe/README.md
index 91d1123b..91d1123b 100644
--- a/continuedev/src/continuedev/plugins/recipes/TemplateRecipe/README.md
+++ b/server/continuedev/plugins/recipes/TemplateRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/TemplateRecipe/main.py b/server/continuedev/plugins/recipes/TemplateRecipe/main.py
index 01ae364d..01ae364d 100644
--- a/continuedev/src/continuedev/plugins/recipes/TemplateRecipe/main.py
+++ b/server/continuedev/plugins/recipes/TemplateRecipe/main.py
diff --git a/continuedev/src/continuedev/plugins/recipes/WritePytestsRecipe/README.md b/server/continuedev/plugins/recipes/WritePytestsRecipe/README.md
index 5ce33ecb..5ce33ecb 100644
--- a/continuedev/src/continuedev/plugins/recipes/WritePytestsRecipe/README.md
+++ b/server/continuedev/plugins/recipes/WritePytestsRecipe/README.md
diff --git a/continuedev/src/continuedev/plugins/recipes/WritePytestsRecipe/main.py b/server/continuedev/plugins/recipes/WritePytestsRecipe/main.py
index 63edabc6..63edabc6 100644
--- a/continuedev/src/continuedev/plugins/recipes/WritePytestsRecipe/main.py
+++ b/server/continuedev/plugins/recipes/WritePytestsRecipe/main.py
diff --git a/continuedev/src/continuedev/plugins/steps/README.md b/server/continuedev/plugins/steps/README.md
index a8cae90b..a8cae90b 100644
--- a/continuedev/src/continuedev/plugins/steps/README.md
+++ b/server/continuedev/plugins/steps/README.md
diff --git a/server/continuedev/plugins/steps/__init__.py b/server/continuedev/plugins/steps/__init__.py
new file mode 100644
index 00000000..a181a956
--- /dev/null
+++ b/server/continuedev/plugins/steps/__init__.py
@@ -0,0 +1,13 @@
+# from .chroma import (
+#     AnswerQuestionChroma,  # noqa: F401
+#     CreateCodebaseIndexChroma,  # noqa: F401
+#     EditFileChroma,  # noqa: F401
+# )
+from .clear_history import ClearHistoryStep  # noqa: F401
+from .cmd import GenerateShellCommandStep  # noqa: F401
+from .comment_code import CommentCodeStep  # noqa: F401
+from .help import HelpStep  # noqa: F401
+from .main import EditHighlightedCodeStep  # noqa: F401
+from .open_config import OpenConfigStep  # noqa: F401
+
+# from .share_session import ShareSessionStep  # noqa: F401
diff --git a/continuedev/src/continuedev/plugins/steps/chat.py b/server/continuedev/plugins/steps/chat.py
index 179882bb..1b0f76f9 100644
--- a/continuedev/src/continuedev/plugins/steps/chat.py
+++ b/server/continuedev/plugins/steps/chat.py
@@ -11,12 +11,12 @@ from pydantic import Field

 from ...core.main import ChatMessage, FunctionCall, Models, Step, step_to_json_schema
 from ...core.sdk import ContinueSDK
+from ...core.steps import MessageStep
 from ...libs.llm.openai import OpenAI
 from ...libs.llm.openai_free_trial import OpenAIFreeTrial
 from ...libs.util.devdata import dev_data_logger
 from ...libs.util.strings import remove_quotes_and_escapes
 from ...libs.util.telemetry import posthog_logger
-from .core.core import MessageStep
 from .main import EditHighlightedCodeStep

 load_dotenv()
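Note what the new steps `__init__.py` leaves commented out: the chroma steps, which would drag a vector-store dependency into every `import continuedev.plugins.steps`, and ShareSessionStep. A guarded re-export is one alternative way to keep an optional step out of the required import path; this is a sketch of the idea, not code from the repo:

    # hypothetical variant of the commented-out chroma import
    try:
        from .chroma import AnswerQuestionChroma  # noqa: F401
    except ImportError:
        # optional dependency not installed; the step is simply absent
        AnswerQuestionChroma = None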
diff --git a/continuedev/src/continuedev/plugins/steps/chroma.py b/server/continuedev/plugins/steps/chroma.py
index 39b0741f..f357a872 100644
--- a/continuedev/src/continuedev/plugins/steps/chroma.py
+++ b/server/continuedev/plugins/steps/chroma.py
@@ -4,8 +4,8 @@ from typing import Coroutine, Union

 from ...core.main import Step
 from ...core.observation import Observation
 from ...core.sdk import ContinueSDK
+from ...core.steps import EditFileStep
 from ...libs.chroma.query import ChromaIndexManager
-from .core.core import EditFileStep


 class CreateCodebaseIndexChroma(Step):
diff --git a/continuedev/src/continuedev/plugins/steps/clear_history.py b/server/continuedev/plugins/steps/clear_history.py
index 8f21518b..8f21518b 100644
--- a/continuedev/src/continuedev/plugins/steps/clear_history.py
+++ b/server/continuedev/plugins/steps/clear_history.py
diff --git a/continuedev/src/continuedev/plugins/steps/cmd.py b/server/continuedev/plugins/steps/cmd.py
index a38f6323..a38f6323 100644
--- a/continuedev/src/continuedev/plugins/steps/cmd.py
+++ b/server/continuedev/plugins/steps/cmd.py
diff --git a/continuedev/src/continuedev/plugins/steps/comment_code.py b/server/continuedev/plugins/steps/comment_code.py
index 1eee791d..1eee791d 100644
--- a/continuedev/src/continuedev/plugins/steps/comment_code.py
+++ b/server/continuedev/plugins/steps/comment_code.py
diff --git a/continuedev/src/continuedev/plugins/steps/custom_command.py b/server/continuedev/plugins/steps/custom_command.py
index 4128415b..4128415b 100644
--- a/continuedev/src/continuedev/plugins/steps/custom_command.py
+++ b/server/continuedev/plugins/steps/custom_command.py
diff --git a/continuedev/src/continuedev/plugins/steps/draft/abstract_method.py b/server/continuedev/plugins/steps/draft/abstract_method.py
index 7ceefe9b..7ceefe9b 100644
--- a/continuedev/src/continuedev/plugins/steps/draft/abstract_method.py
+++ b/server/continuedev/plugins/steps/draft/abstract_method.py
diff --git a/continuedev/src/continuedev/plugins/steps/draft/redux.py b/server/continuedev/plugins/steps/draft/redux.py
index 5a351e6f..83b5e592 100644
--- a/continuedev/src/continuedev/plugins/steps/draft/redux.py
+++ b/server/continuedev/plugins/steps/draft/redux.py
@@ -1,6 +1,6 @@
 from ....core.main import Step
 from ....core.sdk import ContinueSDK
-from ..core.core import EditFileStep
+from ....core.steps import EditFileStep


 class EditReduxStateStep(Step):
diff --git a/continuedev/src/continuedev/plugins/steps/draft/typeorm.py b/server/continuedev/plugins/steps/draft/typeorm.py
index c79fa041..c79fa041 100644
--- a/continuedev/src/continuedev/plugins/steps/draft/typeorm.py
+++ b/server/continuedev/plugins/steps/draft/typeorm.py
diff --git a/continuedev/src/continuedev/plugins/steps/feedback.py b/server/continuedev/plugins/steps/feedback.py
index df1142a1..df1142a1 100644
--- a/continuedev/src/continuedev/plugins/steps/feedback.py
+++ b/server/continuedev/plugins/steps/feedback.py
diff --git a/continuedev/src/continuedev/plugins/steps/find_and_replace.py b/server/continuedev/plugins/steps/find_and_replace.py
index 287e286d..287e286d 100644
--- a/continuedev/src/continuedev/plugins/steps/find_and_replace.py
+++ b/server/continuedev/plugins/steps/find_and_replace.py
diff --git a/continuedev/src/continuedev/plugins/steps/help.py b/server/continuedev/plugins/steps/help.py
index 148dddb8..148dddb8 100644
--- a/continuedev/src/continuedev/plugins/steps/help.py
+++ b/server/continuedev/plugins/steps/help.py
diff --git a/continuedev/src/continuedev/plugins/steps/input/nl_multiselect.py b/server/continuedev/plugins/steps/input/nl_multiselect.py
index 721f1306..f4b5e7a6 100644
--- a/continuedev/src/continuedev/plugins/steps/input/nl_multiselect.py
+++ b/server/continuedev/plugins/steps/input/nl_multiselect.py
@@ -2,7 +2,7 @@ from typing import List, Union

 from ....core.main import Step
 from ....core.sdk import ContinueSDK
-from ..core.core import WaitForUserInputStep
+from ....core.steps import WaitForUserInputStep


 class NLMultiselectStep(Step):
diff --git a/continuedev/src/continuedev/plugins/steps/main.py b/server/continuedev/plugins/steps/main.py
index ab4fe948..936fd7e0 100644
--- a/continuedev/src/continuedev/plugins/steps/main.py
+++ b/server/continuedev/plugins/steps/main.py
@@ -7,14 +7,14 @@ from pydantic import BaseModel, Field

 from ...core.main import ContinueCustomException, Step
 from ...core.observation import Observation
 from ...core.sdk import ContinueSDK, Models
-from ...libs.llm import LLM
+from ...core.steps import DefaultModelEditCodeStep
+from ...libs.llm.base import LLM
 from ...libs.llm.prompt_utils import MarkdownStyleEncoderDecoder
 from ...libs.util.calculate_diff import calculate_diff2
 from ...libs.util.logging import logger
 from ...models.filesystem import RangeInFile, RangeInFileWithContents
 from ...models.filesystem_edit import EditDiff, FileEdit
 from ...models.main import Range, Traceback
-from .core.core import DefaultModelEditCodeStep


 class Policy(BaseModel):
diff --git a/continuedev/src/continuedev/plugins/steps/on_traceback.py b/server/continuedev/plugins/steps/on_traceback.py
index 86894818..b72ce809 100644
--- a/continuedev/src/continuedev/plugins/steps/on_traceback.py
+++ b/server/continuedev/plugins/steps/on_traceback.py
@@ -4,6 +4,7 @@ from typing import Dict, List, Optional, Tuple

 from ...core.main import ChatMessage, ContinueCustomException, Step
 from ...core.sdk import ContinueSDK
+from ...core.steps import UserInputStep
 from ...libs.util.filter_files import should_filter_path
 from ...libs.util.traceback.traceback_parsers import (
     get_javascript_traceback,
@@ -13,7 +14,6 @@ from ...libs.util.traceback.traceback_parsers import (
 from ...models.filesystem import RangeInFile
 from ...models.main import Range, Traceback, TracebackFrame
 from .chat import SimpleChatStep
-from .core.core import UserInputStep


 def extract_traceback_str(output: str) -> str:
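Alongside the step moves, `from ...libs.llm import LLM` becomes `from ...libs.llm.base import LLM`: the LLM base class now lives in a dedicated `base` module rather than the package `__init__`. A common motive for this layout, an inference here rather than something the diff states, is avoiding import cycles: provider modules subclass LLM, so the base class cannot sit in an `__init__.py` that also re-exports those providers. Sketch of the implied structure:

    # libs/llm/base.py       defines LLM and CompletionOptions
    # libs/llm/openai.py     does `from .base import LLM` and subclasses it
    # libs/llm/__init__.py   may re-export providers without creating a cycle
    #
    # consumers that only need the base types import them directly:
    from continuedev.libs.llm.base import LLM, CompletionOptions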
diff --git a/server/continuedev/plugins/steps/open_config.py b/server/continuedev/plugins/steps/open_config.py
new file mode 100644
index 00000000..c57939f8
--- /dev/null
+++ b/server/continuedev/plugins/steps/open_config.py
@@ -0,0 +1,17 @@
+from textwrap import dedent
+
+from ...core.main import Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.paths import getConfigFilePath
+
+
+class OpenConfigStep(Step):
+    name: str = "Open config"
+
+    async def describe(self, models):
+        return dedent(
+            'Read [the docs](https://continue.dev/docs/customization/overview) to learn more about how you can customize Continue using `"config.py"`.'
+        )
+
+    async def run(self, sdk: ContinueSDK):
+        await sdk.ide.setFileOpen(getConfigFilePath())
diff --git a/continuedev/src/continuedev/plugins/steps/react.py b/server/continuedev/plugins/steps/react.py
index 1b9bc265..1b9bc265 100644
--- a/continuedev/src/continuedev/plugins/steps/react.py
+++ b/server/continuedev/plugins/steps/react.py
diff --git a/continuedev/src/continuedev/plugins/steps/refactor.py b/server/continuedev/plugins/steps/refactor.py
index 56e9e09e..56e9e09e 100644
--- a/continuedev/src/continuedev/plugins/steps/refactor.py
+++ b/server/continuedev/plugins/steps/refactor.py
diff --git a/continuedev/src/continuedev/plugins/steps/search_directory.py b/server/continuedev/plugins/steps/search_directory.py
index 83516719..83516719 100644
--- a/continuedev/src/continuedev/plugins/steps/search_directory.py
+++ b/server/continuedev/plugins/steps/search_directory.py
diff --git a/continuedev/src/continuedev/plugins/steps/setup_model.py b/server/continuedev/plugins/steps/setup_model.py
index 87e52f1b..87e52f1b 100644
--- a/continuedev/src/continuedev/plugins/steps/setup_model.py
+++ b/server/continuedev/plugins/steps/setup_model.py
diff --git a/continuedev/src/continuedev/plugins/steps/share_session.py b/server/continuedev/plugins/steps/share_session.py
index 1d68dc90..1d68dc90 100644
--- a/continuedev/src/continuedev/plugins/steps/share_session.py
+++ b/server/continuedev/plugins/steps/share_session.py
diff --git a/continuedev/src/continuedev/plugins/steps/steps_on_startup.py b/server/continuedev/plugins/steps/steps_on_startup.py
index 58d56703..58d56703 100644
--- a/continuedev/src/continuedev/plugins/steps/steps_on_startup.py
+++ b/server/continuedev/plugins/steps/steps_on_startup.py
diff --git a/continuedev/src/continuedev/plugins/steps/welcome.py b/server/continuedev/plugins/steps/welcome.py
index ef1acfc1..ef1acfc1 100644
--- a/continuedev/src/continuedev/plugins/steps/welcome.py
+++ b/server/continuedev/plugins/steps/welcome.py
diff --git a/continuedev/src/continuedev/server/gui.py b/server/continuedev/server/gui.py
index 26fcbd42..82767f5e 100644
--- a/continuedev/src/continuedev/server/gui.py
+++ b/server/continuedev/server/gui.py
@@ -10,7 +10,12 @@ from uvicorn.main import Server

 from ..core.main import ContextItem
 from ..core.models import ALL_MODEL_ROLES, MODEL_CLASSES, MODEL_MODULE_NAMES
-from ..libs.llm.prompts.chat import llama2_template_messages, template_alpaca_messages
+from ..core.steps import DisplayErrorStep
+from ..libs.llm.prompts.chat import (
+    llama2_template_messages,
+    sqlcoder_template_messages,
+    template_alpaca_messages,
+)
 from ..libs.util.create_async_task import create_async_task
 from ..libs.util.edit_config import (
     add_config_import,
@@ -22,8 +27,6 @@ from ..libs.util.edit_config import (
 from ..libs.util.logging import logger
 from ..libs.util.queue import AsyncSubscriptionQueue
 from ..libs.util.telemetry import posthog_logger
-from ..plugins.steps.core.core import DisplayErrorStep
-from ..plugins.steps.setup_model import SetupModelStep
 from .session_manager import Session, session_manager

 router = APIRouter(prefix="/gui", tags=["gui"])
@@ -189,7 +192,7 @@ class GUIProtocolServer:
         create_async_task(self.session.autopilot.set_editing_at_ids(ids), self.on_error)

     def on_show_logs_at_index(self, index: int):
-        name = "Continue Context"
+        name = "Continue Prompt"
         logs = "\n\n############################################\n\n".join(
             ["This is the prompt that was sent to the LLM during this step"]
             + self.session.autopilot.continue_sdk.history.timeline[index].logs
@@ -337,19 +340,22 @@ class GUIProtocolServer:

             # Add the requisite import to config.py
             add_config_import(
-                f"from continuedev.src.continuedev.libs.llm.{MODEL_MODULE_NAMES[model_class]} import {model_class}"
+                f"from continuedev.libs.llm.{MODEL_MODULE_NAMES[model_class]} import {model_class}"
             )
             if "template_messages" in model:
                 add_config_import(
-                    f"from continuedev.src.continuedev.libs.llm.prompts.chat import {model['template_messages']}"
+                    f"from continuedev.libs.llm.prompts.chat import {model['template_messages']}"
                 )

             # Set and start the new default model
             if "template_messages" in model:
+                sqtm = sqlcoder_template_messages("<MY_DATABASE_SCHEMA>")
+                sqtm.__name__ = 'sqlcoder_template_messages("<MY_DATABASE_SCHEMA>")'
                 model["template_messages"] = {
                     "llama2_template_messages": llama2_template_messages,
                     "template_alpaca_messages": template_alpaca_messages,
+                    "sqlcoder_template_messages": sqtm,
                 }[model["template_messages"]]
             new_model = MODEL_CLASSES[model_class](**model)
             models.default = new_model
@@ -376,9 +382,9 @@ class GUIProtocolServer:
                 models.__setattr__(role, models.default)

             # Display setup help
-            await self.session.autopilot.continue_sdk.run_step(
-                SetupModelStep(model_class=model_class)
-            )
+            # await self.session.autopilot.continue_sdk.run_step(
+            #     SetupModelStep(model_class=model_class)
+            # )

         create_async_task(async_stuff(), self.on_error)
     else:
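The `sqtm` lines in the gui.py hunk above are worth a second look. `sqlcoder_template_messages` is a factory that returns a message-template function with the database schema baked in, and the returned function's `__name__` is overwritten with the literal call expression. The config editor evidently writes `template_messages` back into config.py by name, so patching `__name__` makes the serialized config contain a working call rather than a bare identifier that would not resolve to the closure. A self-contained sketch of the trick, with a hypothetical factory standing in for sqlcoder_template_messages:

    def make_template_messages(schema: str):
        # returns a closure with `schema` baked in
        def template_messages(messages):
            return f"Schema: {schema}\n" + "\n".join(m["content"] for m in messages)

        return template_messages

    tm = make_template_messages("<MY_DATABASE_SCHEMA>")
    # code that writes `template_messages=<tm.__name__>` into config.py will
    # now emit a call expression that reconstructs this exact closure
    tm.__name__ = 'make_template_messages("<MY_DATABASE_SCHEMA>")'
    print(tm.__name__)  # make_template_messages("<MY_DATABASE_SCHEMA>")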
diff --git a/continuedev/src/continuedev/server/ide.py b/server/continuedev/server/ide.py
index 6a4dc738..7f9af77a 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/server/continuedev/server/ide.py
@@ -13,6 +13,7 @@ from starlette.websockets import WebSocketDisconnect, WebSocketState
 from uvicorn.main import Server

 from ..core.main import ContinueCustomException
+from ..core.steps import DisplayErrorStep
 from ..libs.util.create_async_task import create_async_task
 from ..libs.util.devdata import dev_data_logger
 from ..libs.util.logging import logger
@@ -37,7 +38,6 @@ from ..models.filesystem_edit import (
     RenameFile,
     SequentialFileSystemEdit,
 )
-from ..plugins.steps.core.core import DisplayErrorStep
 from .gui import session_manager
 from .ide_protocol import AbstractIdeProtocolServer
 from .session_manager import SessionManager
@@ -456,6 +456,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         for callback in self._file_saved_callbacks:
             self.call_callback(callback, filepath, contents)

+        # If ~/.continue/config.py was saved, auto-update the SDK
+        if filepath.endswith(".continue/config.py") or filepath.endswith(
+            ".continue\\config.py"
+        ):
+            if autopilot := self.__get_autopilot():
+                create_async_task(autopilot.reload_config(), self.on_error)
+
     ## END Subscriptions ##

     def onMainUserInput(self, input: str):
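The ide.py hunk above wires config auto-reload to the IDE's file-saved callback: when a path ending in `.continue/config.py` is saved (both separator spellings are checked so Windows paths match), `autopilot.reload_config()` is scheduled as an async task. The paired `endswith` checks can be collapsed into one by normalizing separators first; a small runnable sketch of that equivalent check:

    def config_was_saved(filepath: str) -> bool:
        # same effect as the two endswith() checks in the hunk above:
        # normalize Windows separators, then test a single suffix
        return filepath.replace("\\", "/").endswith(".continue/config.py")

    assert config_was_saved("/home/me/.continue/config.py")
    assert config_was_saved(r"C:\Users\me\.continue\config.py")
    assert not config_was_saved("/home/me/project/config.py")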
diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/server/continuedev/server/ide_protocol.py
index 832dd338..832dd338 100644
--- a/continuedev/src/continuedev/server/ide_protocol.py
+++ b/server/continuedev/server/ide_protocol.py
diff --git a/continuedev/src/continuedev/server/main.py b/server/continuedev/server/main.py
index c5540d7d..c5540d7d 100644
--- a/continuedev/src/continuedev/server/main.py
+++ b/server/continuedev/server/main.py
diff --git a/continuedev/src/continuedev/server/meilisearch_server.py b/server/continuedev/server/meilisearch_server.py
index 6ce4d61c..93761ce1 100644
--- a/continuedev/src/continuedev/server/meilisearch_server.py
+++ b/server/continuedev/server/meilisearch_server.py
@@ -28,9 +28,9 @@ async def download_meilisearch():
     """
     serverPath = getServerFolderPath()

-    logger.debug("Downloading MeiliSearch...")

     if os.name == "nt":
+        logger.debug("Downloading MeiliSearch for Windows...")
         download_url = "https://github.com/meilisearch/meilisearch/releases/download/v1.3.2/meilisearch-windows-amd64.exe"
         download_path = getMeilisearchExePath()
         if not os.path.exists(download_path):
@@ -42,6 +42,7 @@ async def download_meilisearch():
             #     cwd=serverPath,
             # )
     else:
+        logger.debug("Downloading MeiliSearch with curl...")
         subprocess.run(
             "curl -L https://install.meilisearch.com | sh",
             shell=True,
diff --git a/continuedev/src/continuedev/server/session_manager.py b/server/continuedev/server/session_manager.py
index f0080104..f0080104 100644
--- a/continuedev/src/continuedev/server/session_manager.py
+++ b/server/continuedev/server/session_manager.py
diff --git a/continuedev/dev_requirements.txt b/server/dev_requirements.txt
index 2fa7631b..2fa7631b 100644
--- a/continuedev/dev_requirements.txt
+++ b/server/dev_requirements.txt
diff --git a/continuedev/install-dependencies.sh b/server/install-dependencies.sh
index 8f1b5d27..8f1b5d27 100755
--- a/continuedev/install-dependencies.sh
+++ b/server/install-dependencies.sh
diff --git a/server/main.py b/server/main.py
new file mode 100644
index 00000000..c40f9b96
--- /dev/null
+++ b/server/main.py
@@ -0,0 +1,5 @@
+from .continuedev.server.main import run_server
+
+
+def main():
+    run_server()
diff --git a/continuedev/notes.md b/server/notes.md
index 469d4950..469d4950 100644
--- a/continuedev/notes.md
+++ b/server/notes.md
diff --git a/continuedev/poetry.lock b/server/poetry.lock
index 23ff9094..23ff9094 100644
--- a/continuedev/poetry.lock
+++ b/server/poetry.lock
diff --git a/continuedev/poetry.toml b/server/poetry.toml
index ab1033bd..ab1033bd 100644
--- a/continuedev/poetry.toml
+++ b/server/poetry.toml
diff --git a/continuedev/pyproject.toml b/server/pyproject.toml
index 0e80d051..4085306d 100644
--- a/continuedev/pyproject.toml
+++ b/server/pyproject.toml
@@ -36,7 +36,7 @@ python-lsp-server = "^1.7.4"
 huggingface-hub = "^0.16.4"

 [tool.poetry.scripts]
-typegen = "src.continuedev.models.generate_json_schema:main"
+typegen = "continuedev.models.generate_json_schema:main"

 [tool.poetry.group.dev.dependencies]
 pytest = "^7.4.1"
diff --git a/continuedev/requirements.txt b/server/requirements.txt
index d430f20d..d430f20d 100644
--- a/continuedev/requirements.txt
+++ b/server/requirements.txt
diff --git a/continuedev/src/continuedev/libs/__init__.py b/server/tests/__init__.py
index e69de29b..e69de29b 100644
--- a/continuedev/src/continuedev/libs/__init__.py
+++ b/server/tests/__init__.py
diff --git a/continuedev/src/continuedev/tests/llm_test.py b/server/tests/llm_test.py
index 8c4fe0c6..a016b464 100644
--- a/continuedev/src/continuedev/tests/llm_test.py
+++ b/server/tests/llm_test.py
@@ -3,16 +3,15 @@ import os
 from functools import wraps

 import pytest
-from dotenv import load_dotenv
-
 from continuedev.core.main import ChatMessage
-from continuedev.libs.llm import LLM, CompletionOptions
 from continuedev.libs.llm.anthropic import AnthropicLLM
+from continuedev.libs.llm.base import LLM, CompletionOptions
 from continuedev.libs.llm.ggml import GGML
 from continuedev.libs.llm.openai import OpenAI
 from continuedev.libs.llm.together import TogetherLLM
 from continuedev.libs.util.count_tokens import DEFAULT_ARGS
-from continuedev.tests.util.prompts import tokyo_test_pair
+from dotenv import load_dotenv
+from util.prompts import tokyo_test_pair

 load_dotenv()
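Two pieces above turn the relocated package into a launchable server: the new top-level `server/main.py` wraps `run_server()` in a `main()` function, and the `[tool.poetry.scripts]` entry drops the stale `src.` prefix so `typegen` resolves against the new package layout. Poetry script targets use `dotted.module.path:function` syntax; a sketch of exposing `main()` the same way (the `continue-server` script name and the `__main__` guard are hypothetical additions, not part of this diff):

    # mirrors server/main.py, plus a guard so the module could also be run
    # with `python -m server.main` (assumes server/ is importable as a package)
    from .continuedev.server.main import run_server

    def main() -> None:
        run_server()

    if __name__ == "__main__":
        main()

    # a pyproject entry in the style of the typegen line could then expose it:
    #   [tool.poetry.scripts]
    #   continue-server = "main:main"   # hypothetical name and target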
diff --git a/continuedev/src/continuedev/tests/step_test.py b/server/tests/step_test.py
index 61f39d18..a9132dd3 100644
--- a/continuedev/src/continuedev/tests/step_test.py
+++ b/server/tests/step_test.py
@@ -1,13 +1,12 @@
 import pytest
-
 from continuedev.core.config import ContinueConfig
+from continuedev.core.steps import UserInputStep
 from continuedev.headless import start_headless_session
 from continuedev.models.filesystem import Range, RangeInFileWithContents
 from continuedev.plugins.steps.chat import SimpleChatStep
-from continuedev.plugins.steps.core.core import UserInputStep
 from continuedev.plugins.steps.main import EditHighlightedCodeStep
 from continuedev.plugins.steps.on_traceback import DefaultOnTracebackStep
-from continuedev.tests.util.prompts import dotenv_test_pair, tokyo_test_pair
+from util.prompts import dotenv_test_pair, tokyo_test_pair

 TEST_CONFIG = ContinueConfig()
diff --git a/continuedev/src/continuedev/models/__init__.py b/server/tests/util/__init__.py
index e69de29b..e69de29b 100644
--- a/continuedev/src/continuedev/models/__init__.py
+++ b/server/tests/util/__init__.py
diff --git a/continuedev/src/continuedev/tests/util/config.py b/server/tests/util/config.py
index dd0e1f13..370933a0 100644
--- a/continuedev/src/continuedev/tests/util/config.py
+++ b/server/tests/util/config.py
@@ -1,6 +1,6 @@
-from continuedev.src.continuedev.core.config import ContinueConfig
-from continuedev.src.continuedev.core.models import Models
-from continuedev.src.continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial
+from continuedev.core.config import ContinueConfig
+from continuedev.core.models import Models
+from continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial

 config = ContinueConfig(
     allow_anonymous_telemetry=False,
diff --git a/continuedev/src/continuedev/tests/util/openai_mock.py b/server/tests/util/openai_mock.py
index 763c5647..763c5647 100644
--- a/continuedev/src/continuedev/tests/util/openai_mock.py
+++ b/server/tests/util/openai_mock.py
diff --git a/continuedev/src/continuedev/tests/util/prompts.py b/server/tests/util/prompts.py
index e84ddc82..e84ddc82 100644
--- a/continuedev/src/continuedev/tests/util/prompts.py
+++ b/server/tests/util/prompts.py
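The test hunks change `from continuedev.tests.util.prompts import ...` to `from util.prompts import ...`, which only resolves if `server/tests` itself is on `sys.path` when pytest collects the files; pytest's rootdir and sys.path insertion usually provide this when the suite is run from inside `server/`. A hypothetical `server/tests/conftest.py` that would make the layout work from any working directory (an assumption, not a file in this diff):

    # hypothetical server/tests/conftest.py
    import os
    import sys

    # ensure `util.prompts` and friends are importable regardless of where
    # pytest is invoked from
    sys.path.insert(0, os.path.dirname(__file__))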