 .github/workflows/main.yaml                          |   5
 continuedev/src/continuedev/libs/llm/llamacpp.py     | 110
 continuedev/src/continuedev/libs/llm/ollama.py       |  25
 continuedev/src/continuedev/libs/llm/openai.py       |   2
 continuedev/src/continuedev/libs/llm/prompts/chat.py |  56
 continuedev/src/continuedev/libs/llm/together.py     |  21
 docs/docs/customization.md                           |  25
 docs/docs/walkthroughs/codellama.md                  |   3
 extension/package-lock.json                          | 336
 extension/package.json                               |   1
 extension/src/activation/environmentSetup.ts         |   7
 11 files changed, 506 insertions(+), 85 deletions(-)
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index f53cd7be..10cfc2a1 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -196,6 +196,11 @@ jobs:
           cd extension
           npm ci

+      - name: Publish (Open VSX Registry)
+        run: |
+          cd extension
+          npx ovsx publish -p ${{ secrets.VSX_REGISTRY_TOKEN }} --packagePath ./build/*.vsix
+
       - name: Publish
         run: |
           cd extension
diff --git a/continuedev/src/continuedev/libs/llm/llamacpp.py b/continuedev/src/continuedev/libs/llm/llamacpp.py
index bdcf8612..9e424fde 100644
--- a/continuedev/src/continuedev/libs/llm/llamacpp.py
+++ b/continuedev/src/continuedev/libs/llm/llamacpp.py
@@ -1,5 +1,5 @@
+import asyncio
 import json
-from textwrap import dedent
 from typing import Any, Callable, Coroutine, Dict, Generator, List, Optional, Union

 import aiohttp
@@ -12,50 +12,7 @@ from ..util.count_tokens import (
     count_tokens,
     format_chat_messages,
 )
-
-
-def llama2_template_messages(msgs: ChatMessage) -> str:
-    if len(msgs) == 0:
-        return ""
-
-    prompt = ""
-    has_system = msgs[0]["role"] == "system"
-    if has_system:
-        system_message = dedent(
-            f"""\
-            <<SYS>>
-            {msgs[0]["content"]}
-            <</SYS>>
-
-            """
-        )
-        if len(msgs) > 1:
-            prompt += f"[INST] {system_message}{msgs[1]['content']} [/INST]"
-        else:
-            prompt += f"[INST] {system_message} [/INST]"
-        return
-
-    for i in range(2 if has_system else 0, len(msgs)):
-        if msgs[i]["role"] == "user":
-            prompt += f"[INST] {msgs[i]['content']} [/INST]"
-        else:
-            prompt += msgs[i]["content"]
-
-    return prompt
-
-
-def code_llama_template_messages(msgs: ChatMessage) -> str:
-    return f"[INST] {msgs[-1]['content']} [/INST]"
-
-
-def code_llama_python_template_messages(msgs: ChatMessage) -> str:
-    return dedent(
-        f"""\
-        [INST]
-        You are an expert Python programmer and personal assistant, here is your task: {msgs[-1]['content']}
-        Your answer should start with a [PYTHON] tag and end with a [/PYTHON] tag.
-        [/INST]"""
-    )
+from .prompts.chat import code_llama_template_messages


 class LlamaCpp(LLM):
@@ -63,8 +20,10 @@ class LlamaCpp(LLM):
     server_url: str = "http://localhost:8080"
     verify_ssl: Optional[bool] = None

-    template_messages: Callable[[List[ChatMessage]], str] = llama2_template_messages
-    llama_cpp_args: Dict[str, Any] = {"stop": ["[INST]"]}
+    template_messages: Callable[[List[ChatMessage]], str] = code_llama_template_messages
+    llama_cpp_args: Dict[str, Any] = {"stop": ["[INST]"], "grammar": "root ::= "}
+
+    use_command: Optional[str] = None

     requires_write_log = True
     write_log: Optional[Callable[[str], None]] = None
@@ -114,6 +73,23 @@ class LlamaCpp(LLM):

         return args

+    async def stream_from_main(self, prompt: str):
+        cmd = self.use_command.split(" ") + ["-p", prompt]
+        process = await asyncio.create_subprocess_exec(
+            *cmd, stdout=asyncio.subprocess.PIPE
+        )
+
+        total = ""
+        async for line in process.stdout:
+            chunk = line.decode().strip()
+            if "llama_print_timings" in total + chunk:
+                process.terminate()
+                return
+            total += chunk
+            yield chunk
+
+        await process.wait()
+
     async def stream_complete(
         self, prompt, with_history: List[ChatMessage] = None, **kwargs
     ) -> Generator[Union[Any, List, Dict], None, None]:
@@ -171,7 +147,7 @@ class LlamaCpp(LLM):
         prompt = self.template_messages(messages)
         headers = {"Content-Type": "application/json"}

-        async def generator():
+        async def server_generator():
             async with aiohttp.ClientSession(
                 connector=aiohttp.TCPConnector(verify_ssl=self.verify_ssl)
             ) as client_session:
@@ -189,6 +165,12 @@ class LlamaCpp(LLM):
                         "role": "assistant",
                     }

+        async def command_generator():
+            async for line in self.stream_from_main(prompt):
+                yield {"content": line, "role": "assistant"}
+
+        generator = command_generator if self.use_command else server_generator
+
         # Because quite often the first attempt fails, and it works thereafter
         self.write_log(f"Prompt: \n\n{prompt}")
         completion = ""
@@ -205,15 +187,23 @@

         args = {**self.default_args, **kwargs}

         self.write_log(f"Prompt: \n\n{prompt}")
-        async with aiohttp.ClientSession(
-            connector=aiohttp.TCPConnector(verify_ssl=self.verify_ssl)
-        ) as client_session:
-            async with client_session.post(
-                f"{self.server_url}/completion",
-                json={"prompt": prompt, **self._transform_args(args)},
-                headers={"Content-Type": "application/json"},
-            ) as resp:
-                json_resp = await resp.json()
-                completion = json_resp["content"]
-                self.write_log(f"Completion: \n\n{completion}")
-                return completion
+
+        if self.use_command:
+            completion = ""
+            async for line in self.stream_from_main(prompt):
+                completion += line
+            self.write_log(f"Completion: \n\n{completion}")
+            return completion
+        else:
+            async with aiohttp.ClientSession(
+                connector=aiohttp.TCPConnector(verify_ssl=self.verify_ssl)
+            ) as client_session:
+                async with client_session.post(
+                    f"{self.server_url}/completion",
+                    json={"prompt": prompt, **self._transform_args(args)},
+                    headers={"Content-Type": "application/json"},
+                ) as resp:
+                    json_resp = await resp.json()
+                    completion = json_resp["content"]
+                    self.write_log(f"Completion: \n\n{completion}")
+                    return completion
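Note on the `use_command` path above: when set, `LlamaCpp` skips the HTTP server entirely, shells out to a local llama.cpp binary, and streams stdout until the `llama_print_timings` footer appears. A minimal sketch of how this might be wired up in `~/.continue/config.py`, following the config style used in the docs below (the binary and model paths are placeholders, not real defaults):

```python
from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp

config = ContinueConfig(
    ...
    models=Models(
        default=LlamaCpp(
            # Hypothetical paths -- point these at your own llama.cpp build and model.
            use_command="./llama.cpp/main -m ./models/codellama-13b.gguf",
        )
    ),
)
```

`stream_from_main` appends `-p <prompt>` to this command, so the value should be the full invocation minus the prompt flag.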
diff --git a/continuedev/src/continuedev/libs/llm/ollama.py b/continuedev/src/continuedev/libs/llm/ollama.py
index df2b2238..c754e54d 100644
--- a/continuedev/src/continuedev/libs/llm/ollama.py
+++ b/continuedev/src/continuedev/libs/llm/ollama.py
@@ -116,6 +116,7 @@ class Ollama(LLM):
                 "model": self.model,
             },
         ) as resp:
+            url_decode_buffer = ""
             async for line in resp.content.iter_any():
                 if line:
                     try:
@@ -125,7 +126,16 @@
                             if chunk.strip() != "":
                                 j = json.loads(chunk)
                                 if "response" in j:
-                                    yield urllib.parse.unquote(j["response"])
+                                    url_decode_buffer += j["response"]
+
+                                    if (
+                                        "&" in url_decode_buffer
+                                        and url_decode_buffer.index("&")
+                                        > len(url_decode_buffer) - 5
+                                    ):
+                                        continue
+                                    yield urllib.parse.unquote(url_decode_buffer)
+                                    url_decode_buffer = ""
                         except:
                             raise Exception(str(line[0]))
@@ -153,6 +163,7 @@
                 },
             ) as resp:
                 # This is streaming application/json instead of text/event-stream
+                url_decode_buffer = ""
                 async for line in resp.content.iter_chunks():
                     if line[1]:
                         try:
@@ -162,10 +173,20 @@
                             if chunk.strip() != "":
                                 j = json.loads(chunk)
                                 if "response" in j:
+                                    url_decode_buffer += j["response"]
+                                    if (
+                                        "&" in url_decode_buffer
+                                        and url_decode_buffer.index("&")
+                                        > len(url_decode_buffer) - 5
+                                    ):
+                                        continue
                                     yield {
                                         "role": "assistant",
-                                        "content": urllib.parse.unquote(j["response"]),
+                                        "content": urllib.parse.unquote(
+                                            url_decode_buffer
+                                        ),
                                     }
+                                    url_decode_buffer = ""
                         except:
                             raise Exception(str(line[0]))
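The buffering added above exists because Ollama streamed URL-encoded text at this point, and an escape sequence could straddle two chunks; decoding each chunk independently would corrupt it. The condition holds a chunk back whenever the first `&` falls within the last four characters of the buffer. A self-contained sketch of the same hold-back rule (function name and sample strings are illustrative only):

```python
import urllib.parse
from typing import Tuple


def flush_decoded(buffer: str) -> Tuple[str, str]:
    """Return (decoded_text, remaining_buffer)."""
    # Mirror of the condition in the diff: if '&' sits in the last four
    # characters, a sequence may still be incomplete -- keep buffering.
    if "&" in buffer and buffer.index("&") > len(buffer) - 5:
        return "", buffer
    return urllib.parse.unquote(buffer), ""


text, pending = flush_decoded("50%25 off &am")
# '&' near the end: held back -> text == "", pending == "50%25 off &am"
text, pending = flush_decoded("50%25 off &amp; more")
# '&' no longer near the end: decoded -> text == "50% off &amp; more"
```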
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 48e773a3..ea77397f 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -166,6 +166,8 @@ class OpenAI(LLM):
             messages=messages,
             **args,
         ):
+            if len(chunk.choices) == 0:
+                continue
             yield chunk.choices[0].delta
             if "content" in chunk.choices[0].delta:
                 completion += chunk.choices[0].delta.content
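The empty-`choices` guard above is small but load-bearing: some streamed chunks (reportedly including Azure OpenAI's content-filter annotations) arrive with an empty `choices` list, and indexing `chunk.choices[0]` on them raises `IndexError` mid-stream. A minimal illustration of the failure mode, with plain dicts standing in for the SDK objects:

```python
chunks = [
    {"choices": []},  # e.g. a metadata/filter chunk carrying no delta
    {"choices": [{"delta": {"content": "Hello"}}]},
]

completion = ""
for chunk in chunks:
    if len(chunk["choices"]) == 0:
        continue  # same guard as the diff: skip rather than crash
    delta = chunk["choices"][0]["delta"]
    if "content" in delta:
        completion += delta["content"]

print(completion)  # -> "Hello"
```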
diff --git a/continuedev/src/continuedev/libs/llm/prompts/chat.py b/continuedev/src/continuedev/libs/llm/prompts/chat.py
new file mode 100644
index 00000000..110dfaae
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/prompts/chat.py
@@ -0,0 +1,56 @@
+from textwrap import dedent
+
+from ....core.main import ChatMessage
+
+
+def llama2_template_messages(msgs: ChatMessage) -> str:
+    if len(msgs) == 0:
+        return ""
+
+    prompt = ""
+    has_system = msgs[0]["role"] == "system"
+
+    if has_system and msgs[0]["content"].strip() == "":
+        has_system = False
+        msgs = msgs[1:]
+
+    if has_system:
+        system_message = dedent(
+            f"""\
+            <<SYS>>
+            {msgs[0]["content"]}
+            <</SYS>>
+
+            """
+        )
+        if len(msgs) > 1:
+            prompt += f"[INST] {system_message}{msgs[1]['content']} [/INST]"
+        else:
+            prompt += f"[INST] {system_message} [/INST]"
+            return prompt
+
+    for i in range(2 if has_system else 0, len(msgs)):
+        if msgs[i]["role"] == "user":
+            prompt += f"[INST] {msgs[i]['content']} [/INST]"
+        else:
+            prompt += msgs[i]["content"]
+
+    return prompt
+
+
+def code_llama_template_messages(msgs: ChatMessage) -> str:
+    return f"[INST] {msgs[-1]['content']}\n[/INST]"
+
+
+def extra_space_template_messages(msgs: ChatMessage) -> str:
+    return f" {msgs[-1]['content']}"
+
+
+def code_llama_python_template_messages(msgs: ChatMessage) -> str:
+    return dedent(
+        f"""\
+        [INST]
+        You are an expert Python programmer and personal assistant, here is your task: {msgs[-1]['content']}
+        Your answer should start with a [PYTHON] tag and end with a [/PYTHON] tag.
+        [/INST]"""
+    )
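For reference, here is roughly what `llama2_template_messages` renders for a short conversation (the output spacing follows the `dedent` block above):

```python
msgs = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a haiku."},
]
print(llama2_template_messages(msgs))
# [INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# Write a haiku. [/INST]
```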
diff --git a/continuedev/src/continuedev/libs/llm/together.py b/continuedev/src/continuedev/libs/llm/together.py
index 4baf0b6c..ddae91a9 100644
--- a/continuedev/src/continuedev/libs/llm/together.py
+++ b/continuedev/src/continuedev/libs/llm/together.py
@@ -6,6 +6,7 @@ import aiohttp
 from ...core.main import ChatMessage
 from ..llm import LLM
 from ..util.count_tokens import DEFAULT_ARGS, compile_chat_messages, count_tokens
+from .prompts.chat import llama2_template_messages


 class TogetherLLM(LLM):
@@ -41,20 +42,6 @@ class TogetherLLM(LLM):
     def count_tokens(self, text: str):
         return count_tokens(self.name, text)

-    def convert_to_prompt(self, chat_messages: List[ChatMessage]) -> str:
-        system_message = None
-        if chat_messages[0]["role"] == "system":
-            system_message = chat_messages.pop(0)["content"]
-
-        prompt = "\n"
-        if system_message:
-            prompt += f"<human>: Hi!\n<bot>: {system_message}\n"
-        for message in chat_messages:
-            prompt += f'<{"human" if message["role"] == "user" else "bot"}>: {message["content"]}\n'
-
-        prompt += "<bot>:"
-        return prompt
-
     async def stream_complete(
         self, prompt, with_history: List[ChatMessage] = None, **kwargs
     ) -> Generator[Union[Any, List, Dict], None, None]:
@@ -75,7 +62,7 @@

         async with self._client_session.post(
             f"{self.base_url}/inference",
-            json={"prompt": self.convert_to_prompt(messages), **args},
+            json={"prompt": llama2_template_messages(messages), **args},
             headers={"Authorization": f"Bearer {self.api_key}"},
         ) as resp:
             async for line in resp.content.iter_any():
@@ -102,7 +89,7 @@

         async with self._client_session.post(
             f"{self.base_url}/inference",
-            json={"prompt": self.convert_to_prompt(messages), **args},
+            json={"prompt": llama2_template_messages(messages), **args},
             headers={"Authorization": f"Bearer {self.api_key}"},
         ) as resp:
             async for line in resp.content.iter_chunks():
@@ -141,7 +128,7 @@
         )
         async with self._client_session.post(
             f"{self.base_url}/inference",
-            json={"prompt": self.convert_to_prompt(messages), **args},
+            json={"prompt": llama2_template_messages(messages), **args},
             headers={"Authorization": f"Bearer {self.api_key}"},
         ) as resp:
             try:
diff --git a/docs/docs/customization.md b/docs/docs/customization.md
index a1a9111e..096b42b2 100644
--- a/docs/docs/customization.md
+++ b/docs/docs/customization.md
@@ -2,6 +2,25 @@

 Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` on your machine. This file is created the first time you run Continue.

+## Summary of Models
+
+Commercial Models
+
+- [MaybeProxyOpenAI](#adding-an-openai-api-key) - Use gpt-4 or gpt-3.5-turbo for free with our API key, or with your own API key. gpt-4 is probably the most capable model.
+- [OpenAI](#azure-openai-service) - Use any OpenAI model with your own key. You can also change the base URL if you have a server that uses the OpenAI API format, including the Azure OpenAI service, LocalAI, etc.
+- [AnthropicLLM](#claude-2) - Use claude-2 with your Anthropic API key. Claude 2 is also highly capable and has a 100,000-token context window.
+
+Local Models
+
+- [Ollama](#run-llama-2-locally-with-ollama) - If you have a Mac, Ollama is the simplest way to run open-source models like Code Llama.
+- [GGML](#local-models-with-ggml) - Use llama-cpp-python to run a local server with any open-source model.
+- [LlamaCpp](#llama-cpp) - Use llama.cpp directly instead of llama-cpp-python.
+
+Open-Source Models (not local)
+
+- [TogetherLLM](#together) - Use any model from the [Together Models list](https://docs.together.ai/docs/models-inference) with your Together API key.
+- [ReplicateLLM](#replicate) - Use any open-source model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models) with your Replicate API key.
+
 ## Change the default LLM

 In `config.py`, you'll find the `models` property:
@@ -18,7 +37,7 @@ config = ContinueConfig(
     )
 )
 ```

-The `default` model is the one used for most operations, including responding to your messages and editing code. The `medium` model is used for summarization tasks that require less quality. There are also `small` and `large` roles that can be filled, but all will fall back to `default` if not set. The values of these fields must be of the [`LLM`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/__init__.py) class, which implements methods for retrieving and streaming completions from an LLM.
+The `default` and `medium` properties are different _model roles_, which allow different models to be used for different tasks. The available roles are `default`, `small`, `medium`, `large`, `edit`, and `chat`: `edit` is used for the `/edit` slash command, `chat` is used for all chat responses, and `medium` is used for summarization. Any role that is not set falls back to `default`. The values of these fields must be of the [`LLM`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/__init__.py) class, which implements methods for retrieving and streaming completions from an LLM.

 Below, we describe the `LLM` classes available in the Continue core library, and how they can be used.
@@ -129,7 +148,7 @@ config = ContinueConfig(
     ...
     models=Models(
         default=ReplicateLLM(
-            model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
+            model="replicate/codellama-13b-instruct:da5676342de1a5a335b848383af297f592b816b950a43d251a0a9edd0113604b",
             api_key="my-replicate-api-key")
     )
 )
@@ -156,7 +175,7 @@ config = ContinueConfig(
         default=OpenAI(api_key="my-api-key", model="gpt-3.5-turbo", openai_server_info=OpenAIServerInfo(
             api_base="https://my-azure-openai-instance.openai.azure.com/",
             engine="my-azure-openai-deployment",
-            api_version="2023-03-15-preview", # NOTE: It is recommended not to change api_version. Newer versions may not work correctly.
+            api_version="2023-03-15-preview",
             api_type="azure"
         ))
     )
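To illustrate the model-roles change documented above, a config that assigns different models to different roles might look like this (a sketch in the same elided style as the snippets above; the role assignments are illustrative, not recommendations):

```python
from continuedev.src.continuedev.libs.llm.maybe_proxy_openai import MaybeProxyOpenAI
from continuedev.src.continuedev.libs.llm.ollama import Ollama

config = ContinueConfig(
    ...
    models=Models(
        default=MaybeProxyOpenAI(model="gpt-4"),         # unset roles fall back here
        medium=MaybeProxyOpenAI(model="gpt-3.5-turbo"),  # summarization
        edit=Ollama(model="codellama"),                  # /edit slash command
    ),
)
```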
diff --git a/docs/docs/walkthroughs/codellama.md b/docs/docs/walkthroughs/codellama.md
index fdede918..68e99948 100644
--- a/docs/docs/walkthroughs/codellama.md
+++ b/docs/docs/walkthroughs/codellama.md
@@ -31,6 +31,7 @@ config = ContinueConfig(
 1. Download Ollama [here](https://ollama.ai/) (it should walk you through the rest of these steps)
 2. Open a terminal and run `ollama pull codellama`\*
 3. Change your Continue config file to look like this:
+
 ```python
 from continuedev.src.continuedev.libs.llm.ollama import Ollama
@@ -59,7 +60,7 @@ config = ContinueConfig(
     ...
     models=Models(
         default=ReplicateLLM(
-            model="<CODE_LLAMA_MODEL_ID>",
+            model="replicate/codellama-13b-instruct:da5676342de1a5a335b848383af297f592b816b950a43d251a0a9edd0113604b",
             api_key="<MY_REPLICATE_API_KEY>")
     )
 )
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 9a7d23bf..6e318d6d 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -44,6 +44,7 @@
"glob": "^8.0.3",
"json-schema-to-typescript": "^12.0.0",
"mocha": "^10.1.0",
+ "ovsx": "^0.8.3",
"ts-jest": "^29.1.1",
"typescript": "^4.9.3",
"vite": "^4.3.9",
@@ -2758,6 +2759,147 @@
"node": ">=16"
}
},
+ "node_modules/@vscode/vsce": {
+ "version": "2.20.1",
+ "resolved": "https://registry.npmjs.org/@vscode/vsce/-/vsce-2.20.1.tgz",
+ "integrity": "sha512-ilbvoqvR/1/zseRPBAzYR6aKqSJ+jvda4/BqIwOqTxajpvLtEpK3kMLs77+dJdrlygS+VrP7Yhad8j0ukyD96g==",
+ "dev": true,
+ "dependencies": {
+ "azure-devops-node-api": "^11.0.1",
+ "chalk": "^2.4.2",
+ "cheerio": "^1.0.0-rc.9",
+ "commander": "^6.1.0",
+ "glob": "^7.0.6",
+ "hosted-git-info": "^4.0.2",
+ "jsonc-parser": "^3.2.0",
+ "leven": "^3.1.0",
+ "markdown-it": "^12.3.2",
+ "mime": "^1.3.4",
+ "minimatch": "^3.0.3",
+ "parse-semver": "^1.1.1",
+ "read": "^1.0.7",
+ "semver": "^7.5.2",
+ "tmp": "^0.2.1",
+ "typed-rest-client": "^1.8.4",
+ "url-join": "^4.0.1",
+ "xml2js": "^0.5.0",
+ "yauzl": "^2.3.1",
+ "yazl": "^2.2.2"
+ },
+ "bin": {
+ "vsce": "vsce"
+ },
+ "engines": {
+ "node": ">= 14"
+ },
+ "optionalDependencies": {
+ "keytar": "^7.7.0"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "dev": true
+ },
+ "node_modules/@vscode/vsce/node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "dev": true,
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@vscode/vsce/node_modules/xml2js": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.5.0.tgz",
+ "integrity": "sha512-drPFnkQJik/O+uPKpqSgr22mpuFHqKdbS835iAQrUC73L2F5WkboIRd63ai/2Yg6I1jzifPFKH2NTK+cfglkIA==",
+ "dev": true,
+ "dependencies": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~11.0.0"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
"node_modules/abbrev": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
@@ -6003,6 +6145,24 @@
"node": ">=4"
}
},
+ "node_modules/is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "dev": true,
+ "dependencies": {
+ "ci-info": "^2.0.0"
+ },
+ "bin": {
+ "is-ci": "bin.js"
+ }
+ },
+ "node_modules/is-ci/node_modules/ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==",
+ "dev": true
+ },
"node_modules/is-core-module": {
"version": "2.12.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz",
@@ -7097,6 +7257,12 @@
"node": ">=6"
}
},
+ "node_modules/jsonc-parser": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz",
+ "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==",
+ "dev": true
+ },
"node_modules/jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
@@ -8637,6 +8803,27 @@
"node": ">=0.10.0"
}
},
+ "node_modules/ovsx": {
+ "version": "0.8.3",
+ "resolved": "https://registry.npmjs.org/ovsx/-/ovsx-0.8.3.tgz",
+ "integrity": "sha512-LG7wTzy4eYV/KolFeO4AwWPzQSARvCONzd5oHQlNvYOlji2r/zjbdK8pyObZN84uZlk6rQBWrJrAdJfh/SX0Hg==",
+ "dev": true,
+ "dependencies": {
+ "@vscode/vsce": "^2.19.0",
+ "commander": "^6.1.0",
+ "follow-redirects": "^1.14.6",
+ "is-ci": "^2.0.0",
+ "leven": "^3.1.0",
+ "semver": "^7.5.2",
+ "tmp": "^0.2.1"
+ },
+ "bin": {
+ "ovsx": "lib/ovsx"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
"node_modules/p-cancelable": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz",
@@ -13672,6 +13859,117 @@
"unzipper": "^0.10.11"
}
},
+ "@vscode/vsce": {
+ "version": "2.20.1",
+ "resolved": "https://registry.npmjs.org/@vscode/vsce/-/vsce-2.20.1.tgz",
+ "integrity": "sha512-ilbvoqvR/1/zseRPBAzYR6aKqSJ+jvda4/BqIwOqTxajpvLtEpK3kMLs77+dJdrlygS+VrP7Yhad8j0ukyD96g==",
+ "dev": true,
+ "requires": {
+ "azure-devops-node-api": "^11.0.1",
+ "chalk": "^2.4.2",
+ "cheerio": "^1.0.0-rc.9",
+ "commander": "^6.1.0",
+ "glob": "^7.0.6",
+ "hosted-git-info": "^4.0.2",
+ "jsonc-parser": "^3.2.0",
+ "keytar": "^7.7.0",
+ "leven": "^3.1.0",
+ "markdown-it": "^12.3.2",
+ "mime": "^1.3.4",
+ "minimatch": "^3.0.3",
+ "parse-semver": "^1.1.1",
+ "read": "^1.0.7",
+ "semver": "^7.5.2",
+ "tmp": "^0.2.1",
+ "typed-rest-client": "^1.8.4",
+ "url-join": "^4.0.1",
+ "xml2js": "^0.5.0",
+ "yauzl": "^2.3.1",
+ "yazl": "^2.2.2"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "dev": true
+ },
+ "glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "xml2js": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.5.0.tgz",
+ "integrity": "sha512-drPFnkQJik/O+uPKpqSgr22mpuFHqKdbS835iAQrUC73L2F5WkboIRd63ai/2Yg6I1jzifPFKH2NTK+cfglkIA==",
+ "dev": true,
+ "requires": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~11.0.0"
+ }
+ }
+ }
+ },
"abbrev": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
@@ -16066,6 +16364,23 @@
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
"integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ=="
},
+ "is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "dev": true,
+ "requires": {
+ "ci-info": "^2.0.0"
+ },
+ "dependencies": {
+ "ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==",
+ "dev": true
+ }
+ }
+ },
"is-core-module": {
"version": "2.12.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz",
@@ -16908,6 +17223,12 @@
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
"dev": true
},
+ "jsonc-parser": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz",
+ "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==",
+ "dev": true
+ },
"jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
@@ -17978,6 +18299,21 @@
"integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==",
"dev": true
},
+ "ovsx": {
+ "version": "0.8.3",
+ "resolved": "https://registry.npmjs.org/ovsx/-/ovsx-0.8.3.tgz",
+ "integrity": "sha512-LG7wTzy4eYV/KolFeO4AwWPzQSARvCONzd5oHQlNvYOlji2r/zjbdK8pyObZN84uZlk6rQBWrJrAdJfh/SX0Hg==",
+ "dev": true,
+ "requires": {
+ "@vscode/vsce": "^2.19.0",
+ "commander": "^6.1.0",
+ "follow-redirects": "^1.14.6",
+ "is-ci": "^2.0.0",
+ "leven": "^3.1.0",
+ "semver": "^7.5.2",
+ "tmp": "^0.2.1"
+ }
+ },
"p-cancelable": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz",
diff --git a/extension/package.json b/extension/package.json
index 2aaa4ff3..8224cfea 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -230,6 +230,7 @@
     "glob": "^8.0.3",
     "json-schema-to-typescript": "^12.0.0",
     "mocha": "^10.1.0",
+    "ovsx": "^0.8.3",
     "ts-jest": "^29.1.1",
     "typescript": "^4.9.3",
     "vite": "^4.3.9",
diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts
index 2067f0fb..6b434756 100644
--- a/extension/src/activation/environmentSetup.ts
+++ b/extension/src/activation/environmentSetup.ts
@@ -330,8 +330,11 @@ export async function startContinuePythonServer(redownload: boolean = true) {
         child.unref();
       }
     } catch (e: any) {
-      console.log("Error starting server:", e);
-      retry(e);
+      if (attempts < maxAttempts) {
+        retry(e);
+      } else {
+        throw e;
+      }
     }
   };
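The shape of that last fix generalizes: retry while attempts remain, then let the error propagate instead of looping forever. A language-neutral sketch in Python (the names mirror the TypeScript above; the attempt counter is assumed to be tracked where the TypeScript tracks it):

```python
def start_with_retries(start, max_attempts: int = 5) -> None:
    attempts = 0
    while True:
        attempts += 1
        try:
            start()
            return
        except Exception:
            if attempts >= max_attempts:
                raise  # surface the error once retries are exhausted


# Usage: keeps retrying a flaky startup, then re-raises on the final failure.
start_with_retries(lambda: print("starting server"))
```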