author     Nate Sesti <33237525+sestinj@users.noreply.github.com>   2023-10-09 18:37:27 -0700
committer  GitHub <noreply@github.com>                              2023-10-09 18:37:27 -0700
commit     f09150617ed2454f3074bcf93f53aae5ae637d40 (patch)
tree       5cfe614a64d921dfe58b049f426d67a8b832c71f /schema/json/ContinueConfig.json
parent     985304a213f620cdff3f8f65f74ed7e3b79be29d (diff)
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action <action@github.com>
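The commit message above mentions dropping the continuedev.src.continuedev package structure and shortening config.py imports; the diff below renames the schema definitions accordingly (the src__ prefix disappears) and adds a max_tokens option with a default of 1024 to the model settings. As a rough, hedged sketch only, a user-level ~/.continue/config.py under the new layout might look like the following; the Models and OpenAI class names and their module paths are assumptions inferred from the definition names in the schema, not confirmed by this diff.

# Hypothetical ~/.continue/config.py sketch. Class names (Models, OpenAI) and
# module paths are assumptions; field names and defaults come from the diff below.
# Provider-specific settings such as API keys are omitted.
from continuedev.core.config import ContinueConfig   # shortened path, no src.continuedev prefix
from continuedev.core.models import Models           # assumed location of the models container
from continuedev.libs.llm.openai import OpenAI       # assumed provider class

config = ContinueConfig(
    models=Models(
        default=OpenAI(
            model="gpt-4",        # default model shown in the schema
            context_length=2048,  # default shown in the schema
            max_tokens=1024,      # new field added in this commit, default 1024
        ),
    ),
)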
Diffstat (limited to 'schema/json/ContinueConfig.json')
-rw-r--r--  schema/json/ContinueConfig.json | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/schema/json/ContinueConfig.json b/schema/json/ContinueConfig.json
index e78bb3c9..74d24e2f 100644
--- a/schema/json/ContinueConfig.json
+++ b/schema/json/ContinueConfig.json
@@ -1,6 +1,6 @@
{
"title": "ContinueConfig",
- "$ref": "#/definitions/src__continuedev__core__config__ContinueConfig",
+ "$ref": "#/definitions/continuedev__core__config__ContinueConfig",
"definitions": {
"FunctionCall": {
"title": "FunctionCall",
@@ -126,6 +126,12 @@
"description": "The name of the model to be used (e.g. gpt-4, codellama)",
"type": "string"
},
+ "max_tokens": {
+ "title": "Max Tokens",
+ "description": "The maximum number of tokens to generate.",
+ "default": 1024,
+ "type": "integer"
+ },
"stop_tokens": {
"title": "Stop Tokens",
"description": "Tokens that will stop the completion.",
@@ -171,7 +177,7 @@
"model"
]
},
- "src__continuedev__core__models__ContinueSDK": {
+ "continuedev__core__models__ContinueSDK": {
"title": "ContinueSDK",
"type": "object",
"properties": {}
@@ -202,7 +208,7 @@
}
},
"sdk": {
- "$ref": "#/definitions/src__continuedev__core__models__ContinueSDK"
+ "$ref": "#/definitions/continuedev__core__models__ContinueSDK"
}
},
"required": [
@@ -265,7 +271,7 @@
"type": "object",
"properties": {}
},
- "src__continuedev__core__context__ContinueSDK": {
+ "continuedev__core__context__ContinueSDK": {
"title": "ContinueSDK",
"description": "To avoid circular imports",
"type": "object",
@@ -356,7 +362,7 @@
"description": "The ContinueSDK instance accessible by the ContextProvider",
"allOf": [
{
- "$ref": "#/definitions/src__continuedev__core__context__ContinueSDK"
+ "$ref": "#/definitions/continuedev__core__context__ContinueSDK"
}
]
},
@@ -398,7 +404,7 @@
"dynamic"
]
},
- "src__continuedev__core__config__ContinueConfig": {
+ "continuedev__core__config__ContinueConfig": {
"title": "ContinueConfig",
"description": "Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\\.continue\\config.py` for Windows) on your machine. This class is instantiated from the config file for every new session.",
"type": "object",
@@ -436,6 +442,7 @@
"system_message": null,
"context_length": 2048,
"model": "gpt-4",
+ "max_tokens": 1024,
"stop_tokens": null,
"timeout": 300,
"verify_ssl": null,
@@ -451,6 +458,7 @@
"system_message": null,
"context_length": 2048,
"model": "gpt-3.5-turbo",
+ "max_tokens": 1024,
"stop_tokens": null,
"timeout": 300,
"verify_ssl": null,