summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTuowen Zhao <ztuowen@gmail.com>2023-10-19 00:20:03 -0700
committerTuowen Zhao <ztuowen@gmail.com>2023-10-19 00:20:03 -0700
commit7f487f3bde49398f52595f3e062a329dae898039 (patch)
tree065bdad448797ba3bc97389303ca18c3856af542
parent770d34113daa4ff8cb6fc6fa2ddaa37351750060 (diff)
downloadsncontinue-7f487f3bde49398f52595f3e062a329dae898039.tar.gz
sncontinue-7f487f3bde49398f52595f3e062a329dae898039.tar.bz2
sncontinue-7f487f3bde49398f52595f3e062a329dae898039.zip
update schema
-rw-r--r--extension/schema/ContinueConfig.d.ts53
-rw-r--r--extension/schema/LLM.d.ts49
-rw-r--r--extension/schema/Models.d.ts49
-rw-r--r--schema/json/ContinueConfig.json97
-rw-r--r--schema/json/LLM.json61
-rw-r--r--schema/json/Models.json57
6 files changed, 272 insertions, 94 deletions
diff --git a/extension/schema/ContinueConfig.d.ts b/extension/schema/ContinueConfig.d.ts
index b9eb92ff..7a4bdf8f 100644
--- a/extension/schema/ContinueConfig.d.ts
+++ b/extension/schema/ContinueConfig.d.ts
@@ -40,6 +40,14 @@ export type Models = Models1;
*/
export type Title = string;
/**
+ * The unique ID of the user.
+ */
+export type UniqueId = string;
+/**
+ * The name of the model to be used (e.g. gpt-4, codellama)
+ */
+export type Model = string;
+/**
* A system message that will always be followed by the LLM
*/
export type SystemMessage1 = string;
@@ -48,21 +56,29 @@ export type SystemMessage1 = string;
*/
export type ContextLength = number;
/**
- * The unique ID of the user.
+ * Tokens that will stop the completion.
*/
-export type UniqueId = string;
+export type StopTokens = string[];
/**
- * The name of the model to be used (e.g. gpt-4, codellama)
+ * The temperature of the completion.
*/
-export type Model = string;
+export type Temperature = number;
/**
- * The maximum number of tokens to generate.
+ * The top_p of the completion.
*/
-export type MaxTokens = number;
+export type TopP = number;
/**
- * Tokens that will stop the completion.
+ * The top_k of the completion.
*/
-export type StopTokens = string[];
+export type TopK = number;
+/**
+ * The presence penalty of the completion.
+ */
+export type PresencePenalty = number;
+/**
+ * The frequency penalty of the completion.
+ */
+export type FrequencyPenalty = number;
/**
* Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
*/
@@ -87,7 +103,7 @@ export type Saved = LLM[];
/**
* The temperature parameter for sampling from the LLM. Higher temperatures will result in more random output, while lower temperatures will result in more predictable output. This value ranges from 0 to 1.
*/
-export type Temperature = number;
+export type Temperature1 = number;
export type Name3 = string;
export type Prompt = string;
export type Description1 = string;
@@ -173,7 +189,7 @@ export interface ContinueConfig1 {
disallowed_steps?: DisallowedSteps;
allow_anonymous_telemetry?: AllowAnonymousTelemetry;
models?: Models;
- temperature?: Temperature;
+ temperature?: Temperature1;
custom_commands?: CustomCommands;
slash_commands?: SlashCommands;
on_traceback?: OnTraceback;
@@ -222,21 +238,32 @@ export interface Models1 {
}
export interface LLM {
title?: Title;
- system_message?: SystemMessage1;
- context_length?: ContextLength;
unique_id?: UniqueId;
model: Model;
- max_tokens?: MaxTokens;
+ system_message?: SystemMessage1;
+ context_length?: ContextLength;
stop_tokens?: StopTokens;
+ temperature?: Temperature;
+ top_p?: TopP;
+ top_k?: TopK;
+ presence_penalty?: PresencePenalty;
+ frequency_penalty?: FrequencyPenalty;
timeout?: Timeout;
verify_ssl?: VerifySsl;
ca_bundle_path?: CaBundlePath;
proxy?: Proxy;
+ headers?: Headers;
prompt_templates?: PromptTemplates;
api_key?: ApiKey;
[k: string]: unknown;
}
/**
+ * Headers to use when making the HTTP request
+ */
+export interface Headers {
+ [k: string]: string;
+}
+/**
* A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
*/
export interface PromptTemplates {
diff --git a/extension/schema/LLM.d.ts b/extension/schema/LLM.d.ts
index fbd5aa41..f17f969a 100644
--- a/extension/schema/LLM.d.ts
+++ b/extension/schema/LLM.d.ts
@@ -11,6 +11,14 @@ export type LLM = LLM1;
*/
export type Title = string;
/**
+ * The unique ID of the user.
+ */
+export type UniqueId = string;
+/**
+ * The name of the model to be used (e.g. gpt-4, codellama)
+ */
+export type Model = string;
+/**
* A system message that will always be followed by the LLM
*/
export type SystemMessage = string;
@@ -19,21 +27,29 @@ export type SystemMessage = string;
*/
export type ContextLength = number;
/**
- * The unique ID of the user.
+ * Tokens that will stop the completion.
*/
-export type UniqueId = string;
+export type StopTokens = string[];
/**
- * The name of the model to be used (e.g. gpt-4, codellama)
+ * The temperature of the completion.
*/
-export type Model = string;
+export type Temperature = number;
/**
- * The maximum number of tokens to generate.
+ * The top_p of the completion.
*/
-export type MaxTokens = number;
+export type TopP = number;
/**
- * Tokens that will stop the completion.
+ * The top_k of the completion.
*/
-export type StopTokens = string[];
+export type TopK = number;
+/**
+ * The presence penalty of the completion.
+ */
+export type PresencePenalty = number;
+/**
+ * The frequency penalty of the completion.
+ */
+export type FrequencyPenalty = number;
/**
* Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
*/
@@ -57,21 +73,32 @@ export type ApiKey = string;
export interface LLM1 {
title?: Title;
- system_message?: SystemMessage;
- context_length?: ContextLength;
unique_id?: UniqueId;
model: Model;
- max_tokens?: MaxTokens;
+ system_message?: SystemMessage;
+ context_length?: ContextLength;
stop_tokens?: StopTokens;
+ temperature?: Temperature;
+ top_p?: TopP;
+ top_k?: TopK;
+ presence_penalty?: PresencePenalty;
+ frequency_penalty?: FrequencyPenalty;
timeout?: Timeout;
verify_ssl?: VerifySsl;
ca_bundle_path?: CaBundlePath;
proxy?: Proxy;
+ headers?: Headers;
prompt_templates?: PromptTemplates;
api_key?: ApiKey;
[k: string]: unknown;
}
/**
+ * Headers to use when making the HTTP request
+ */
+export interface Headers {
+ [k: string]: string;
+}
+/**
* A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
*/
export interface PromptTemplates {
diff --git a/extension/schema/Models.d.ts b/extension/schema/Models.d.ts
index 9ce79b60..a74c94bf 100644
--- a/extension/schema/Models.d.ts
+++ b/extension/schema/Models.d.ts
@@ -11,6 +11,14 @@ export type Models = Models1;
*/
export type Title = string;
/**
+ * The unique ID of the user.
+ */
+export type UniqueId = string;
+/**
+ * The name of the model to be used (e.g. gpt-4, codellama)
+ */
+export type Model = string;
+/**
* A system message that will always be followed by the LLM
*/
export type SystemMessage = string;
@@ -19,21 +27,29 @@ export type SystemMessage = string;
*/
export type ContextLength = number;
/**
- * The unique ID of the user.
+ * Tokens that will stop the completion.
*/
-export type UniqueId = string;
+export type StopTokens = string[];
/**
- * The name of the model to be used (e.g. gpt-4, codellama)
+ * The temperature of the completion.
*/
-export type Model = string;
+export type Temperature = number;
/**
- * The maximum number of tokens to generate.
+ * The top_p of the completion.
*/
-export type MaxTokens = number;
+export type TopP = number;
/**
- * Tokens that will stop the completion.
+ * The top_k of the completion.
*/
-export type StopTokens = string[];
+export type TopK = number;
+/**
+ * The presence penalty of the completion.
+ */
+export type PresencePenalty = number;
+/**
+ * The frequency penalty of the completion.
+ */
+export type FrequencyPenalty = number;
/**
* Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
*/
@@ -70,21 +86,32 @@ export interface Models1 {
}
export interface LLM {
title?: Title;
- system_message?: SystemMessage;
- context_length?: ContextLength;
unique_id?: UniqueId;
model: Model;
- max_tokens?: MaxTokens;
+ system_message?: SystemMessage;
+ context_length?: ContextLength;
stop_tokens?: StopTokens;
+ temperature?: Temperature;
+ top_p?: TopP;
+ top_k?: TopK;
+ presence_penalty?: PresencePenalty;
+ frequency_penalty?: FrequencyPenalty;
timeout?: Timeout;
verify_ssl?: VerifySsl;
ca_bundle_path?: CaBundlePath;
proxy?: Proxy;
+ headers?: Headers;
prompt_templates?: PromptTemplates;
api_key?: ApiKey;
[k: string]: unknown;
}
/**
+ * Headers to use when making the HTTP request
+ */
+export interface Headers {
+ [k: string]: string;
+}
+/**
* A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
*/
export interface PromptTemplates {
diff --git a/schema/json/ContinueConfig.json b/schema/json/ContinueConfig.json
index 74d24e2f..ab3834f8 100644
--- a/schema/json/ContinueConfig.json
+++ b/schema/json/ContinueConfig.json
@@ -105,17 +105,6 @@
"description": "A title that will identify this model in the model selection dropdown",
"type": "string"
},
- "system_message": {
- "title": "System Message",
- "description": "A system message that will always be followed by the LLM",
- "type": "string"
- },
- "context_length": {
- "title": "Context Length",
- "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.",
- "default": 2048,
- "type": "integer"
- },
"unique_id": {
"title": "Unique Id",
"description": "The unique ID of the user.",
@@ -126,10 +115,15 @@
"description": "The name of the model to be used (e.g. gpt-4, codellama)",
"type": "string"
},
- "max_tokens": {
- "title": "Max Tokens",
- "description": "The maximum number of tokens to generate.",
- "default": 1024,
+ "system_message": {
+ "title": "System Message",
+ "description": "A system message that will always be followed by the LLM",
+ "type": "string"
+ },
+ "context_length": {
+ "title": "Context Length",
+ "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.",
+ "default": 2048,
"type": "integer"
},
"stop_tokens": {
@@ -140,6 +134,31 @@
"type": "string"
}
},
+ "temperature": {
+ "title": "Temperature",
+ "description": "The temperature of the completion.",
+ "type": "number"
+ },
+ "top_p": {
+ "title": "Top P",
+ "description": "The top_p of the completion.",
+ "type": "number"
+ },
+ "top_k": {
+ "title": "Top K",
+ "description": "The top_k of the completion.",
+ "type": "integer"
+ },
+ "presence_penalty": {
+ "title": "Presence Penalty",
+        "description": "The presence penalty of the completion.",
+ "type": "number"
+ },
+ "frequency_penalty": {
+ "title": "Frequency Penalty",
+ "description": "The frequency penalty of the completion.",
+ "type": "number"
+ },
"timeout": {
"title": "Timeout",
"description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.",
@@ -161,6 +180,14 @@
"description": "Proxy URL to use when making the HTTP request",
"type": "string"
},
+ "headers": {
+ "title": "Headers",
+ "description": "Headers to use when making the HTTP request",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
"prompt_templates": {
"title": "Prompt Templates",
"description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.",
@@ -438,36 +465,52 @@
"description": "Configuration for the models used by Continue. Read more about how to configure models in the documentation.",
"default": {
"default": {
- "title": null,
+ "title": "CodeLlama-7b-Instruct",
+ "model": "codellama:7b-instruct",
"system_message": null,
"context_length": 2048,
- "model": "gpt-4",
- "max_tokens": 1024,
"stop_tokens": null,
+ "temperature": null,
+ "top_p": null,
+ "top_k": null,
+ "presence_penalty": null,
+ "frequency_penalty": null,
"timeout": 300,
"verify_ssl": null,
"ca_bundle_path": null,
"proxy": null,
- "prompt_templates": {},
+ "headers": null,
+ "prompt_templates": {
+ "edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."
+ },
+ "template_messages": "llama2_template_messages",
"api_key": null,
- "llm": null,
- "class_name": "OpenAIFreeTrial"
+ "server_url": "http://localhost:11434",
+ "class_name": "Ollama"
},
"summarize": {
- "title": null,
+ "title": "CodeLlama-7b-Instruct",
+ "model": "codellama:7b-instruct",
"system_message": null,
"context_length": 2048,
- "model": "gpt-3.5-turbo",
- "max_tokens": 1024,
"stop_tokens": null,
+ "temperature": null,
+ "top_p": null,
+ "top_k": null,
+ "presence_penalty": null,
+ "frequency_penalty": null,
"timeout": 300,
"verify_ssl": null,
"ca_bundle_path": null,
"proxy": null,
- "prompt_templates": {},
+ "headers": null,
+ "prompt_templates": {
+ "edit": "Consider the following code:\n```\n{{{code_to_edit}}}\n```\nEdit the code to perfectly satisfy the following user request:\n{{{user_input}}}\nOutput nothing except for the code. No code block, no English explanation, no start/end tags."
+ },
+ "template_messages": "llama2_template_messages",
"api_key": null,
- "llm": null,
- "class_name": "OpenAIFreeTrial"
+ "server_url": "http://localhost:11434",
+ "class_name": "Ollama"
},
"edit": null,
"chat": null,
diff --git a/schema/json/LLM.json b/schema/json/LLM.json
index 30d4d684..cde480e8 100644
--- a/schema/json/LLM.json
+++ b/schema/json/LLM.json
@@ -1,8 +1,8 @@
{
"title": "LLM",
- "$ref": "#/definitions/continuedev__libs__llm__LLM",
+ "$ref": "#/definitions/continuedev__libs__llm__base__LLM",
"definitions": {
- "continuedev__libs__llm__LLM": {
+ "continuedev__libs__llm__base__LLM": {
"title": "LLM",
"type": "object",
"properties": {
@@ -11,17 +11,6 @@
"description": "A title that will identify this model in the model selection dropdown",
"type": "string"
},
- "system_message": {
- "title": "System Message",
- "description": "A system message that will always be followed by the LLM",
- "type": "string"
- },
- "context_length": {
- "title": "Context Length",
- "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.",
- "default": 2048,
- "type": "integer"
- },
"unique_id": {
"title": "Unique Id",
"description": "The unique ID of the user.",
@@ -32,10 +21,15 @@
"description": "The name of the model to be used (e.g. gpt-4, codellama)",
"type": "string"
},
- "max_tokens": {
- "title": "Max Tokens",
- "description": "The maximum number of tokens to generate.",
- "default": 1024,
+ "system_message": {
+ "title": "System Message",
+ "description": "A system message that will always be followed by the LLM",
+ "type": "string"
+ },
+ "context_length": {
+ "title": "Context Length",
+ "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.",
+ "default": 2048,
"type": "integer"
},
"stop_tokens": {
@@ -46,6 +40,31 @@
"type": "string"
}
},
+ "temperature": {
+ "title": "Temperature",
+ "description": "The temperature of the completion.",
+ "type": "number"
+ },
+ "top_p": {
+ "title": "Top P",
+ "description": "The top_p of the completion.",
+ "type": "number"
+ },
+ "top_k": {
+ "title": "Top K",
+ "description": "The top_k of the completion.",
+ "type": "integer"
+ },
+ "presence_penalty": {
+ "title": "Presence Penalty",
+          "description": "The presence penalty of the completion.",
+ "type": "number"
+ },
+ "frequency_penalty": {
+ "title": "Frequency Penalty",
+ "description": "The frequency penalty of the completion.",
+ "type": "number"
+ },
"timeout": {
"title": "Timeout",
"description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.",
@@ -67,6 +86,14 @@
"description": "Proxy URL to use when making the HTTP request",
"type": "string"
},
+ "headers": {
+ "title": "Headers",
+ "description": "Headers to use when making the HTTP request",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
"prompt_templates": {
"title": "Prompt Templates",
"description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.",
diff --git a/schema/json/Models.json b/schema/json/Models.json
index 19044d93..361ae9eb 100644
--- a/schema/json/Models.json
+++ b/schema/json/Models.json
@@ -11,17 +11,6 @@
"description": "A title that will identify this model in the model selection dropdown",
"type": "string"
},
- "system_message": {
- "title": "System Message",
- "description": "A system message that will always be followed by the LLM",
- "type": "string"
- },
- "context_length": {
- "title": "Context Length",
- "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.",
- "default": 2048,
- "type": "integer"
- },
"unique_id": {
"title": "Unique Id",
"description": "The unique ID of the user.",
@@ -32,10 +21,15 @@
"description": "The name of the model to be used (e.g. gpt-4, codellama)",
"type": "string"
},
- "max_tokens": {
- "title": "Max Tokens",
- "description": "The maximum number of tokens to generate.",
- "default": 1024,
+ "system_message": {
+ "title": "System Message",
+ "description": "A system message that will always be followed by the LLM",
+ "type": "string"
+ },
+ "context_length": {
+ "title": "Context Length",
+ "description": "The maximum context length of the LLM in tokens, as counted by count_tokens.",
+ "default": 2048,
"type": "integer"
},
"stop_tokens": {
@@ -46,6 +40,31 @@
"type": "string"
}
},
+ "temperature": {
+ "title": "Temperature",
+ "description": "The temperature of the completion.",
+ "type": "number"
+ },
+ "top_p": {
+ "title": "Top P",
+ "description": "The top_p of the completion.",
+ "type": "number"
+ },
+ "top_k": {
+ "title": "Top K",
+ "description": "The top_k of the completion.",
+ "type": "integer"
+ },
+ "presence_penalty": {
+ "title": "Presence Penalty",
+          "description": "The presence penalty of the completion.",
+ "type": "number"
+ },
+ "frequency_penalty": {
+ "title": "Frequency Penalty",
+ "description": "The frequency penalty of the completion.",
+ "type": "number"
+ },
"timeout": {
"title": "Timeout",
"description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.",
@@ -67,6 +86,14 @@
"description": "Proxy URL to use when making the HTTP request",
"type": "string"
},
+ "headers": {
+ "title": "Headers",
+ "description": "Headers to use when making the HTTP request",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
"prompt_templates": {
"title": "Prompt Templates",
"description": "A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the \"edit\" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.",