Diffstat (limited to 'extension/schema')
-rw-r--r--  extension/schema/ContinueConfig.d.ts  53
-rw-r--r--  extension/schema/LLM.d.ts             49
-rw-r--r--  extension/schema/Models.d.ts          49
3 files changed, 116 insertions(+), 35 deletions(-)
diff --git a/extension/schema/ContinueConfig.d.ts b/extension/schema/ContinueConfig.d.ts
index b9eb92ff..7a4bdf8f 100644
--- a/extension/schema/ContinueConfig.d.ts
+++ b/extension/schema/ContinueConfig.d.ts
@@ -40,6 +40,14 @@ export type Models = Models1;
*/
export type Title = string;
/**
+ * The unique ID of the user.
+ */
+export type UniqueId = string;
+/**
+ * The name of the model to be used (e.g. gpt-4, codellama)
+ */
+export type Model = string;
+/**
* A system message that will always be followed by the LLM
*/
export type SystemMessage1 = string;
@@ -48,21 +56,29 @@ export type SystemMessage1 = string;
*/
export type ContextLength = number;
/**
- * The unique ID of the user.
+ * Tokens that will stop the completion.
*/
-export type UniqueId = string;
+export type StopTokens = string[];
/**
- * The name of the model to be used (e.g. gpt-4, codellama)
+ * The temperature of the completion.
*/
-export type Model = string;
+export type Temperature = number;
/**
- * The maximum number of tokens to generate.
+ * The top_p of the completion.
*/
-export type MaxTokens = number;
+export type TopP = number;
/**
- * Tokens that will stop the completion.
+ * The top_k of the completion.
*/
-export type StopTokens = string[];
+export type TopK = number;
+/**
+ * The presence penalty of the completion.
+ */
+export type PresencePenalty = number;
+/**
+ * The frequency penalty of the completion.
+ */
+export type FrequencyPenalty = number;
/**
* Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
*/
@@ -87,7 +103,7 @@ export type Saved = LLM[];
/**
* The temperature parameter for sampling from the LLM. Higher temperatures will result in more random output, while lower temperatures will result in more predictable output. This value ranges from 0 to 1.
*/
-export type Temperature = number;
+export type Temperature1 = number;
export type Name3 = string;
export type Prompt = string;
export type Description1 = string;
@@ -173,7 +189,7 @@ export interface ContinueConfig1 {
disallowed_steps?: DisallowedSteps;
allow_anonymous_telemetry?: AllowAnonymousTelemetry;
models?: Models;
- temperature?: Temperature;
+ temperature?: Temperature1;
custom_commands?: CustomCommands;
slash_commands?: SlashCommands;
on_traceback?: OnTraceback;
@@ -222,21 +238,32 @@ export interface Models1 {
}
export interface LLM {
title?: Title;
- system_message?: SystemMessage1;
- context_length?: ContextLength;
unique_id?: UniqueId;
model: Model;
- max_tokens?: MaxTokens;
+ system_message?: SystemMessage1;
+ context_length?: ContextLength;
stop_tokens?: StopTokens;
+ temperature?: Temperature;
+ top_p?: TopP;
+ top_k?: TopK;
+ presence_penalty?: PresencePenalty;
+ frequency_penalty?: FrequencyPenalty;
timeout?: Timeout;
verify_ssl?: VerifySsl;
ca_bundle_path?: CaBundlePath;
proxy?: Proxy;
+ headers?: Headers;
prompt_templates?: PromptTemplates;
api_key?: ApiKey;
[k: string]: unknown;
}
/**
+ * Headers to use when making the HTTP request
+ */
+export interface Headers {
+ [k: string]: string;
+}
+/**
* A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
*/
export interface PromptTemplates {
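
A minimal sketch (not part of this diff) of a model entry written against the updated LLM shape from ContinueConfig.d.ts, showing the new per-model sampling fields and the headers field. All values are illustrative assumptions, not defaults taken from the schema.

import type { LLM } from "./ContinueConfig";

// Hypothetical model entry; field values are examples only.
const exampleModel: LLM = {
  title: "Local CodeLlama",                  // optional display title
  model: "codellama",                        // required: name of the model
  context_length: 4096,                      // context window size
  stop_tokens: ["</s>"],                     // tokens that stop the completion
  temperature: 0.5,                          // new per-model sampling fields
  top_p: 0.9,
  top_k: 40,
  presence_penalty: 0.0,
  frequency_penalty: 0.0,
  timeout: 300,                              // request timeout (unit assumed to be seconds)
  headers: { "X-Example-Header": "value" },  // extra HTTP request headers (new Headers interface)
};
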
diff --git a/extension/schema/LLM.d.ts b/extension/schema/LLM.d.ts
index fbd5aa41..f17f969a 100644
--- a/extension/schema/LLM.d.ts
+++ b/extension/schema/LLM.d.ts
@@ -11,6 +11,14 @@ export type LLM = LLM1;
*/
export type Title = string;
/**
+ * The unique ID of the user.
+ */
+export type UniqueId = string;
+/**
+ * The name of the model to be used (e.g. gpt-4, codellama)
+ */
+export type Model = string;
+/**
* A system message that will always be followed by the LLM
*/
export type SystemMessage = string;
@@ -19,21 +27,29 @@ export type SystemMessage = string;
*/
export type ContextLength = number;
/**
- * The unique ID of the user.
+ * Tokens that will stop the completion.
*/
-export type UniqueId = string;
+export type StopTokens = string[];
/**
- * The name of the model to be used (e.g. gpt-4, codellama)
+ * The temperature of the completion.
*/
-export type Model = string;
+export type Temperature = number;
/**
- * The maximum number of tokens to generate.
+ * The top_p of the completion.
*/
-export type MaxTokens = number;
+export type TopP = number;
/**
- * Tokens that will stop the completion.
+ * The top_k of the completion.
*/
-export type StopTokens = string[];
+export type TopK = number;
+/**
+ * The presence penalty of the completion.
+ */
+export type PresencePenalty = number;
+/**
+ * The frequency penalty of the completion.
+ */
+export type FrequencyPenalty = number;
/**
* Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
*/
@@ -57,21 +73,32 @@ export type ApiKey = string;
export interface LLM1 {
title?: Title;
- system_message?: SystemMessage;
- context_length?: ContextLength;
unique_id?: UniqueId;
model: Model;
- max_tokens?: MaxTokens;
+ system_message?: SystemMessage;
+ context_length?: ContextLength;
stop_tokens?: StopTokens;
+ temperature?: Temperature;
+ top_p?: TopP;
+ top_k?: TopK;
+ presence_penalty?: PresencePenalty;
+ frequency_penalty?: FrequencyPenalty;
timeout?: Timeout;
verify_ssl?: VerifySsl;
ca_bundle_path?: CaBundlePath;
proxy?: Proxy;
+ headers?: Headers;
prompt_templates?: PromptTemplates;
api_key?: ApiKey;
[k: string]: unknown;
}
/**
+ * Headers to use when making the HTTP request
+ */
+export interface Headers {
+ [k: string]: string;
+}
+/**
* A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
*/
export interface PromptTemplates {
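
Based on the PromptTemplates doc comment above, a hedged sketch of overriding the /edit prompt on a model. The exact PromptTemplates shape is truncated in this hunk, so the string-to-string mapping and the mustache variable names used here are assumptions.

import type { LLM1 } from "./LLM";

// Hypothetical: the "edit" key replaces the prompt used by the /edit slash command.
// Mustache-style placeholders; the variable names are illustrative, not confirmed by the schema.
const modelWithTemplates: LLM1 = {
  model: "gpt-4",
  prompt_templates: {
    edit: "Rewrite the code below to satisfy the request.\n\n{{{code_to_edit}}}\n\nRequest: {{{user_input}}}",
  },
};
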
diff --git a/extension/schema/Models.d.ts b/extension/schema/Models.d.ts
index 9ce79b60..a74c94bf 100644
--- a/extension/schema/Models.d.ts
+++ b/extension/schema/Models.d.ts
@@ -11,6 +11,14 @@ export type Models = Models1;
*/
export type Title = string;
/**
+ * The unique ID of the user.
+ */
+export type UniqueId = string;
+/**
+ * The name of the model to be used (e.g. gpt-4, codellama)
+ */
+export type Model = string;
+/**
* A system message that will always be followed by the LLM
*/
export type SystemMessage = string;
@@ -19,21 +27,29 @@ export type SystemMessage = string;
*/
export type ContextLength = number;
/**
- * The unique ID of the user.
+ * Tokens that will stop the completion.
*/
-export type UniqueId = string;
+export type StopTokens = string[];
/**
- * The name of the model to be used (e.g. gpt-4, codellama)
+ * The temperature of the completion.
*/
-export type Model = string;
+export type Temperature = number;
/**
- * The maximum number of tokens to generate.
+ * The top_p of the completion.
*/
-export type MaxTokens = number;
+export type TopP = number;
/**
- * Tokens that will stop the completion.
+ * The top_k of the completion.
*/
-export type StopTokens = string[];
+export type TopK = number;
+/**
+ * The presence penalty of the completion.
+ */
+export type PresencePenalty = number;
+/**
+ * The frequency penalty of the completion.
+ */
+export type FrequencyPenalty = number;
/**
* Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
*/
@@ -70,21 +86,32 @@ export interface Models1 {
}
export interface LLM {
title?: Title;
- system_message?: SystemMessage;
- context_length?: ContextLength;
unique_id?: UniqueId;
model: Model;
- max_tokens?: MaxTokens;
+ system_message?: SystemMessage;
+ context_length?: ContextLength;
stop_tokens?: StopTokens;
+ temperature?: Temperature;
+ top_p?: TopP;
+ top_k?: TopK;
+ presence_penalty?: PresencePenalty;
+ frequency_penalty?: FrequencyPenalty;
timeout?: Timeout;
verify_ssl?: VerifySsl;
ca_bundle_path?: CaBundlePath;
proxy?: Proxy;
+ headers?: Headers;
prompt_templates?: PromptTemplates;
api_key?: ApiKey;
[k: string]: unknown;
}
/**
+ * Headers to use when making the HTTP request
+ */
+export interface Headers {
+ [k: string]: string;
+}
+/**
* A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
*/
export interface PromptTemplates {
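
The same Headers interface is added in Models.d.ts. A small sketch of building a headers map for an authenticated endpoint and attaching it to a model entry; the header names and placeholder token are assumptions, not values from the schema.

import type { Headers, LLM } from "./Models";

// Arbitrary string-to-string map sent with each HTTP request to the LLM.
const requestHeaders: Headers = {
  Authorization: "Bearer <token>",   // placeholder, not a real credential
  "X-Client": "continue-extension",  // hypothetical custom header
};

const modelWithHeaders: LLM = {
  model: "gpt-4",
  headers: requestHeaders,
};
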