path: root/extension/schema/LLM.d.ts
/* eslint-disable */
/**
 * This file was automatically generated by json-schema-to-typescript.
 * DO NOT MODIFY IT BY HAND. Instead, modify the source JSONSchema file,
 * and run json-schema-to-typescript to regenerate this file.
 */

export type LLM = LLM1;
/**
 * A title that will identify this model in the model selection dropdown.
 */
export type Title = string;
/**
 * The unique ID of the user.
 */
export type UniqueId = string;
/**
 * The name of the model to be used (e.g. gpt-4, codellama).
 */
export type Model = string;
/**
 * A system message that the LLM will always follow.
 */
export type SystemMessage = string;
/**
 * The maximum context length of the LLM in tokens, as counted by count_tokens.
 */
export type ContextLength = number;
/**
 * Tokens that will stop the completion.
 */
export type StopTokens = string[];
/**
 * The temperature of the completion.
 */
export type Temperature = number;
/**
 * The top_p of the completion.
 */
export type TopP = number;
/**
 * The top_k of the completion.
 */
export type TopK = number;
/**
 * The presence penalty of the completion.
 */
export type PresencePenalty = number;
/**
 * The frequency penalty of the completion.
 */
export type FrequencyPenalty = number;
/**
 * Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.
 */
export type Timeout = number;
/**
 * Whether to verify SSL certificates for requests.
 */
export type VerifySsl = boolean;
/**
 * Path to a custom CA bundle to use when making the HTTP request.
 */
export type CaBundlePath = string;
/**
 * Proxy URL to use when making the HTTP request.
 */
export type Proxy = string;
/**
 * The API key for the LLM provider.
 */
export type ApiKey = string;

export interface LLM1 {
  title?: Title;
  unique_id?: UniqueId;
  model: Model;
  system_message?: SystemMessage;
  context_length?: ContextLength;
  stop_tokens?: StopTokens;
  temperature?: Temperature;
  top_p?: TopP;
  top_k?: TopK;
  presence_penalty?: PresencePenalty;
  frequency_penalty?: FrequencyPenalty;
  timeout?: Timeout;
  verify_ssl?: VerifySsl;
  ca_bundle_path?: CaBundlePath;
  proxy?: Proxy;
  headers?: Headers;
  prompt_templates?: PromptTemplates;
  api_key?: ApiKey;
  [k: string]: unknown;
}
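/**
 * Usage sketch (illustrative, not generated from the schema): a config
 * object satisfying LLM1, as it could appear in a consuming .ts module.
 * "model" is the only required property; every value below is an assumed
 * example rather than a shipped default, and the timeout unit is not
 * stated by the schema (seconds is assumed here).
 *
 * @example
 * import { LLM } from "./LLM";
 *
 * const gpt4: LLM = {
 *   title: "GPT-4",               // label shown in the model dropdown
 *   model: "gpt-4",               // required
 *   context_length: 8192,         // tokens, as counted by count_tokens
 *   temperature: 0.5,
 *   stop_tokens: ["</s>"],
 *   timeout: 120,                 // assumed seconds; useful for slow local LLMs
 *   api_key: process.env.OPENAI_API_KEY ?? "",
 * };
 */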
/**
 * Headers to use when making the HTTP request.
 */
export interface Headers {
  [k: string]: string;
}
/**
 * A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.
 */
export interface PromptTemplates {
  [k: string]: unknown;
}
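/**
 * Usage sketch (illustrative): overriding the "edit" prompt template, per
 * the PromptTemplates description above. The mustache variable names
 * {{code_to_edit}} and {{user_input}} are assumptions for illustration
 * only; the documentation lists the variables actually available in each
 * situation.
 *
 * @example
 * const codellama: LLM = {
 *   model: "codellama",
 *   prompt_templates: {
 *     edit: "[INST] {{user_input}}\n\n{{code_to_edit}} [/INST]",
 *   },
 * };
 */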