author    Nate Sesti <33237525+sestinj@users.noreply.github.com>  2023-10-09 18:37:27 -0700
committer GitHub <noreply@github.com>  2023-10-09 18:37:27 -0700
commit    f09150617ed2454f3074bcf93f53aae5ae637d40
tree      5cfe614a64d921dfe58b049f426d67a8b832c71f /server/continuedev/libs/llm/prompts
parent    985304a213f620cdff3f8f65f74ed7e3b79be29d
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action <action@github.com>
Diffstat (limited to 'server/continuedev/libs/llm/prompts')
-rw-r--r--  server/continuedev/libs/llm/prompts/chat.py  174
-rw-r--r--  server/continuedev/libs/llm/prompts/edit.py   27
2 files changed, 201 insertions, 0 deletions
diff --git a/server/continuedev/libs/llm/prompts/chat.py b/server/continuedev/libs/llm/prompts/chat.py
new file mode 100644
index 00000000..036f1b1a
--- /dev/null
+++ b/server/continuedev/libs/llm/prompts/chat.py
@@ -0,0 +1,174 @@
+from textwrap import dedent
+from typing import Dict, List
+
+from anthropic import AI_PROMPT, HUMAN_PROMPT
+
+
+def anthropic_template_messages(messages: List[Dict[str, str]]) -> str:
+    prompt = ""
+
+    # Anthropic prompt must start with a Human turn
+    if (
+        len(messages) > 0
+        and messages[0]["role"] != "user"
+        and messages[0]["role"] != "system"
+    ):
+        prompt += f"{HUMAN_PROMPT} Hello."
+    for msg in messages:
+        prompt += f"{HUMAN_PROMPT if (msg['role'] == 'user' or msg['role'] == 'system') else AI_PROMPT} {msg['content']} "
+
+    prompt += AI_PROMPT
+    return prompt
+
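# Illustrative usage sketch (editor's example, not part of the diff above). It
# assumes the anthropic package's HUMAN_PROMPT ("\n\nHuman:") and AI_PROMPT
# ("\n\nAssistant:") constants.
example = anthropic_template_messages(
    [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "What is 2 + 2?"},
    ]
)
# example == "\n\nHuman: You are terse. \n\nHuman: What is 2 + 2? \n\nAssistant:"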
+
+def template_alpaca_messages(msgs: List[Dict[str, str]]) -> str:
+    prompt = ""
+
+    if msgs[0]["role"] == "system":
+        prompt += f"{msgs[0]['content']}\n"
+        msgs = msgs[1:]  # skip the system message without mutating the caller's list
+
+    for msg in msgs:
+        prompt += "### Instruction:\n" if msg["role"] == "user" else "### Response:\n"
+        prompt += f"{msg['content']}\n"
+
+    prompt += "### Response:\n"
+
+    return prompt
+
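# Illustrative usage sketch (editor's example, not part of the diff above).
print(
    template_alpaca_messages(
        [
            {"role": "system", "content": "Answer briefly."},
            {"role": "user", "content": "Name a prime number."},
        ]
    )
)
# Answer briefly.
# ### Instruction:
# Name a prime number.
# ### Response: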
+
+def raw_input_template(msgs: List[Dict[str, str]]) -> str:
+    return msgs[-1]["content"]
+
+
+SQL_CODER_DEFAULT_SCHEMA = """\
+CREATE TABLE products (
+    product_id INTEGER PRIMARY KEY, -- Unique ID for each product
+    name VARCHAR(50), -- Name of the product
+    price DECIMAL(10,2), -- Price of each unit of the product
+    quantity INTEGER -- Current quantity in stock
+);
+
+CREATE TABLE customers (
+    customer_id INTEGER PRIMARY KEY, -- Unique ID for each customer
+    name VARCHAR(50), -- Name of the customer
+    address VARCHAR(100) -- Mailing address of the customer
+);
+
+CREATE TABLE salespeople (
+    salesperson_id INTEGER PRIMARY KEY, -- Unique ID for each salesperson
+    name VARCHAR(50), -- Name of the salesperson
+    region VARCHAR(50) -- Geographic sales region
+);
+
+CREATE TABLE sales (
+    sale_id INTEGER PRIMARY KEY, -- Unique ID for each sale
+    product_id INTEGER, -- ID of product sold
+    customer_id INTEGER, -- ID of customer who made purchase
+    salesperson_id INTEGER, -- ID of salesperson who made the sale
+    sale_date DATE, -- Date the sale occurred
+    quantity INTEGER -- Quantity of product sold
+);
+
+CREATE TABLE product_suppliers (
+    supplier_id INTEGER PRIMARY KEY, -- Unique ID for each supplier
+    product_id INTEGER, -- Product ID supplied
+    supply_price DECIMAL(10,2) -- Unit price charged by supplier
+);
+
+-- sales.product_id can be joined with products.product_id
+-- sales.customer_id can be joined with customers.customer_id
+-- sales.salesperson_id can be joined with salespeople.salesperson_id
+-- product_suppliers.product_id can be joined with products.product_id
+"""
+
+
+def _sqlcoder_template_messages(
+    msgs: List[Dict[str, str]], schema: str = SQL_CODER_DEFAULT_SCHEMA
+) -> str:
+    question = msgs[-1]["content"]
+    return f"""\
+Your task is to convert a question into a SQL query, given a Postgres database schema.
+Adhere to these rules:
+- **Deliberately go through the question and database schema word by word** to appropriately answer the question
+- **Use Table Aliases** to prevent ambiguity. For example, `SELECT table1.col1, table2.col1 FROM table1 JOIN table2 ON table1.id = table2.id`.
+- When creating a ratio, always cast the numerator as float
+
+### Input:
+Generate a SQL query that answers the question `{question}`.
+This query will run on a database whose schema is represented in this string:
+{schema}
+
+### Response:
+Based on your instructions, here is the SQL query I have generated to answer the question `{question}`:
+```sql
+"""
+
+
+def sqlcoder_template_messages(schema: str = SQL_CODER_DEFAULT_SCHEMA):
+    if schema == "<MY_DATABASE_SCHEMA>" or schema == "":
+        schema = SQL_CODER_DEFAULT_SCHEMA
+
+    def fn(msgs):
+        return _sqlcoder_template_messages(msgs, schema=schema)
+
+    fn.__name__ = "sqlcoder_template_messages"
+    return fn
+
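# Illustrative usage sketch (editor's example, not part of the diff above); the
# one-table schema below is a made-up stand-in for a real database schema.
template = sqlcoder_template_messages(
    "CREATE TABLE users (id INTEGER PRIMARY KEY, name VARCHAR(50));"
)
prompt = template([{"role": "user", "content": "How many users are there?"}])
# `prompt` now embeds both the schema and the question, ending with "```sql"
# so the model completes with a bare SQL query.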
+
+def llama2_template_messages(msgs: List[Dict[str, str]]) -> str:
+    if len(msgs) == 0:
+        return ""
+
+    if msgs[0]["role"] == "assistant":
+        # These models aren't trained to handle an assistant message coming first,
+        # and typically these are just introduction messages from Continue
+        msgs = msgs[1:]
+
+    prompt = ""
+    has_system = msgs[0]["role"] == "system"
+
+    if has_system and msgs[0]["content"].strip() == "":
+        has_system = False
+        msgs = msgs[1:]
+
+    if has_system:
+        system_message = dedent(
+            f"""\
+            <<SYS>>
+            {msgs[0]["content"]}
+            <</SYS>>
+
+            """
+        )
+        if len(msgs) > 1:
+            prompt += f"[INST] {system_message}{msgs[1]['content']} [/INST]"
+        else:
+            prompt += f"[INST] {system_message} [/INST]"
+            return prompt
+
+    for i in range(2 if has_system else 0, len(msgs)):
+        if msgs[i]["role"] == "user":
+            prompt += f"[INST] {msgs[i]['content']} [/INST]"
+        else:
+            prompt += msgs[i]["content"] + " "
+
+    return prompt
+
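# Illustrative usage sketch (editor's example, not part of the diff above).
print(
    llama2_template_messages(
        [
            {"role": "system", "content": "You are concise."},
            {"role": "user", "content": "Say hi."},
        ]
    )
)
# [INST] <<SYS>>
# You are concise.
# <</SYS>>
#
# Say hi. [/INST]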
+
+def code_llama_template_messages(msgs: List[Dict[str, str]]) -> str:
+ return f"[INST] {msgs[-1]['content']}\n[/INST]"
+
+
+def extra_space_template_messages(msgs: List[Dict[str, str]]) -> str:
+ return f" {msgs[-1]['content']}"
+
+
+def code_llama_python_template_messages(msgs: List[Dict[str, str]]) -> str:
+    return dedent(
+        f"""\
+        [INST]
+        You are an expert Python programmer and personal assistant, here is your task: {msgs[-1]['content']}
+        Your answer should start with a [PYTHON] tag and end with a [/PYTHON] tag.
+        [/INST]"""
+    )
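# How these fit together (editor's sketch, not part of the diff above): each
# function maps OpenAI-style chat messages to a single prompt string, and a
# Continue LLM subclass accepts one as its message-templating callable. The
# parameter name template_messages and the LlamaCpp import path are assumptions
# based on this module's naming, not confirmed by this diff.
from continuedev.libs.llm.llamacpp import LlamaCpp  # hypothetical import

model = LlamaCpp(
    model="codellama-7b",  # hypothetical model name
    template_messages=llama2_template_messages,
)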
diff --git a/server/continuedev/libs/llm/prompts/edit.py b/server/continuedev/libs/llm/prompts/edit.py
new file mode 100644
index 00000000..eaa694c5
--- /dev/null
+++ b/server/continuedev/libs/llm/prompts/edit.py
@@ -0,0 +1,27 @@
+from textwrap import dedent
+
+simplified_edit_prompt = dedent(
+    """\
+    Consider the following code:
+    ```
+    {{{code_to_edit}}}
+    ```
+    Edit the code to perfectly satisfy the following user request:
+    {{{user_input}}}
+    Output nothing except for the code. No code block, no English explanation, no start/end tags."""
+)
+
+simplest_edit_prompt = dedent(
+    """\
+    Here is the code before editing:
+    ```
+    {{{code_to_edit}}}
+    ```
+
+    Here is the edit requested:
+    "{{{user_input}}}"
+
+    Here is the code after editing:"""
+)
+
+codellama_infill_edit_prompt = "{{file_prefix}}<FILL>{{file_suffix}}"
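
# Illustrative rendering sketch (editor's example, not part of the diff above).
# Continue fills these mustache-style {{{...}}} slots elsewhere; plain string
# replacement is enough to see the shape of the final prompt.
filled = (
    simplest_edit_prompt
    .replace("{{{code_to_edit}}}", "def add(a, b):\n    return a - b")
    .replace("{{{user_input}}}", "fix the bug so the function adds")
)
# `filled` is the full text sent to the model; it ends with
# "Here is the code after editing:" so the model completes with the edited code.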