Diffstat (limited to 'server/tests/util')
-rw-r--r--  server/tests/util/__init__.py      0
-rw-r--r--  server/tests/util/config.py       19
-rw-r--r--  server/tests/util/openai_mock.py 149
-rw-r--r--  server/tests/util/prompts.py       2
4 files changed, 170 insertions(+), 0 deletions(-)
diff --git a/server/tests/util/__init__.py b/server/tests/util/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/tests/util/__init__.py
diff --git a/server/tests/util/config.py b/server/tests/util/config.py
new file mode 100644
index 00000000..370933a0
--- /dev/null
+++ b/server/tests/util/config.py
@@ -0,0 +1,19 @@
+from continuedev.core.config import ContinueConfig
+from continuedev.core.models import Models
+from continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial
+
+config = ContinueConfig(
+ allow_anonymous_telemetry=False,
+ models=Models(
+ default=OpenAIFreeTrial(api_key="", model="gpt-4"),
+ summarize=OpenAIFreeTrial(
+ api_key="",
+ model="gpt-3.5-turbo",
+ ),
+ ),
+ system_message=None,
+ temperature=0.5,
+ custom_commands=[],
+ slash_commands=[],
+ context_providers=[],
+)
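
A minimal sketch (not part of the commit) of how a test might consume this config; the import path assumes tests run from the server/ directory, and the attribute names simply mirror the ContinueConfig fields set above.

    from tests.util.config import config

    # These assertions restate what config.py sets explicitly.
    assert config.allow_anonymous_telemetry is False
    assert config.models.default.model == "gpt-4"
    assert config.models.summarize.model == "gpt-3.5-turbo"
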
diff --git a/server/tests/util/openai_mock.py b/server/tests/util/openai_mock.py
new file mode 100644
index 00000000..763c5647
--- /dev/null
+++ b/server/tests/util/openai_mock.py
@@ -0,0 +1,149 @@
+import asyncio
+import json
+import os
+import random
+import subprocess
+from typing import Dict, List, Optional
+
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
+
+openai = FastAPI()
+
+
+class CompletionBody(BaseModel):
+ prompt: str
+ max_tokens: Optional[int] = 60
+ stream: Optional[bool] = False
+
+ class Config:
+ extra = "allow"
+
+
+@openai.post("/completions")
+@openai.post("/v1/completions")
+async def mock_completion(item: CompletionBody):
+ prompt = item.prompt
+
+ text = "This is a fake completion."
+
+ if item.stream:
+
+        async def stream_text():
+            # StreamingResponse requires str/bytes chunks, so each chunk is
+            # serialized as a server-sent event rather than yielded as a dict.
+            for _ in range(len(text)):
+                word = random.choice(prompt.split())
+                chunk = {
+                    "choices": [
+                        {
+                            "delta": {"role": "assistant", "content": word},
+                            "finish_reason": None,
+                            "index": 0,
+                        }
+                    ],
+                    "created": 1677825464,
+                    "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+                    "model": "gpt-3.5-turbo-0301",
+                    "object": "chat.completion.chunk",
+                }
+                yield f"data: {json.dumps(chunk)}\n\n"
+                await asyncio.sleep(0.1)
+            yield "data: [DONE]\n\n"
+
+        return StreamingResponse(stream_text(), media_type="text/event-stream")
+
+ return {
+ "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+ "object": "text_completion",
+ "created": 1589478378,
+ "model": "gpt-3.5-turbo",
+ "choices": [
+ {
+ "text": text,
+ "index": 0,
+ "logprobs": None,
+ "finish_reason": "length",
+ }
+ ],
+ "usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12},
+ }
+
+
+class ChatBody(BaseModel):
+ messages: List[Dict[str, str]]
+ max_tokens: Optional[int] = None
+ stream: Optional[bool] = False
+
+ class Config:
+ extra = "allow"
+
+
+@openai.post("/v1/chat/completions")
+async def mock_chat_completion(item: ChatBody):
+ text = "This is a fake completion."
+
+ if item.stream:
+
+        async def stream_text():
+            # Stream the canned reply one character at a time, framed as
+            # server-sent events so StreamingResponse receives str chunks.
+            for char in text:
+                chunk = {
+                    "choices": [
+                        {
+                            "delta": {"role": "assistant", "content": char},
+                            "finish_reason": None,
+                            "index": 0,
+                        }
+                    ],
+                    "created": 1677825464,
+                    "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+                    "model": "gpt-3.5-turbo-0301",
+                    "object": "chat.completion.chunk",
+                }
+                yield f"data: {json.dumps(chunk)}\n\n"
+                await asyncio.sleep(0.1)
+            yield "data: [DONE]\n\n"
+
+        return StreamingResponse(stream_text(), media_type="text/event-stream")
+
+ return {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "gpt-3.5-turbo-0613",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": text,
+ },
+ "finish_reason": "stop",
+ }
+ ],
+ "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
+ }
+
+
+def start_openai(port: int = 8000):
+    # Run the mock in a subprocess via uvicorn so tests can point an
+    # OpenAI-compatible client at http://127.0.0.1:<port>.
+    server = subprocess.Popen(
+ [
+ "uvicorn",
+ "openai_mock:openai",
+ "--host",
+ "127.0.0.1",
+ "--port",
+ str(port),
+ ],
+ cwd=os.path.dirname(__file__),
+ )
+ return server
+
+
+if __name__ == "__main__":
+ start_openai()
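
A hypothetical usage sketch for the mock server above: start it on a port, wait briefly for uvicorn to bind, and hit the chat endpoint. httpx is assumed to be available in the test environment; none of this is part of the commit.

    import time

    import httpx

    from openai_mock import start_openai

    server = start_openai(port=8001)
    time.sleep(1)  # crude wait for uvicorn to bind; a retry loop would be sturdier
    try:
        resp = httpx.post(
            "http://127.0.0.1:8001/v1/chat/completions",
            json={"messages": [{"role": "user", "content": "hi"}]},
            timeout=10,
        )
        # The mock always answers with the same canned text.
        assert resp.json()["choices"][0]["message"]["content"] == "This is a fake completion."
    finally:
        server.terminate()
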
diff --git a/server/tests/util/prompts.py b/server/tests/util/prompts.py
new file mode 100644
index 00000000..e84ddc82
--- /dev/null
+++ b/server/tests/util/prompts.py
@@ -0,0 +1,2 @@
+tokyo_test_pair = ("Output a single word, that being the capital of Japan:", "tokyo")
+dotenv_test_pair = ("ModuleNotFoundError: No module named 'dotenv'", "python-dotenv")
\ No newline at end of file
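
For illustration only, one way these prompt/expected-answer pairs might be consumed; `llm` stands in for whatever completion client the suite wires up and is an assumption, not part of the diff.

    from tests.util.prompts import tokyo_test_pair

    prompt, expected = tokyo_test_pair
    completion = llm.complete(prompt)  # `llm` is an assumed client fixture
    # Expected answers are lowercase, so normalize before comparing.
    assert expected in completion.lower()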