From f09150617ed2454f3074bcf93f53aae5ae637d40 Mon Sep 17 00:00:00 2001
From: Nate Sesti <33237525+sestinj@users.noreply.github.com>
Date: Mon, 9 Oct 2023 18:37:27 -0700
Subject: Preview (#541)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Strong typing (#533)

* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include .exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.py
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action
---
 server/tests/util/openai_mock.py | 139 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 139 insertions(+)
 create mode 100644 server/tests/util/openai_mock.py

diff --git a/server/tests/util/openai_mock.py b/server/tests/util/openai_mock.py
new file mode 100644
index 00000000..763c5647
--- /dev/null
+++ b/server/tests/util/openai_mock.py
@@ -0,0 +1,139 @@
+import asyncio
+import os
+import random
+import subprocess
+from typing import Dict, List, Optional
+
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
+
+openai = FastAPI()
+
+
+class CompletionBody(BaseModel):
+    prompt: str
+    max_tokens: Optional[int] = 60
+    stream: Optional[bool] = False
+
+    class Config:
+        extra = "allow"
+
+
+@openai.post("/completions")
+@openai.post("/v1/completions")
+async def mock_completion(item: CompletionBody):
+    prompt = item.prompt
+
+    text = "This is a fake completion."
+
+    if item.stream:
+
+        async def stream_text():
+            for i in range(len(text)):
+                word = random.choice(prompt.split())
+                yield {
+                    "choices": [
+                        {
+                            "delta": {"role": "assistant", "content": word},
+                            "finish_reason": None,
+                            "index": 0,
+                        }
+                    ],
+                    "created": 1677825464,
+                    "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+                    "model": "gpt-3.5-turbo-0301",
+                    "object": "chat.completion.chunk",
+                }
+                await asyncio.sleep(0.1)
+
+        return StreamingResponse(stream_text(), media_type="text/plain")
+
+    return {
+        "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+        "object": "text_completion",
+        "created": 1589478378,
+        "model": "gpt-3.5-turbo",
+        "choices": [
+            {
+                "text": text,
+                "index": 0,
+                "logprobs": None,
+                "finish_reason": "length",
+            }
+        ],
+        "usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12},
+    }
+
+
+class ChatBody(BaseModel):
+    messages: List[Dict[str, str]]
+    max_tokens: Optional[int] = None
+    stream: Optional[bool] = False
+
+    class Config:
+        extra = "allow"
+
+
+@openai.post("/v1/chat/completions")
+async def mock_chat_completion(item: ChatBody):
+    text = "This is a fake completion."
+
+    if item.stream:
+
+        async def stream_text():
+            for i in range(len(text)):
+                word = text[i]
+                yield {
+                    "choices": [
+                        {
+                            "delta": {"role": "assistant", "content": word},
+                            "finish_reason": None,
+                            "index": 0,
+                        }
+                    ],
+                    "created": 1677825464,
+                    "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+                    "model": "gpt-3.5-turbo-0301",
+                    "object": "chat.completion.chunk",
+                }
+                await asyncio.sleep(0.1)
+
+        return StreamingResponse(stream_text(), media_type="text/plain")
+
+    return {
+        "id": "chatcmpl-123",
+        "object": "chat.completion",
+        "created": 1677652288,
+        "model": "gpt-3.5-turbo-0613",
+        "choices": [
+            {
+                "index": 0,
+                "message": {
+                    "role": "assistant",
+                    "content": text,
+                },
+                "finish_reason": "stop",
+            }
+        ],
+        "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
+    }
+
+
+def start_openai(port: int = 8000):
+    server = subprocess.Popen(
+        [
+            "uvicorn",
+            "openai_mock:openai",
+            "--host",
+            "127.0.0.1",
+            "--port",
+            str(port),
+        ],
+        cwd=os.path.dirname(__file__),
+    )
+    return server
+
+
+if __name__ == "__main__":
+    start_openai()
--
cgit v1.2.3-70-g09d2
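
For context, a minimal sketch of how a test might drive this mock server. It assumes the
module layout from the patch above and that fastapi, uvicorn, and requests are installed;
the file name usage_sketch.py, the one-second startup wait, and the port value are
illustrative assumptions, not part of the patch.

    # usage_sketch.py -- hypothetical test harness, not part of the patch above
    import time

    import requests

    # start_openai() is the helper defined in server/tests/util/openai_mock.py;
    # it launches uvicorn in a subprocess and returns the Popen handle.
    from openai_mock import start_openai

    server = start_openai(port=8000)
    try:
        time.sleep(1)  # crude wait for uvicorn to bind; a real test would poll the port

        # Non-streaming completion: the mock always answers with the fixed text.
        resp = requests.post(
            "http://127.0.0.1:8000/v1/completions",
            json={"prompt": "Hello world", "max_tokens": 60, "stream": False},
        )
        assert resp.json()["choices"][0]["text"] == "This is a fake completion."

        # Chat endpoint mirrors the shape of OpenAI's /v1/chat/completions response.
        resp = requests.post(
            "http://127.0.0.1:8000/v1/chat/completions",
            json={"messages": [{"role": "user", "content": "Hi"}]},
        )
        assert (
            resp.json()["choices"][0]["message"]["content"]
            == "This is a fake completion."
        )
    finally:
        server.terminate()  # shut down the uvicorn subprocess when the test is done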