From f09150617ed2454f3074bcf93f53aae5ae637d40 Mon Sep 17 00:00:00 2001
From: Nate Sesti <33237525+sestinj@users.noreply.github.com>
Date: Mon, 9 Oct 2023 18:37:27 -0700
Subject: Preview (#541)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Strong typing (#533)

* refactor: :recycle: get rid of continuedev.src.continuedev structure

* refactor: :recycle: switching back to server folder

* feat: :sparkles: make config.py imports shorter

* feat: :bookmark: publish as pre-release vscode extension

* refactor: :recycle: refactor and add more completion params to ui

* build: :building_construction: download from preview S3

* fix: :bug: fix paths

* fix: :green_heart: package:pre-release

* ci: :green_heart: more time for tests

* fix: :green_heart: fix build scripts

* fix: :bug: fix import in run.py

* fix: :bookmark: update version to try again

* ci: 💚 Update package.json version [skip ci]

* refactor: :fire: don't check for old extensions version

* fix: :bug: small bug fixes

* fix: :bug: fix config.py import paths

* ci: 💚 Update package.json version [skip ci]

* ci: :green_heart: platform-specific builds test #1

* feat: :green_heart: ship with binary

* fix: :green_heart: fix copy statement to include .exe for windows

* fix: :green_heart: cd extension before packaging

* chore: :loud_sound: count tokens generated

* fix: :green_heart: remove npm_config_arch

* fix: :green_heart: publish as pre-release!

* chore: :bookmark: update version

* perf: :green_heart: hardcode distro paths

* fix: :bug: fix yaml syntax error

* chore: :bookmark: update version

* fix: :green_heart: update permissions and version

* feat: :bug: kill old server if needed

* feat: :lipstick: update marketplace icon for pre-release

* ci: 💚 Update package.json version [skip ci]

* feat: :sparkles: auto-reload for config.py

* feat: :wrench: update default config.py imports

* feat: :sparkles: codelens in config.py

* feat: :sparkles: select model param count from UI

* ci: 💚 Update package.json version [skip ci]

* feat: :sparkles: more model options, ollama error handling

* perf: :zap: don't show server loading immediately

* fix: :bug: fixing small UI details

* ci: 💚 Update package.json version [skip ci]

* feat: :rocket: headers param on LLM class

* fix: :bug: fix headers for openai.py

* feat: :sparkles: highlight code on cmd+shift+L

* ci: 💚 Update package.json version [skip ci]

* feat: :lipstick: sticky top bar in gui.tsx

* fix: :loud_sound: websocket logging and horizontal scrollbar

* ci: 💚 Update package.json version [skip ci]

* feat: :sparkles: allow AzureOpenAI Service through GGML

* ci: 💚 Update package.json version [skip ci]

* fix: :bug: fix automigration

* ci: 💚 Update package.json version [skip ci]

* ci: :green_heart: upload binaries in ci, download apple silicon

* chore: :fire: remove notes

* fix: :green_heart: use curl to download binary

* fix: :green_heart: set permissions on apple silicon binary

* fix: :green_heart: testing

* fix: :green_heart: cleanup file

* fix: :green_heart: fix preview.yaml

* fix: :green_heart: only upload once per binary

* fix: :green_heart: install rosetta

* ci: :green_heart: download binary after tests

* ci: 💚 Update package.json version [skip ci]

* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action
---
 server/tests/llm_test.py | 179 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 179 insertions(+)
 create mode 100644 server/tests/llm_test.py

diff --git a/server/tests/llm_test.py b/server/tests/llm_test.py
new file mode 100644
index 00000000..a016b464
--- /dev/null
+++ b/server/tests/llm_test.py
@@ -0,0 +1,179 @@
+import asyncio
+import os
+from functools import wraps
+
+import pytest
+from continuedev.core.main import ChatMessage
+from continuedev.libs.llm.anthropic import AnthropicLLM
+from continuedev.libs.llm.base import LLM, CompletionOptions
+from continuedev.libs.llm.ggml import GGML
+from continuedev.libs.llm.openai import OpenAI
+from continuedev.libs.llm.together import TogetherLLM
+from continuedev.libs.util.count_tokens import DEFAULT_ARGS
+from dotenv import load_dotenv
+from util.prompts import tokyo_test_pair
+
+load_dotenv()
+
+
+SPEND_MONEY = True
+
+
+def start_model(model):
+    def write_log(msg: str):
+        pass
+
+    asyncio.run(model.start(write_log=write_log, unique_id="test_unique_id"))
+
+
+def async_test(func):
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        return asyncio.run(func(*args, **kwargs))
+
+    return wrapper
+
+
+class TestBaseLLM:
+    model = "gpt-3.5-turbo"
+    context_length = 4096
+    system_message = "test_system_message"
+
+    def setup_class(cls):
+        cls.llm = LLM(
+            model=cls.model,
+            context_length=cls.context_length,
+            system_message=cls.system_message,
+        )
+
+        start_model(cls.llm)
+
+    def test_llm_is_instance(self):
+        assert isinstance(self.llm, LLM)
+
+    def test_llm_collect_args(self):
+        options = CompletionOptions(model=self.model)
+        assert self.llm.collect_args(options) == {
+            **DEFAULT_ARGS,
+            "model": self.model,
+        }
+
+    @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+    @async_test
+    async def test_completion(self):
+        if self.llm.__class__.__name__ == "LLM":
+            pytest.skip("Skipping abstract LLM")
+
+        resp = await self.llm.complete(tokyo_test_pair[0], temperature=0.0)
+        assert isinstance(resp, str)
+        assert resp.strip().lower() == tokyo_test_pair[1]
+
+    @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+    @async_test
+    async def test_stream_chat(self):
+        if self.llm.__class__.__name__ == "LLM":
+            pytest.skip("Skipping abstract LLM")
+
+        completion = ""
+        role = None
+        async for chunk in self.llm.stream_chat(
+            messages=[
+                ChatMessage(
+                    role="user", content=tokyo_test_pair[0], summary=tokyo_test_pair[0]
+                )
+            ],
+            temperature=0.0,
+        ):
+            assert isinstance(chunk, dict)
+            if "content" in chunk:
+                completion += chunk["content"]
+            if "role" in chunk:
+                role = chunk["role"]
+
+        assert role == "assistant"
+        assert completion.strip().lower() == tokyo_test_pair[1]
+
+    @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+    @async_test
+    async def test_stream_complete(self):
+        if self.llm.__class__.__name__ == "LLM":
+            pytest.skip("Skipping abstract LLM")
+
+        completion = ""
+        async for chunk in self.llm.stream_complete(
+            tokyo_test_pair[0], temperature=0.0
+        ):
+            assert isinstance(chunk, str)
+            completion += chunk
+
+        assert completion.strip().lower() == tokyo_test_pair[1]
+
+
+class TestOpenAI(TestBaseLLM):
+    def setup_class(cls):
+        super().setup_class(cls)
+        cls.llm = OpenAI(
+            model=cls.model,
+            context_length=cls.context_length,
+            system_message=cls.system_message,
+            api_key=os.environ["OPENAI_API_KEY"],
+            # api_base=f"http://localhost:{port}",
+        )
+        start_model(cls.llm)
+        # cls.server = start_openai(port=port)
+
+    # def teardown_class(cls):
+    #     cls.server.terminate()
+
+    @pytest.mark.asyncio
+    @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+    async def test_completion(self):
+        resp = await self.llm.complete(
+            "Output a single word, that being the capital of Japan:"
+        )
+        assert isinstance(resp, str)
+        assert resp.strip().lower() == tokyo_test_pair[1]
+
+
+class TestGGML(TestBaseLLM):
+    def setup_class(cls):
+        super().setup_class(cls)
+        cls.llm = GGML(
+            model="gpt-3.5-turbo",
+            context_length=cls.context_length,
+            system_message=cls.system_message,
+            server_url="https://api.openai.com",
+            api_key=os.environ["OPENAI_API_KEY"],
+        )
+        start_model(cls.llm)
+
+
+@pytest.mark.skipif(True, reason="Together is not working")
+class TestTogetherLLM(TestBaseLLM):
+    def setup_class(cls):
+        super().setup_class(cls)
+        cls.llm = TogetherLLM(
+            api_key=os.environ["TOGETHER_API_KEY"],
+        )
+        start_model(cls.llm)
+
+
+class TestAnthropicLLM(TestBaseLLM):
+    def setup_class(cls):
+        super().setup_class(cls)
+        cls.llm = AnthropicLLM(api_key=os.environ["ANTHROPIC_API_KEY"])
+        start_model(cls.llm)
+
+    def test_llm_collect_args(self):
+        options = CompletionOptions(model=self.model)
+        assert self.llm.collect_args(options) == {
+            "max_tokens_to_sample": DEFAULT_ARGS["max_tokens"],
+            "temperature": DEFAULT_ARGS["temperature"],
+            "model": self.model,
+        }
+
+
+if __name__ == "__main__":
+    import pytest
+
+    pytest.main()
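
For reference, the async_test decorator in the file above follows a common pattern for driving coroutine tests from synchronous code: wrap the coroutine function in a plain function that runs it to completion on a fresh event loop via asyncio.run. A minimal, self-contained sketch of the same idea, runnable on its own (the add coroutine is a hypothetical stand-in, not part of the Continue codebase):

import asyncio
from functools import wraps


def async_test(func):
    # Wrap an async test so a synchronous runner (pytest, or plain Python)
    # can call it. asyncio.run creates a fresh event loop, drives the
    # coroutine to completion, and closes the loop.
    @wraps(func)
    def wrapper(*args, **kwargs):
        return asyncio.run(func(*args, **kwargs))

    return wrapper


@async_test
async def test_add():
    async def add(a, b):  # hypothetical coroutine under test
        await asyncio.sleep(0)
        return a + b

    assert await add(2, 3) == 5


if __name__ == "__main__":
    test_add()  # runs synchronously thanks to the wrapper
    print("ok")

Because the wrapper is an ordinary function, a synchronous test runner collects and calls it like any other test, with no asyncio plugin required.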