Diffstat (limited to 'continuedev/src')
-rw-r--r--  continuedev/src/continuedev/core/config.py            |  5
-rw-r--r--  continuedev/src/continuedev/libs/llm/proxy_server.py  |  8
-rw-r--r--  continuedev/src/continuedev/server/main.py            | 25
-rw-r--r--  continuedev/src/continuedev/steps/chat.py             |  3
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py        |  2
5 files changed, 38 insertions(+), 5 deletions(-)
diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 6af0878d..70c4876e 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -82,6 +82,7 @@ class ContinueConfig(BaseModel):
     allow_anonymous_telemetry: Optional[bool] = True
     default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k",
                            "gpt-4", "claude-2", "ggml"] = 'gpt-4'
+    temperature: Optional[float] = 0.5
     custom_commands: Optional[List[CustomCommand]] = [CustomCommand(
         name="test",
         description="This is an example custom command. Use /config to edit it and create more",
@@ -98,6 +99,10 @@ class ContinueConfig(BaseModel):
     def default_slash_commands_validator(cls, v):
         return DEFAULT_SLASH_COMMANDS
 
+    @validator('temperature', pre=True)
+    def temperature_validator(cls, v):
+        return max(0.0, min(1.0, v))
+
 
 def load_config(config_file: str) -> ContinueConfig:
     """
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 18e0e6f4..bd50fe02 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -1,8 +1,10 @@
 from functools import cached_property
 import json
+import traceback
 from typing import Any, Callable, Coroutine, Dict, Generator, List, Literal, Union
 import aiohttp
 
+from ..util.telemetry import capture_event
 from ...core.main import ChatMessage
 from ..llm import LLM
 from ..util.count_tokens import DEFAULT_ARGS, DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, count_tokens, format_chat_messages
@@ -81,8 +83,10 @@ class ProxyServer(LLM):
                             yield loaded_chunk
                             if "content" in loaded_chunk:
                                 completion += loaded_chunk["content"]
-                except:
-                    raise Exception(str(line[0]))
+                except Exception as e:
+                    capture_event(self.unique_id, "proxy_server_parse_error", {
+                        "error_title": "Proxy server stream_chat parsing failed", "error_message": '\n'.join(traceback.format_exception(e))})
+
         self.write_log(f"Completion: \n\n{completion}")
 
     async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
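The change above trades a hard failure for telemetry: a malformed stream chunk is reported and the generator keeps going instead of raising on `line[0]`. A rough standalone sketch of the pattern, with `capture_event` stubbed in place of the real `..util.telemetry` helper; note that the one-argument `traceback.format_exception(e)` form requires Python 3.10 or newer.

```python
# Sketch only: report a JSON parse failure rather than aborting the stream.
import json
import traceback

def capture_event(unique_id: str, event: str, properties: dict) -> None:
    # Stand-in for continuedev's telemetry sink.
    print(unique_id, event, properties)

def parse_chunk(unique_id: str, raw: str):
    try:
        return json.loads(raw)
    except Exception as e:
        capture_event(unique_id, "proxy_server_parse_error", {
            "error_title": "Proxy server stream_chat parsing failed",
            # format_exception(e) is the Python >= 3.10 signature; older
            # versions need format_exception(type(e), e, e.__traceback__).
            "error_message": "\n".join(traceback.format_exception(e)),
        })
        return None  # caller skips this chunk and keeps streaming
```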
diff --git a/continuedev/src/continuedev/server/main.py b/continuedev/src/continuedev/server/main.py
index aa093853..42dc0cc1 100644
--- a/continuedev/src/continuedev/server/main.py
+++ b/continuedev/src/continuedev/server/main.py
@@ -1,5 +1,6 @@
+import time
+import psutil
 import os
-import sys
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from .ide import router as ide_router
@@ -51,9 +52,31 @@ def cleanup():
         session_manager.persist_session(session_id)
 
 
+def cpu_usage_report():
+    process = psutil.Process(os.getpid())
+    # Call cpu_percent once to start measurement, but ignore the result
+    process.cpu_percent(interval=None)
+    # Wait for a short period of time
+    time.sleep(1)
+    # Call cpu_percent again to get the CPU usage over the interval
+    cpu_usage = process.cpu_percent(interval=None)
+    print(f"CPU usage: {cpu_usage}%")
+
+
 atexit.register(cleanup)
+
 if __name__ == "__main__":
     try:
+        # import threading
+
+        # def cpu_usage_loop():
+        #     while True:
+        #         cpu_usage_report()
+        #         time.sleep(2)
+
+        # cpu_thread = threading.Thread(target=cpu_usage_loop)
+        # cpu_thread.start()
+
         run_server()
     except Exception as e:
         cleanup()
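`cpu_usage_report` relies on how psutil measures: `Process.cpu_percent(interval=None)` returns usage since the *previous* call, so the first call only primes the counter and the second, after `time.sleep(1)`, reports usage over that one-second window. Passing `interval=1` would let psutil block and manage the window itself, as in this sketch (function name hypothetical):

```python
# Sketch only: the same measurement with psutil managing the sample window.
import os
import psutil

def cpu_usage_report_blocking() -> float:
    process = psutil.Process(os.getpid())
    # interval=1 makes cpu_percent sleep one second internally and compare
    # process CPU times across that window, replacing the manual
    # prime-then-sleep pair used in main.py above.
    return process.cpu_percent(interval=1)

if __name__ == "__main__":
    print(f"CPU usage: {cpu_usage_report_blocking()}%")
```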
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 8c03969e..aade1ea1 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -29,7 +29,8 @@ class SimpleChatStep(Step):
         completion = ""
         messages = self.messages or await sdk.get_chat_context()
 
-        generator = sdk.models.default.stream_chat(messages, temperature=0.5)
+        generator = sdk.models.default.stream_chat(
+            messages, temperature=sdk.config.temperature)
         try:
             async for chunk in generator:
                 if sdk.current_step_was_deleted():
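With this change (and the matching one in core.py below), the sampling temperature is read from `sdk.config` instead of being hard-coded at each call site. A hypothetical user config entry follows; the `config = ContinueConfig(...)` layout and import path are assumptions, only the `temperature` field comes from this commit:

```python
# Hypothetical ~/.continue/config.py snippet; values are illustrative.
from continuedev.core.config import ContinueConfig

config = ContinueConfig(
    default_model="gpt-4",
    temperature=0.25,  # clamped into [0.0, 1.0] by the validator above
)
```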
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 2b049ecc..4afc36e8 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -492,7 +492,7 @@ Please output the code to be inserted at the cursor in order to fulfill the user
             role="user", content=f"```\n{rif.contents}\n```\n\nUser request: \"{self.user_input}\"\n\nThis is the code after changing to perfectly comply with the user request. It does not include any placeholder code, only real implementations:\n\n```\n", summary=self.user_input)]
 
         generator = model_to_use.stream_chat(
-            messages, temperature=0, max_tokens=max_tokens)
+            messages, temperature=sdk.config.temperature, max_tokens=max_tokens)
 
         try:
             async for chunk in generator: