Diffstat (limited to 'continuedev')
-rw-r--r--   continuedev/src/continuedev/core/context.py                     | 11
-rw-r--r--   continuedev/src/continuedev/plugins/context_providers/file.py   | 22
2 files changed, 22 insertions, 11 deletions
diff --git a/continuedev/src/continuedev/core/context.py b/continuedev/src/continuedev/core/context.py
index b1f68b50..48c14ed6 100644
--- a/continuedev/src/continuedev/core/context.py
+++ b/continuedev/src/continuedev/core/context.py
@@ -176,14 +176,21 @@ class ContextManager:
"id": item.description.id.to_string(),
"name": item.description.name,
"description": item.description.description,
- "content": item.content
+ "content": item.content,
+ "workspace_dir": workspace_dir,
}
for item in context_items
]
if len(documents) > 0:
try:
async with Client('http://localhost:7700') as search_client:
- await asyncio.wait_for(search_client.index(SEARCH_INDEX_NAME).add_documents(documents), timeout=5)
+ # The index is currently shared by all workspaces
+ globalSearchIndex = await search_client.get_index(SEARCH_INDEX_NAME)
+ await asyncio.wait_for(asyncio.gather(
+ # Ensure that the index has the correct filterable attributes
+ globalSearchIndex.update_filterable_attributes(["workspace_dir"]),
+ globalSearchIndex.add_documents(documents)
+ ), timeout=5)
except Exception as e:
logger.debug(f"Error loading meilisearch index: {e}")
diff --git a/continuedev/src/continuedev/plugins/context_providers/file.py b/continuedev/src/continuedev/plugins/context_providers/file.py
index 31aa5423..b40092af 100644
--- a/continuedev/src/continuedev/plugins/context_providers/file.py
+++ b/continuedev/src/continuedev/plugins/context_providers/file.py
@@ -54,33 +54,37 @@ class FileContextProvider(ContextProvider):
         list(filter(lambda d: f"**/{d}", DEFAULT_IGNORE_DIRS))
 
     async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
-        filepaths = []
+        absolute_filepaths: List[str] = []
         for root, dir_names, file_names in os.walk(workspace_dir):
             dir_names[:] = [d for d in dir_names if not any(
                 fnmatch(d, pattern) for pattern in self.ignore_patterns)]
             for file_name in file_names:
-                filepaths.append(os.path.join(root, file_name))
+                absolute_filepaths.append(os.path.join(root, file_name))
 
-                if len(filepaths) > 1000:
+                if len(absolute_filepaths) > 1000:
                     break
 
-            if len(filepaths) > 1000:
+            if len(absolute_filepaths) > 1000:
                 break
 
         items = []
-        for file in filepaths:
-            content = get_file_contents(file)
+        for absolute_filepath in absolute_filepaths:
+            content = get_file_contents(absolute_filepath)
             if content is None:
                 continue  # no pun intended
+
+            relative_to_workspace = os.path.relpath(absolute_filepath, workspace_dir)
             items.append(ContextItem(
                 content=content[:min(2000, len(content))],
                 description=ContextItemDescription(
-                    name=os.path.basename(file),
-                    description=file,
+                    name=os.path.basename(absolute_filepath),
+                    # We should add the full path to the ContextItem
+                    # It warrants a data modeling discussion and has no immediate use case
+                    description=relative_to_workspace,
                     id=ContextItemId(
                         provider_title=self.title,
-                        item_id=remove_meilisearch_disallowed_chars(file)
+                        item_id=remove_meilisearch_disallowed_chars(absolute_filepath)
                     )
                 )
             ))
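
For illustration, a small standalone sketch of how the renamed variables and the new workspace-relative description relate for a single walked file; the paths below are made up and the variable names simply mirror the loop above.

import os

# Hypothetical values mirroring one iteration of the walk above
workspace_dir = "/home/user/myproject"
absolute_filepath = os.path.join(workspace_dir, "src", "app", "main.py")

name = os.path.basename(absolute_filepath)
relative_to_workspace = os.path.relpath(absolute_filepath, workspace_dir)

print(name)                   # "main.py"         -> shown as the item name
print(relative_to_workspace)  # "src/app/main.py" -> used as the item description
print(absolute_filepath)      # full path, still used (sanitized) for the item id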