Resolve uv.lock conflicts by aligning with origin/main
@@ -83,9 +83,7 @@ def create_diskann_embedding_server(
     logger.info(f"Loading PassageManager with metadata_file_path: {passages_file}")
     passages = PassageManager(meta["passage_sources"], metadata_file_path=passages_file)
-    logger.info(
-        f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
-    )
+    logger.info(f"Loaded PassageManager with {len(passages)} passages from metadata")

     # Import protobuf after ensuring the path is correct
     try:

@@ -4,8 +4,8 @@ build-backend = "scikit_build_core.build"

 [project]
 name = "leann-backend-diskann"
-version = "0.3.0"
-dependencies = ["leann-core==0.3.0", "numpy", "protobuf>=3.19.0"]
+version = "0.3.2"
+dependencies = ["leann-core==0.3.2", "numpy", "protobuf>=3.19.0"]

 [tool.scikit-build]
 # Key: simplified CMake path

@@ -90,9 +90,7 @@ def create_hnsw_embedding_server(
         embedding_dim: int = int(meta.get("dimensions", 0))
     except Exception:
         embedding_dim = 0
-    logger.info(
-        f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
-    )
+    logger.info(f"Loaded PassageManager with {len(passages)} passages from metadata")

     # (legacy ZMQ thread removed; using shutdown-capable server only)

@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"

 [project]
 name = "leann-backend-hnsw"
-version = "0.3.0"
+version = "0.3.2"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
 dependencies = [
-    "leann-core==0.3.0",
+    "leann-core==0.3.2",
     "numpy",
     "pyzmq>=23.0.0",
     "msgpack>=1.0.0",

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "leann-core"
-version = "0.3.0"
+version = "0.3.2"
 description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"

@@ -119,9 +119,12 @@ class PassageManager:
     def __init__(
         self, passage_sources: list[dict[str, Any]], metadata_file_path: Optional[str] = None
     ):
-        self.offset_maps = {}
-        self.passage_files = {}
-        self.global_offset_map = {}  # Combined map for fast lookup
+        self.offset_maps: dict[str, dict[str, int]] = {}
+        self.passage_files: dict[str, str] = {}
+        # Avoid materializing a single gigantic global map to reduce memory
+        # footprint on very large corpora (e.g., 60M+ passages). Instead, keep
+        # per-shard maps and do a lightweight per-shard lookup on demand.
+        self._total_count: int = 0

         # Derive index base name for standard sibling fallbacks, e.g., <index_name>.passages.*
         index_name_base = None

@@ -142,12 +145,25 @@ class PassageManager:
         default_name: Optional[str],
         source_dict: dict[str, Any],
     ) -> list[Path]:
+        """
+        Build an ordered list of candidate paths. For relative paths specified in
+        metadata, prefer resolution relative to the metadata file directory first,
+        then fall back to CWD-based resolution, and finally to conventional
+        sibling defaults (e.g., <index_base>.passages.idx / .jsonl).
+        """
         candidates: list[Path] = []
-        # 1) Primary as-is (absolute or relative)
+        # 1) Primary path
         if primary:
             p = Path(primary)
-            candidates.append(p if p.is_absolute() else (Path.cwd() / p))
-        # 2) metadata-relative explicit relative key
+            if p.is_absolute():
+                candidates.append(p)
+            else:
+                # Prefer metadata-relative resolution for relative paths
+                if metadata_file_path:
+                    candidates.append(Path(metadata_file_path).parent / p)
+                # Also consider CWD-relative as a fallback for legacy layouts
+                candidates.append(Path.cwd() / p)
+        # 2) metadata-relative explicit relative key (if present)
         if metadata_file_path and source_dict.get(relative_key):
            candidates.append(Path(metadata_file_path).parent / source_dict[relative_key])
         # 3) metadata-relative standard sibling filename

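The resolution order built above (metadata-relative first, then CWD-relative, then sibling defaults) is consumed by picking the first candidate that exists on disk. A minimal sketch of that consumption step, assuming only pathlib and a hypothetical helper name:

from pathlib import Path
from typing import Optional

def first_existing(candidates: list[Path]) -> Optional[Path]:
    # Candidates arrive ordered by preference, so the first hit wins.
    for candidate in candidates:
        if candidate.exists():
            return candidate
    return None
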
@@ -177,23 +193,28 @@ class PassageManager:
                 raise FileNotFoundError(f"Passage index file not found: {index_file}")

             with open(index_file, "rb") as f:
-                offset_map = pickle.load(f)
+                offset_map: dict[str, int] = pickle.load(f)
             self.offset_maps[passage_file] = offset_map
             self.passage_files[passage_file] = passage_file
-
-            # Build global map for O(1) lookup
-            for passage_id, offset in offset_map.items():
-                self.global_offset_map[passage_id] = (passage_file, offset)
+            self._total_count += len(offset_map)

     def get_passage(self, passage_id: str) -> dict[str, Any]:
-        if passage_id in self.global_offset_map:
-            passage_file, offset = self.global_offset_map[passage_id]
-            # Lazy file opening - only open when needed
-            with open(passage_file, encoding="utf-8") as f:
-                f.seek(offset)
-                return json.loads(f.readline())
+        # Fast path: check each shard map (there are typically few shards).
+        # This avoids building a massive combined dict while keeping lookups
+        # bounded by the number of shards.
+        for passage_file, offset_map in self.offset_maps.items():
+            try:
+                offset = offset_map[passage_id]
+                with open(passage_file, encoding="utf-8") as f:
+                    f.seek(offset)
+                    return json.loads(f.readline())
+            except KeyError:
+                continue
         raise KeyError(f"Passage ID not found: {passage_id}")

+    def __len__(self) -> int:
+        return self._total_count
+

 class LeannBuilder:
     def __init__(

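This change trades a single O(1) merged dict for per-shard probing: memory drops from one entry per passage to the shard maps alone, while get_passage now costs at most one dict probe per shard. A minimal standalone sketch of the pattern, with hypothetical in-memory data in place of LEANN's pickled offset maps:

# Two shard maps instead of one merged global map.
shards: dict[str, dict[str, int]] = {
    "shard_a.jsonl": {"p1": 0, "p2": 42},
    "shard_b.jsonl": {"p3": 0},
}

def locate(passage_id: str) -> tuple[str, int]:
    # Probe each shard; cost is bounded by the number of shards, not passages.
    for passage_file, offset_map in shards.items():
        if passage_id in offset_map:
            return passage_file, offset_map[passage_id]
    raise KeyError(f"Passage ID not found: {passage_id}")

assert locate("p3") == ("shard_b.jsonl", 0)
total = sum(len(m) for m in shards.values())  # mirrors __len__ / _total_count
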
@@ -587,7 +608,9 @@ class LeannSearcher:
         logger.info(f"  Additional kwargs: {kwargs}")

         # Smart top_k detection and adjustment
-        total_docs = len(self.passage_manager.global_offset_map)
+        # Use PassageManager length (sum of shard sizes) to avoid
+        # depending on a massive combined map
+        total_docs = len(self.passage_manager)
         original_top_k = top_k
         if top_k > total_docs:
             top_k = total_docs

@@ -1,7 +1,8 @@
 import argparse
 import asyncio
+import sys
 from pathlib import Path
-from typing import Optional, Union
+from typing import Any, Optional, Union

 from llama_index.core import SimpleDirectoryReader
 from llama_index.core.node_parser import SentenceSplitter

@@ -180,6 +181,29 @@ Examples:
         default=50,
         help="Code chunk overlap (default: 50)",
     )
+    build_parser.add_argument(
+        "--use-ast-chunking",
+        action="store_true",
+        help="Enable AST-aware chunking for code files (requires astchunk)",
+    )
+    build_parser.add_argument(
+        "--ast-chunk-size",
+        type=int,
+        default=768,
+        help="AST chunk size in characters (default: 768)",
+    )
+    build_parser.add_argument(
+        "--ast-chunk-overlap",
+        type=int,
+        default=96,
+        help="AST chunk overlap in characters (default: 96)",
+    )
+    build_parser.add_argument(
+        "--ast-fallback-traditional",
+        action="store_true",
+        default=True,
+        help="Fall back to traditional chunking if AST chunking fails (default: True)",
+    )

     # Search command
     search_parser = subparsers.add_parser("search", help="Search documents")

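With these flags registered, a build invocation might look like the following (hypothetical index name and docs path; flag names and defaults are exactly as declared above):

leann build my-code --docs ./src --use-ast-chunking --ast-chunk-size 768 --ast-chunk-overlap 96
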
@@ -206,6 +230,11 @@ Examples:
         default="global",
         help="Pruning strategy (default: global)",
     )
+    search_parser.add_argument(
+        "--non-interactive",
+        action="store_true",
+        help="Non-interactive mode: automatically select index without prompting",
+    )

     # Ask command
     ask_parser = subparsers.add_parser("ask", help="Ask questions")

@@ -405,13 +434,9 @@ Examples:
             print("💡 Get started:")
             print("   leann build my-docs --docs ./documents")
         else:
-            projects_count = len(
-                [
-                    p
-                    for p in valid_projects
-                    if (p / ".leann" / "indexes").exists()
-                    and list((p / ".leann" / "indexes").iterdir())
-                ]
-            )
+            # Count only projects that have at least one discoverable index
+            projects_count = sum(
+                1 for p in valid_projects if len(self._discover_indexes_in_project(p)) > 0
+            )
             print(f"📊 Total: {total_indexes} indexes across {projects_count} projects")

@@ -461,26 +486,35 @@ Examples:
         )

         # 2. Apps format: *.leann.meta.json files anywhere in the project
+        cli_indexes_dir = project_path / ".leann" / "indexes"
         for meta_file in project_path.rglob("*.leann.meta.json"):
             if meta_file.is_file():
-                # Extract index name from filename (remove .leann.meta.json extension)
-                index_name = meta_file.name.replace(".leann.meta.json", "")
+                # Skip CLI-built indexes (which store meta under .leann/indexes/<name>/)
+                try:
+                    if cli_indexes_dir.exists() and cli_indexes_dir in meta_file.parents:
+                        continue
+                except Exception:
+                    pass
+                # Use the parent directory name as the app index display name
+                display_name = meta_file.parent.name
+                # Extract file base used to store files
+                file_base = meta_file.name.replace(".leann.meta.json", "")

                 # Apps indexes are considered complete if the .leann.meta.json file exists
                 status = "✅"

-                # Calculate total size of all related files
+                # Calculate total size of all related files (use file base)
                 size_mb = 0
                 try:
                     index_dir = meta_file.parent
-                    for related_file in index_dir.glob(f"{index_name}.leann*"):
+                    for related_file in index_dir.glob(f"{file_base}.leann*"):
                         size_mb += related_file.stat().st_size / (1024 * 1024)
                 except (OSError, PermissionError):
                     pass

                 indexes.append(
                     {
-                        "name": index_name,
+                        "name": display_name,
                         "type": "app",
                         "status": status,
                         "size_mb": size_mb,

@@ -534,13 +568,79 @@ Examples:
             if not project_path.exists():
                 continue

+            # 1) CLI-format index under .leann/indexes/<name>
             index_dir = project_path / ".leann" / "indexes" / index_name
             if index_dir.exists():
                 is_current = project_path == current_path
                 matches.append(
-                    {"project_path": project_path, "index_dir": index_dir, "is_current": is_current}
+                    {
+                        "project_path": project_path,
+                        "index_dir": index_dir,
+                        "is_current": is_current,
+                        "kind": "cli",
+                    }
                 )

+            # 2) App-format indexes
+            # We support two ways of addressing apps:
+            #   a) by the file base (e.g., `pdf_documents`)
+            #   b) by the parent directory name (e.g., `new_txt`)
+            seen_app_meta = set()
+
+            # 2a) by file base
+            for meta_file in project_path.rglob(f"{index_name}.leann.meta.json"):
+                if meta_file.is_file():
+                    # Skip CLI-built indexes' meta under .leann/indexes
+                    try:
+                        cli_indexes_dir = project_path / ".leann" / "indexes"
+                        if cli_indexes_dir.exists() and cli_indexes_dir in meta_file.parents:
+                            continue
+                    except Exception:
+                        pass
+                    is_current = project_path == current_path
+                    key = (str(project_path), str(meta_file))
+                    if key in seen_app_meta:
+                        continue
+                    seen_app_meta.add(key)
+                    matches.append(
+                        {
+                            "project_path": project_path,
+                            "files_dir": meta_file.parent,
+                            "meta_file": meta_file,
+                            "is_current": is_current,
+                            "kind": "app",
+                            "display_name": meta_file.parent.name,
+                            "file_base": meta_file.name.replace(".leann.meta.json", ""),
+                        }
+                    )
+
+            # 2b) by parent directory name
+            for meta_file in project_path.rglob("*.leann.meta.json"):
+                if meta_file.is_file() and meta_file.parent.name == index_name:
+                    # Skip CLI-built indexes' meta under .leann/indexes
+                    try:
+                        cli_indexes_dir = project_path / ".leann" / "indexes"
+                        if cli_indexes_dir.exists() and cli_indexes_dir in meta_file.parents:
+                            continue
+                    except Exception:
+                        pass
+                    is_current = project_path == current_path
+                    key = (str(project_path), str(meta_file))
+                    if key in seen_app_meta:
+                        continue
+                    seen_app_meta.add(key)
+                    matches.append(
+                        {
+                            "project_path": project_path,
+                            "files_dir": meta_file.parent,
+                            "meta_file": meta_file,
+                            "is_current": is_current,
+                            "kind": "app",
+                            "display_name": meta_file.parent.name,
+                            "file_base": meta_file.name.replace(".leann.meta.json", ""),
+                        }
+                    )
+
         # Sort: current project first, then by project name
         matches.sort(key=lambda x: (not x["is_current"], x["project_path"].name))
         return matches

@@ -548,8 +648,8 @@ Examples:
     def _remove_single_match(self, match, index_name: str, force: bool):
         """Handle removal when only one match is found"""
         project_path = match["project_path"]
-        index_dir = match["index_dir"]
         is_current = match["is_current"]
+        kind = match.get("kind", "cli")

         if is_current:
             location_info = "current project"

@@ -560,7 +660,10 @@ Examples:

         print(f"✅ Found 1 index named '{index_name}':")
         print(f"   {emoji} Location: {location_info}")
-        print(f"   📍 Path: {project_path}")
+        if kind == "cli":
+            print(f"   📍 Path: {project_path / '.leann' / 'indexes' / index_name}")
+        else:
+            print(f"   📍 Meta: {match['meta_file']}")

         if not force:
             if not is_current:

@@ -572,9 +675,22 @@ Examples:
                 print("   ❌ Removal cancelled.")
                 return False

-        return self._delete_index_directory(
-            index_dir, index_name, project_path if not is_current else None
-        )
+        if kind == "cli":
+            return self._delete_index_directory(
+                match["index_dir"],
+                index_name,
+                project_path if not is_current else None,
+                is_app=False,
+            )
+        else:
+            return self._delete_index_directory(
+                match["files_dir"],
+                match.get("display_name", index_name),
+                project_path if not is_current else None,
+                is_app=True,
+                meta_file=match.get("meta_file"),
+                app_file_base=match.get("file_base"),
+            )

     def _remove_from_multiple_matches(self, matches, index_name: str, force: bool):
         """Handle removal when multiple matches are found"""

@@ -585,19 +701,34 @@ Examples:
         for i, match in enumerate(matches, 1):
             project_path = match["project_path"]
             is_current = match["is_current"]
+            kind = match.get("kind", "cli")

             if is_current:
-                print(f"   {i}. 🏠 Current project")
-                print(f"      📍 {project_path}")
+                print(f"   {i}. 🏠 Current project ({'CLI' if kind == 'cli' else 'APP'})")
             else:
-                print(f"   {i}. 📂 {project_path.name}")
-                print(f"      📍 {project_path}")
+                print(f"   {i}. 📂 {project_path.name} ({'CLI' if kind == 'cli' else 'APP'})")
+
+            # Show path details
+            if kind == "cli":
+                print(f"      📍 {project_path / '.leann' / 'indexes' / index_name}")
+            else:
+                print(f"      📍 {match['meta_file']}")

             # Show size info
             try:
-                size_mb = sum(
-                    f.stat().st_size for f in match["index_dir"].iterdir() if f.is_file()
-                ) / (1024 * 1024)
+                if kind == "cli":
+                    size_mb = sum(
+                        f.stat().st_size for f in match["index_dir"].iterdir() if f.is_file()
+                    ) / (1024 * 1024)
+                else:
+                    file_base = match.get("file_base")
+                    size_mb = 0.0
+                    if file_base:
+                        size_mb = sum(
+                            f.stat().st_size
+                            for f in match["files_dir"].glob(f"{file_base}.leann*")
+                            if f.is_file()
+                        ) / (1024 * 1024)
                 print(f"      📦 Size: {size_mb:.1f} MB")
             except (OSError, PermissionError):
                 pass

@@ -621,8 +752,8 @@ Examples:
             if 0 <= choice_idx < len(matches):
                 selected_match = matches[choice_idx]
                 project_path = selected_match["project_path"]
-                index_dir = selected_match["index_dir"]
                 is_current = selected_match["is_current"]
+                kind = selected_match.get("kind", "cli")

                 location = "current project" if is_current else f"'{project_path.name}' project"
                 print(f"  🎯 Selected: Remove from {location}")

@@ -635,9 +766,22 @@ Examples:
                         print("   ❌ Confirmation failed. Removal cancelled.")
                         return False

-                return self._delete_index_directory(
-                    index_dir, index_name, project_path if not is_current else None
-                )
+                if kind == "cli":
+                    return self._delete_index_directory(
+                        selected_match["index_dir"],
+                        index_name,
+                        project_path if not is_current else None,
+                        is_app=False,
+                    )
+                else:
+                    return self._delete_index_directory(
+                        selected_match["files_dir"],
+                        selected_match.get("display_name", index_name),
+                        project_path if not is_current else None,
+                        is_app=True,
+                        meta_file=selected_match.get("meta_file"),
+                        app_file_base=selected_match.get("file_base"),
+                    )
             else:
                 print("  ❌ Invalid choice. Removal cancelled.")
                 return False

@@ -647,21 +791,65 @@ Examples:
             return False

     def _delete_index_directory(
-        self, index_dir: Path, index_name: str, project_path: Optional[Path] = None
+        self,
+        index_dir: Path,
+        index_display_name: str,
+        project_path: Optional[Path] = None,
+        is_app: bool = False,
+        meta_file: Optional[Path] = None,
+        app_file_base: Optional[str] = None,
     ):
-        """Actually delete the index directory"""
+        """Delete a CLI index directory or APP index files safely."""
         try:
-            import shutil
-
-            shutil.rmtree(index_dir)
-
-            if project_path:
-                print(f"✅ Index '{index_name}' removed from {project_path.name}")
-            else:
-                print(f"✅ Index '{index_name}' removed successfully")
-            return True
+            if is_app:
+                removed = 0
+                errors = 0
+                # Delete only files that belong to this app index (based on file base)
+                pattern_base = app_file_base or ""
+                for f in index_dir.glob(f"{pattern_base}.leann*"):
+                    try:
+                        f.unlink()
+                        removed += 1
+                    except Exception:
+                        errors += 1
+                # Best-effort: also remove the meta file if specified and still exists
+                if meta_file and meta_file.exists():
+                    try:
+                        meta_file.unlink()
+                        removed += 1
+                    except Exception:
+                        errors += 1
+
+                if removed > 0 and errors == 0:
+                    if project_path:
+                        print(
+                            f"✅ App index '{index_display_name}' removed from {project_path.name}"
+                        )
+                    else:
+                        print(f"✅ App index '{index_display_name}' removed successfully")
+                    return True
+                elif removed > 0 and errors > 0:
+                    print(
+                        f"⚠️ App index '{index_display_name}' partially removed (some files couldn't be deleted)"
+                    )
+                    return True
+                else:
+                    print(
+                        f"❌ No files found to remove for app index '{index_display_name}' in {index_dir}"
+                    )
+                    return False
+            else:
+                import shutil
+
+                shutil.rmtree(index_dir)
+
+                if project_path:
+                    print(f"✅ Index '{index_display_name}' removed from {project_path.name}")
+                else:
+                    print(f"✅ Index '{index_display_name}' removed successfully")
+                return True
         except Exception as e:
-            print(f"❌ Error removing index '{index_name}': {e}")
+            print(f"❌ Error removing index '{index_display_name}': {e}")
             return False

     def load_documents(

@@ -669,6 +857,7 @@ Examples:
         docs_paths: Union[str, list],
         custom_file_types: Union[str, None] = None,
         include_hidden: bool = False,
+        args: Optional[dict[str, Any]] = None,
     ):
         # Handle both single path (string) and multiple paths (list) for backward compatibility
         if isinstance(docs_paths, str):

@@ -974,18 +1163,50 @@ Examples:
         }

         print("start chunking documents")
-        # Add progress bar for document chunking
-        for doc in tqdm(documents, desc="Chunking documents", unit="doc"):
-            # Check if this is a code file based on source path
-            source_path = doc.metadata.get("source", "")
-            is_code_file = any(source_path.endswith(ext) for ext in code_file_exts)
-
-            # Use appropriate parser based on file type
-            parser = self.code_parser if is_code_file else self.node_parser
-            nodes = parser.get_nodes_from_documents([doc])
-
-            for node in nodes:
-                all_texts.append(node.get_content())
+        # Check if AST chunking is requested
+        use_ast = getattr(args, "use_ast_chunking", False)
+
+        if use_ast:
+            print("🧠 Using AST-aware chunking for code files")
+            try:
+                # Import enhanced chunking utilities
+                # Add apps directory to path to import chunking utilities
+                apps_dir = Path(__file__).parent.parent.parent.parent.parent / "apps"
+                if apps_dir.exists():
+                    sys.path.insert(0, str(apps_dir))
+
+                from chunking import create_text_chunks
+
+                # Use enhanced chunking with AST support
+                all_texts = create_text_chunks(
+                    documents,
+                    chunk_size=self.node_parser.chunk_size,
+                    chunk_overlap=self.node_parser.chunk_overlap,
+                    use_ast_chunking=True,
+                    ast_chunk_size=getattr(args, "ast_chunk_size", 768),
+                    ast_chunk_overlap=getattr(args, "ast_chunk_overlap", 96),
+                    code_file_extensions=None,  # Use defaults
+                    ast_fallback_traditional=getattr(args, "ast_fallback_traditional", True),
+                )
+
+            except ImportError as e:
+                print(f"⚠️ AST chunking not available ({e}), falling back to traditional chunking")
+                use_ast = False
+
+        if not use_ast:
+            # Use traditional chunking logic
+            for doc in tqdm(documents, desc="Chunking documents", unit="doc"):
+                # Check if this is a code file based on source path
+                source_path = doc.metadata.get("source", "")
+                is_code_file = any(source_path.endswith(ext) for ext in code_file_exts)
+
+                # Use appropriate parser based on file type
+                parser = self.code_parser if is_code_file else self.node_parser
+                nodes = parser.get_nodes_from_documents([doc])
+
+                for node in nodes:
+                    all_texts.append(node.get_content())

         print(f"Loaded {len(documents)} documents, {len(all_texts)} chunks")
         return all_texts

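The block above is an import-guarded feature toggle: attempt the optional AST path, and on ImportError clear the flag so the traditional branch still runs. The same pattern in isolation, with a hypothetical optional module standing in for apps/chunking:

def simple_chunk(text: str, width: int = 16) -> list[str]:
    # Fallback: fixed-width character windows.
    return [text[i : i + width] for i in range(0, len(text), width)]

use_fast = True
try:
    from fancy_chunking import chunk_code  # hypothetical optional dependency
except ImportError:
    use_fast = False  # degrade gracefully instead of failing the build

docs = ["def f():\n    return 1\n"]
if use_fast:
    chunks = [c for d in docs for c in chunk_code(d)]
else:
    chunks = [c for d in docs for c in simple_chunk(d)]
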
@@ -1052,7 +1273,7 @@ Examples:
         )

         all_texts = self.load_documents(
-            docs_paths, args.file_types, include_hidden=args.include_hidden
+            docs_paths, args.file_types, include_hidden=args.include_hidden, args=args
         )
         if not all_texts:
             print("No documents found")

@@ -1085,13 +1306,101 @@ Examples:
     async def search_documents(self, args):
         index_name = args.index_name
         query = args.query
-        index_path = self.get_index_path(index_name)

-        if not self.index_exists(index_name):
-            print(
-                f"Index '{index_name}' not found. Use 'leann build {index_name} --docs <dir> [<dir2> ...]' to create it."
-            )
-            return
+        # First try to find the index in current project
+        index_path = self.get_index_path(index_name)
+        if self.index_exists(index_name):
+            # Found in current project, use it
+            pass
+        else:
+            # Search across all registered projects (like list_indexes does)
+            all_matches = self._find_all_matching_indexes(index_name)
+            if not all_matches:
+                print(
+                    f"Index '{index_name}' not found. Use 'leann build {index_name} --docs <dir> [<dir2> ...]' to create it."
+                )
+                return
+            elif len(all_matches) == 1:
+                # Found exactly one match, use it
+                match = all_matches[0]
+                if match["kind"] == "cli":
+                    index_path = str(match["index_dir"] / "documents.leann")
+                else:
+                    # App format: use the meta file to construct the path
+                    meta_file = match["meta_file"]
+                    file_base = match["file_base"]
+                    index_path = str(meta_file.parent / f"{file_base}.leann")
+
+                project_info = (
+                    "current project"
+                    if match["is_current"]
+                    else f"project '{match['project_path'].name}'"
+                )
+                print(f"Using index '{index_name}' from {project_info}")
+            else:
+                # Multiple matches found
+                if args.non_interactive:
+                    # Non-interactive mode: automatically select the best match
+                    # Priority: current project first, then first available
+                    current_matches = [m for m in all_matches if m["is_current"]]
+                    if current_matches:
+                        match = current_matches[0]
+                        location_desc = "current project"
+                    else:
+                        match = all_matches[0]
+                        location_desc = f"project '{match['project_path'].name}'"
+
+                    if match["kind"] == "cli":
+                        index_path = str(match["index_dir"] / "documents.leann")
+                    else:
+                        meta_file = match["meta_file"]
+                        file_base = match["file_base"]
+                        index_path = str(meta_file.parent / f"{file_base}.leann")
+
+                    print(
+                        f"Found {len(all_matches)} indexes named '{index_name}', using index from {location_desc}"
+                    )
+                else:
+                    # Interactive mode: ask user to choose
+                    print(f"Found {len(all_matches)} indexes named '{index_name}':")
+                    for i, match in enumerate(all_matches, 1):
+                        project_path = match["project_path"]
+                        is_current = match["is_current"]
+                        kind = match.get("kind", "cli")
+
+                        if is_current:
+                            print(
+                                f"   {i}. 🏠 Current project ({'CLI' if kind == 'cli' else 'APP'})"
+                            )
+                        else:
+                            print(
+                                f"   {i}. 📂 {project_path.name} ({'CLI' if kind == 'cli' else 'APP'})"
+                            )
+
+                    try:
+                        choice = input(f"Which index to search? (1-{len(all_matches)}): ").strip()
+                        choice_idx = int(choice) - 1
+                        if 0 <= choice_idx < len(all_matches):
+                            match = all_matches[choice_idx]
+                            if match["kind"] == "cli":
+                                index_path = str(match["index_dir"] / "documents.leann")
+                            else:
+                                meta_file = match["meta_file"]
+                                file_base = match["file_base"]
+                                index_path = str(meta_file.parent / f"{file_base}.leann")
+
+                            project_info = (
+                                "current project"
+                                if match["is_current"]
+                                else f"project '{match['project_path'].name}'"
+                            )
+                            print(f"Using index '{index_name}' from {project_info}")
+                        else:
+                            print("Invalid choice. Aborting search.")
+                            return
+                    except (ValueError, KeyboardInterrupt):
+                        print("Invalid input. Aborting search.")
+                        return

         searcher = LeannSearcher(index_path=index_path)
         results = searcher.search(

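The non-interactive branch encodes a simple selection policy: prefer a match in the current project, otherwise take the first registered one. A minimal sketch of just that policy, assuming match dicts shaped like the ones built by _find_all_matching_indexes:

from typing import Any

def pick_match(matches: list[dict[str, Any]]) -> dict[str, Any]:
    # Current-project matches win; otherwise fall back to the first match.
    current = [m for m in matches if m["is_current"]]
    return current[0] if current else matches[0]
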
@@ -192,6 +192,7 @@ class EmbeddingServerManager:
             stderr_target = None  # Direct to console for visible logs

         # Start embedding server subprocess
+        logger.info(f"Starting server process with command: {' '.join(command)}")
         self.server_process = subprocess.Popen(
             command,
             cwd=project_root,

@@ -94,7 +94,7 @@ def handle_request(request):
             },
         }

-        # Build simplified command
+        # Build simplified command with non-interactive flag for MCP compatibility
        cmd = [
            "leann",
            "search",

@@ -102,6 +102,7 @@ def handle_request(request):
            args["query"],
            f"--top-k={args.get('top_k', 5)}",
            f"--complexity={args.get('complexity', 32)}",
+           "--non-interactive",
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)

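Passing --non-interactive matters here because the search may resolve to several same-named indexes; without it, the child process would block on input() with no TTY attached. A sketch of the call-site pattern, assuming the leann CLI is on PATH and using hypothetical index/query values:

import subprocess

result = subprocess.run(
    ["leann", "search", "my-docs", "my query", "--top-k=5", "--non-interactive"],
    capture_output=True,
    text=True,
)
print(result.stdout)
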
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "leann"
-version = "0.3.0"
+version = "0.3.2"
 description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
 readme = "README.md"
 requires-python = ">=3.9"