Compare commits

..

1 Commit

| Author | SHA1 | Message | Date |
|---|---|---|---|
| yichuan520030910320 | cd343e1d7a | style: apply ruff format | 2025-08-15 17:44:50 -07:00 |
5 changed files with 34 additions and 38 deletions

View File

@@ -426,21 +426,21 @@ Once the index is built, you can ask questions like:
 **The future of code assistance is here.** Transform your development workflow with LEANN's native MCP integration for Claude Code. Index your entire codebase and get intelligent code assistance directly in your IDE.
 **Key features:**
-- 🔍 **Semantic code search** across your entire project, fully local index and lightweight
+- 🔍 **Semantic code search** across your entire project
 - 📚 **Context-aware assistance** for debugging and development
 - 🚀 **Zero-config setup** with automatic language detection
 ```bash
 # Install LEANN globally for MCP integration
-uv tool install leann-core --with leann
-claude mcp add --scope user leann-server -- leann_mcp
+uv tool install leann-core
 # Setup is automatic - just start using Claude Code!
 ```
 Try our fully agentic pipeline with auto query rewriting, semantic search planning, and more:
 ![LEANN MCP Integration](assets/mcp_leann.png)
-**🔥 Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)
+**Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)
 ## 🖥️ Command Line Interface
@@ -457,8 +457,7 @@ leann --help
 **To make it globally available:**
 ```bash
 # Install the LEANN CLI globally using uv tool
-uv tool install leann-core --with leann
+uv tool install leann-core
 # Now you can use leann from anywhere without activating venv
 leann --help

View File

@@ -306,23 +306,6 @@ class LeannBuilder:
     def build_index(self, index_path: str):
         if not self.chunks:
             raise ValueError("No chunks added.")
-        # Filter out invalid/empty text chunks early to keep passage and embedding counts aligned
-        valid_chunks: list[dict[str, Any]] = []
-        skipped = 0
-        for chunk in self.chunks:
-            text = chunk.get("text", "")
-            if isinstance(text, str) and text.strip():
-                valid_chunks.append(chunk)
-            else:
-                skipped += 1
-        if skipped > 0:
-            print(
-                f"Warning: Skipping {skipped} empty/invalid text chunk(s). Processing {len(valid_chunks)} valid chunks"
-            )
-        self.chunks = valid_chunks
-        if not self.chunks:
-            raise ValueError("All provided chunks are empty or invalid. Nothing to index.")
         if self.dimensions is None:
             self.dimensions = len(
                 compute_embeddings(
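The block removed above existed to keep the passage list and the embedding matrix the same length by dropping chunks whose `text` is empty or not a string. A minimal standalone sketch of that guard, assuming chunks are plain dicts with a `text` field as in the diff:

```python
from typing import Any


def filter_valid_chunks(chunks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Drop chunks whose 'text' is missing, non-string, or whitespace-only."""
    valid = [c for c in chunks if isinstance(c.get("text"), str) and c["text"].strip()]
    skipped = len(chunks) - len(valid)
    if skipped:
        print(f"Warning: skipping {skipped} empty/invalid chunk(s); keeping {len(valid)}")
    if not valid:
        raise ValueError("All provided chunks are empty or invalid. Nothing to index.")
    return valid


# Two of the three chunks are dropped before any embeddings are computed.
chunks = [{"text": "LEANN keeps the index small."}, {"text": "   "}, {"metadata": {}}]
print(len(filter_valid_chunks(chunks)))  # -> 1
```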

View File

@@ -148,6 +148,12 @@ Examples:
             type=str,
             help="Comma-separated list of file extensions to include (e.g., '.txt,.pdf,.pptx'). If not specified, uses default supported types.",
         )
+        build_parser.add_argument(
+            "--include-hidden",
+            action=argparse.BooleanOptionalAction,
+            default=False,
+            help="Include hidden files and directories (paths starting with '.') during indexing (default: false)",
+        )
         build_parser.add_argument(
             "--doc-chunk-size",
             type=int,
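The new flag uses `argparse.BooleanOptionalAction`, which registers both `--include-hidden` and an automatic `--no-include-hidden` negation. A small self-contained sketch of that behavior (the parser name here is illustrative, not LEANN's actual CLI wiring):

```python
import argparse

parser = argparse.ArgumentParser(prog="leann-build-sketch")
parser.add_argument(
    "--include-hidden",
    action=argparse.BooleanOptionalAction,  # also generates --no-include-hidden
    default=False,
    help="Include hidden files and directories (paths starting with '.') during indexing",
)

print(parser.parse_args([]).include_hidden)                       # False (default)
print(parser.parse_args(["--include-hidden"]).include_hidden)     # True
print(parser.parse_args(["--no-include-hidden"]).include_hidden)  # False
```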
@@ -411,7 +417,10 @@ Examples:
         print(f" leann ask {example_name} --interactive")
     def load_documents(
-        self, docs_paths: Union[str, list], custom_file_types: Union[str, None] = None
+        self,
+        docs_paths: Union[str, list],
+        custom_file_types: Union[str, None] = None,
+        include_hidden: bool = False,
     ):
         # Handle both single path (string) and multiple paths (list) for backward compatibility
         if isinstance(docs_paths, str):
@@ -455,6 +464,10 @@ Examples:
         all_documents = []
+        # Helper to detect hidden path components
+        def _path_has_hidden_segment(p: Path) -> bool:
+            return any(part.startswith(".") and part not in [".", ".."] for part in p.parts)
         # First, process individual files if any
         if files:
             print(f"\n🔄 Processing {len(files)} individual file{'s' if len(files) > 1 else ''}...")
@@ -467,8 +480,12 @@ Examples:
             files_by_dir = defaultdict(list)
             for file_path in files:
-                parent_dir = str(Path(file_path).parent)
-                files_by_dir[parent_dir].append(file_path)
+                file_path_obj = Path(file_path)
+                if not include_hidden and _path_has_hidden_segment(file_path_obj):
+                    print(f" ⚠️ Skipping hidden file: {file_path}")
+                    continue
+                parent_dir = str(file_path_obj.parent)
+                files_by_dir[parent_dir].append(str(file_path_obj))
             # Load files from each parent directory
             for parent_dir, file_list in files_by_dir.items():
@@ -479,6 +496,7 @@ Examples:
                 file_docs = SimpleDirectoryReader(
                     parent_dir,
                     input_files=file_list,
+                    # exclude_hidden only affects directory scans; input_files are explicit
                     filename_as_id=True,
                 ).load_data()
                 all_documents.extend(file_docs)
@@ -577,6 +595,8 @@ Examples:
             # Check if file matches any exclude pattern
             try:
                 relative_path = file_path.relative_to(docs_path)
+                if not include_hidden and _path_has_hidden_segment(relative_path):
+                    continue
                 if self._should_exclude_file(relative_path, gitignore_matches):
                     continue
             except ValueError:
@@ -604,6 +624,7 @@ Examples:
             try:
                 default_docs = SimpleDirectoryReader(
                     str(file_path.parent),
+                    exclude_hidden=not include_hidden,
                     filename_as_id=True,
                     required_exts=[file_path.suffix],
                 ).load_data()
@@ -632,6 +653,7 @@ Examples:
                 encoding="utf-8",
                 required_exts=code_extensions,
                 file_extractor={},  # Use default extractors
+                exclude_hidden=not include_hidden,
                 filename_as_id=True,
             ).load_data(show_progress=True)
@@ -781,7 +803,9 @@ Examples:
             paragraph_separator="\n\n",
         )
-        all_texts = self.load_documents(docs_paths, args.file_types)
+        all_texts = self.load_documents(
+            docs_paths, args.file_types, include_hidden=args.include_hidden
+        )
         if not all_texts:
             print("No documents found")
             return

View File

@@ -244,16 +244,6 @@ def compute_embeddings_openai(texts: list[str], model_name: str) -> np.ndarray:
     except ImportError as e:
         raise ImportError(f"OpenAI package not installed: {e}")
-    # Validate input list
-    if not texts:
-        raise ValueError("Cannot compute embeddings for empty text list")
-    # Extra validation: abort early if any item is empty/whitespace
-    invalid_count = sum(1 for t in texts if not isinstance(t, str) or not t.strip())
-    if invalid_count > 0:
-        raise ValueError(
-            f"Found {invalid_count} empty/invalid text(s) in input. Upstream should filter before calling OpenAI."
-        )
     api_key = os.getenv("OPENAI_API_KEY")
     if not api_key:
         raise RuntimeError("OPENAI_API_KEY environment variable not set")

View File

@@ -92,7 +92,7 @@ leann build docs-and-configs --docs $(git ls-files "*.md" "*.yml" "*.yaml" "*.js
 ```
-## **Try this in Claude Code:**
+**Try this in Claude Code:**
 ```
 Help me understand this codebase. List available indexes and search for authentication patterns.
 ```