Compare commits: debug/clea ... v0.3.2 (24 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 0174ba5571 |  |
|  | 03af82d695 |  |
|  | 738f1dbab8 |  |
|  | 37d990d51c |  |
|  | a6f07a54f1 |  |
|  | 46905e0687 |  |
|  | 838ade231e |  |
|  | da6540decd |  |
|  | 39e18a7c11 |  |
|  | 6bde28584b |  |
|  | f62632c41f |  |
|  | 27708243ca |  |
|  | 9a1e4652ca |  |
|  | 14e84d9e2d |  |
|  | 2dcfca19ff |  |
|  | bee2167ee3 |  |
|  | ef980d70b3 |  |
|  | db3c63c441 |  |
|  | 00eeadb9dd |  |
|  | 42c8370709 |  |
|  | fafdf8fcbe |  |
|  | 21f7d8e031 |  |
|  | 46565b9249 |  |
|  | 3dad76126a |  |
.gitattributes (vendored, 1 change)

@@ -1 +0,0 @@
-paper_plot/data/big_graph_degree_data.npz filter=lfs diff=lfs merge=lfs -text
.github/workflows/build-reusable.yml (vendored, 60 changes)

@@ -87,7 +87,7 @@ jobs:
     runs-on: ${{ matrix.os }}

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{ inputs.ref }}
          submodules: recursive

@@ -98,21 +98,23 @@ jobs:
          python-version: ${{ matrix.python }}

      - name: Install uv
-        uses: astral-sh/setup-uv@v4
+        uses: astral-sh/setup-uv@v6

      - name: Install system dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y libomp-dev libboost-all-dev protobuf-compiler libzmq3-dev \
-            pkg-config libopenblas-dev patchelf libabsl-dev libaio-dev libprotobuf-dev
+            pkg-config libabsl-dev libaio-dev libprotobuf-dev \
+            patchelf

          # Install Intel MKL for DiskANN
          wget -q https://registrationcenter-download.intel.com/akdlm/IRC_NAS/79153e0f-74d7-45af-b8c2-258941adf58a/intel-onemkl-2025.0.0.940.sh
          sudo sh intel-onemkl-2025.0.0.940.sh -a --components intel.oneapi.lin.mkl.devel --action install --eula accept -s
          source /opt/intel/oneapi/setvars.sh
          echo "MKLROOT=/opt/intel/oneapi/mkl/latest" >> $GITHUB_ENV
-          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/mkl/latest/lib/intel64:$LD_LIBRARY_PATH" >> $GITHUB_ENV
-          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/mkl/latest/lib/intel64" >> $GITHUB_ENV

      - name: Install system dependencies (macOS)
        if: runner.os == 'macOS'

@@ -304,3 +306,53 @@ jobs:
        with:
          name: packages-${{ matrix.os }}-py${{ matrix.python }}
          path: packages/*/dist/
+
+  arch-smoke:
+    name: Arch Linux smoke test (install & import)
+    needs: build
+    runs-on: ubuntu-latest
+    container:
+      image: archlinux:latest
+
+    steps:
+      - name: Prepare system
+        run: |
+          pacman -Syu --noconfirm
+          pacman -S --noconfirm python python-pip gcc git zlib openssl
+
+      - name: Download ALL wheel artifacts from this run
+        uses: actions/download-artifact@v5
+        with:
+          # Don't specify name, download all artifacts
+          path: ./wheels
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
+
+      - name: Create virtual environment and install wheels
+        run: |
+          uv venv
+          source .venv/bin/activate || source .venv/Scripts/activate
+          uv pip install --find-links wheels leann-core
+          uv pip install --find-links wheels leann-backend-hnsw
+          uv pip install --find-links wheels leann-backend-diskann
+          uv pip install --find-links wheels leann
+
+      - name: Import & tiny runtime check
+        env:
+          OMP_NUM_THREADS: 1
+          MKL_NUM_THREADS: 1
+        run: |
+          source .venv/bin/activate || source .venv/Scripts/activate
+          python - <<'PY'
+          import leann
+          import leann_backend_hnsw as h
+          import leann_backend_diskann as d
+          from leann import LeannBuilder, LeannSearcher
+          b = LeannBuilder(backend_name="hnsw")
+          b.add_text("hello arch")
+          b.build_index("arch_demo.leann")
+          s = LeannSearcher("arch_demo.leann")
+          print("search:", s.search("hello", top_k=1))
+          PY
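Note that the heredoc in the smoke test doubles as the minimal end-to-end LEANN flow: build an HNSW index from a single text with `LeannBuilder`, reopen it with `LeannSearcher`, and run one query.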
.github/workflows/link-check.yml (vendored, 2 changes)

@@ -14,6 +14,6 @@ jobs:
      - uses: actions/checkout@v4
      - uses: lycheeverse/lychee-action@v2
        with:
-          args: --no-progress --insecure README.md docs/ apps/ examples/ benchmarks/
+          args: --no-progress --insecure --user-agent 'curl/7.68.0' README.md docs/ apps/ examples/ benchmarks/
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.gitignore (vendored, 1 change)

@@ -18,6 +18,7 @@ demo/experiment_results/**/*.json
 *.eml
 *.emlx
 *.json
+!.vscode/*.json
 *.sh
 *.txt
 !CMakeLists.txt
.vscode/extensions.json (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
{
  "recommendations": [
    "charliermarsh.ruff",
  ]
}
.vscode/settings.json (vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
{
  "python.defaultInterpreterPath": ".venv/bin/python",
  "python.terminal.activateEnvironment": true,
  "[python]": {
    "editor.defaultFormatter": "charliermarsh.ruff",
    "editor.formatOnSave": true,
    "editor.codeActionsOnSave": {
      "source.organizeImports": "explicit",
      "source.fixAll": "explicit"
    },
    "editor.insertSpaces": true,
    "editor.tabSize": 4
  },
  "ruff.enable": true,
  "files.watcherExclude": {
    "**/.venv/**": true,
    "**/__pycache__/**": true,
    "**/*.egg-info/**": true,
    "**/build/**": true,
    "**/dist/**": true
  }
}
README.md (160 changes)

@@ -5,7 +5,7 @@
 <p align="center">
   <img src="https://img.shields.io/badge/Python-3.9%20%7C%203.10%20%7C%203.11%20%7C%203.12%20%7C%203.13-blue.svg" alt="Python Versions">
   <img src="https://github.com/yichuan-w/LEANN/actions/workflows/build-and-publish.yml/badge.svg" alt="CI Status">
-  <img src="https://img.shields.io/badge/Platform-Ubuntu%20%7C%20macOS%20(ARM64%2FIntel)-lightgrey" alt="Platform">
+  <img src="https://img.shields.io/badge/Platform-Ubuntu%20%26%20Arch%20%26%20WSL%20%7C%20macOS%20(ARM64%2FIntel)-lightgrey" alt="Platform">
   <img src="https://img.shields.io/badge/License-MIT-green.svg" alt="MIT License">
   <img src="https://img.shields.io/badge/MCP-Native%20Integration-blue" alt="MCP Integration">
 </p>

@@ -31,7 +31,7 @@ LEANN achieves this through *graph-based selective recomputation* with *high-degree
   <img src="assets/effects.png" alt="LEANN vs Traditional Vector DB Storage Comparison" width="70%">
 </p>

-> **The numbers speak for themselves:** Index 60 million text chunks in just 6GB instead of 201GB. From emails to browser history, everything fits on your laptop. [See detailed benchmarks for different applications below ↓](#storage-comparison)
+> **The numbers speak for themselves:** Index 60 million text chunks in just 6GB instead of 201GB. From emails to browser history, everything fits on your laptop. [See detailed benchmarks for different applications below ↓](#-storage-comparison)

 🔒 **Privacy:** Your data never leaves your laptop. No OpenAI, no cloud, no "terms of service".

@@ -70,6 +70,8 @@ uv venv
 source .venv/bin/activate
 uv pip install leann
 ```
+<!--
+> Low-resource? See “Low-resource setups” in the [Configuration Guide](docs/configuration-guide.md#low-resource-setups). -->

 <details>
 <summary>

@@ -85,15 +87,60 @@ git submodule update --init --recursive
 ```

 **macOS:**

+Note: DiskANN requires MacOS 13.3 or later.
+
 ```bash
-brew install llvm libomp boost protobuf zeromq pkgconf
-CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv sync
+brew install libomp boost protobuf zeromq pkgconf
+uv sync --extra diskann
 ```

-**Linux:**
+**Linux (Ubuntu/Debian):**
+
+Note: On Ubuntu 20.04, you may need to build a newer Abseil and pin Protobuf (e.g., v3.20.x) for building DiskANN. See [Issue #30](https://github.com/yichuan-w/LEANN/issues/30) for a step-by-step note.
+
+You can manually install [Intel oneAPI MKL](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html) instead of `libmkl-full-dev` for DiskANN. You can also use `libopenblas-dev` for building HNSW only, by removing `--extra diskann` in the command below.

 ```bash
-sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
-uv sync
+sudo apt-get update && sudo apt-get install -y \
+  libomp-dev libboost-all-dev protobuf-compiler libzmq3-dev \
+  pkg-config libabsl-dev libaio-dev libprotobuf-dev \
+  libmkl-full-dev
+
+uv sync --extra diskann
 ```
+
+**Linux (Arch Linux):**
+
+```bash
+sudo pacman -Syu && sudo pacman -S --needed base-devel cmake pkgconf git gcc \
+  boost boost-libs protobuf abseil-cpp libaio zeromq
+
+# For MKL in DiskANN
+sudo pacman -S --needed base-devel git
+git clone https://aur.archlinux.org/paru-bin.git
+cd paru-bin && makepkg -si
+paru -S intel-oneapi-mkl intel-oneapi-compiler
+source /opt/intel/oneapi/setvars.sh
+
+uv sync --extra diskann
+```
+
+**Linux (RHEL / CentOS Stream / Oracle / Rocky / AlmaLinux):**
+
+See [Issue #50](https://github.com/yichuan-w/LEANN/issues/50) for more details.
+
+```bash
+sudo dnf groupinstall -y "Development Tools"
+sudo dnf install -y libomp-devel boost-devel protobuf-compiler protobuf-devel \
+  abseil-cpp-devel libaio-devel zeromq-devel pkgconf-pkg-config
+
+# For MKL in DiskANN
+sudo dnf install -y intel-oneapi-mkl intel-oneapi-mkl-devel \
+  intel-oneapi-openmp || sudo dnf install -y intel-oneapi-compiler
+source /opt/intel/oneapi/setvars.sh
+
+uv sync --extra diskann
+```

 </details>

@@ -184,34 +231,34 @@ All RAG examples share these common parameters. **Interactive mode** is available

 ```bash
 # Core Parameters (General preprocessing for all examples)
 --index-dir DIR          # Directory to store the index (default: current directory)
 --query "YOUR QUESTION"  # Single query mode. Omit for interactive chat (type 'quit' to exit), and now you can play with your index interactively
 --max-items N            # Limit data preprocessing (default: -1, process all data)
 --force-rebuild          # Force rebuild index even if it exists

 # Embedding Parameters
---embedding-model MODEL  # e.g., facebook/contriever, text-embedding-3-small, nomic-embed-text,mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text
+--embedding-model MODEL  # e.g., facebook/contriever, text-embedding-3-small, mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text
 --embedding-mode MODE    # sentence-transformers, openai, mlx, or ollama

 # LLM Parameters (Text generation models)
 --llm TYPE               # LLM backend: openai, ollama, or hf (default: openai)
 --llm-model MODEL        # Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct
 --thinking-budget LEVEL  # Thinking budget for reasoning models: low/medium/high (supported by o3, o3-mini, GPT-Oss:20b, and other reasoning models)

 # Search Parameters
 --top-k N                # Number of results to retrieve (default: 20)
 --search-complexity N    # Search complexity for graph traversal (default: 32)

 # Chunking Parameters
 --chunk-size N           # Size of text chunks (default varies by source: 256 for most, 192 for WeChat)
 --chunk-overlap N        # Overlap between chunks (default varies: 25-128 depending on source)

 # Index Building Parameters
 --backend-name NAME      # Backend to use: hnsw or diskann (default: hnsw)
 --graph-degree N         # Graph degree for index construction (default: 32)
 --build-complexity N     # Build complexity for index construction (default: 64)
---no-compact             # Disable compact index storage (compact storage IS enabled to save storage by default)
---no-recompute           # Disable embedding recomputation (recomputation IS enabled to save storage by default)
+--compact / --no-compact     # Use compact storage (default: true). Must be `no-compact` for `no-recompute` build.
+--recompute / --no-recompute # Enable/disable embedding recomputation (default: enabled). Should not do a `no-recompute` search in a `recompute` build.
 ```

 </details>

@@ -424,21 +471,21 @@ Once the index is built, you can ask questions like:
 **The future of code assistance is here.** Transform your development workflow with LEANN's native MCP integration for Claude Code. Index your entire codebase and get intelligent code assistance directly in your IDE.

 **Key features:**
-- 🔍 **Semantic code search** across your entire project
+- 🔍 **Semantic code search** across your entire project, fully local index and lightweight
 - 📚 **Context-aware assistance** for debugging and development
 - 🚀 **Zero-config setup** with automatic language detection

 ```bash
 # Install LEANN globally for MCP integration
-uv tool install leann-core
-
+uv tool install leann-core --with leann
 claude mcp add --scope user leann-server -- leann_mcp
 # Setup is automatic - just start using Claude Code!
 ```
 Try our fully agentic pipeline with auto query rewriting, semantic search planning, and more:

 

-**Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)
+**🔥 Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)

 ## 🖥️ Command Line Interface

@@ -455,7 +502,8 @@ leann --help
 **To make it globally available:**
 ```bash
 # Install the LEANN CLI globally using uv tool
-uv tool install leann-core
+uv tool install leann-core --with leann
+

 # Now you can use leann from anywhere without activating venv
 leann --help

@@ -479,30 +527,35 @@ leann ask my-docs --interactive

 # List all your indexes
 leann list

 # Remove an index
 leann remove my-docs
 ```

 **Key CLI features:**
-- Auto-detects document formats (PDF, TXT, MD, DOCX)
+- Auto-detects document formats (PDF, TXT, MD, DOCX, PPTX + code files)
 - Smart text chunking with overlap
 - Multiple LLM providers (Ollama, OpenAI, HuggingFace)
-- Organized index storage in `~/.leann/indexes/`
+- Organized index storage in `.leann/indexes/` (project-local)
 - Support for advanced search parameters

 <details>
 <summary><strong>📋 Click to expand: Complete CLI Reference</strong></summary>

+You can use `leann --help`, or `leann build --help`, `leann search --help`, `leann ask --help`, `leann list --help`, `leann remove --help` to get the complete CLI reference.
+
 **Build Command:**
 ```bash
-leann build INDEX_NAME --docs DIRECTORY [OPTIONS]
+leann build INDEX_NAME --docs DIRECTORY|FILE [DIRECTORY|FILE ...] [OPTIONS]

 Options:
   --backend {hnsw,diskann}      Backend to use (default: hnsw)
   --embedding-model MODEL       Embedding model (default: facebook/contriever)
   --graph-degree N              Graph degree (default: 32)
   --complexity N                Build complexity (default: 64)
   --force                       Force rebuild existing index
-  --compact                     Use compact storage (default: true)
-  --recompute                   Enable recomputation (default: true)
+  --compact / --no-compact      Use compact storage (default: true). Must be `no-compact` for `no-recompute` build.
+  --recompute / --no-recompute  Enable recomputation (default: true)
 ```

 **Search Command:**

@@ -510,9 +563,9 @@ Options:
 leann search INDEX_NAME QUERY [OPTIONS]

 Options:
   --top-k N                     Number of results (default: 5)
   --complexity N                Search complexity (default: 64)
-  --recompute-embeddings        Use recomputation for highest accuracy
+  --recompute / --no-recompute  Enable/disable embedding recomputation (default: enabled). Should not do a `no-recompute` search in a `recompute` build.
   --pruning-strategy {global,local,proportional}
 ```

@@ -527,6 +580,31 @@ Options:
   --top-k N                     Retrieval count (default: 20)
 ```

+**List Command:**
+```bash
+leann list
+
+# Lists all indexes across all projects with status indicators:
+# ✅ - Index is complete and ready to use
+# ❌ - Index is incomplete or corrupted
+# 📁 - CLI-created index (in .leann/indexes/)
+# 📄 - App-created index (*.leann.meta.json files)
+```
+
+**Remove Command:**
+```bash
+leann remove INDEX_NAME [OPTIONS]
+
+Options:
+  --force, -f  Force removal without confirmation
+
+# Smart removal: automatically finds and safely removes indexes
+# - Shows all matching indexes across projects
+# - Requires confirmation for cross-project removal
+# - Interactive selection when multiple matches found
+# - Supports both CLI and app-created indexes
+```

 </details>

 ## 🏗️ Architecture & How It Works
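The paired `--compact/--no-compact` and `--recompute/--no-recompute` forms documented above come from `argparse.BooleanOptionalAction` (see the CLI diff near the end of this compare). A minimal standalone sketch of the mechanism, independent of LEANN:

```python
import argparse

parser = argparse.ArgumentParser()
# BooleanOptionalAction (Python 3.9+) auto-generates the --no- variant
parser.add_argument("--recompute", action=argparse.BooleanOptionalAction, default=True)

print(parser.parse_args([]))                  # Namespace(recompute=True)
print(parser.parse_args(["--no-recompute"]))  # Namespace(recompute=False)
```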
(file header missing; diff touches class BaseRAGExample)

@@ -10,6 +10,7 @@ from typing import Any

 import dotenv
 from leann.api import LeannBuilder, LeannChat
+from leann.registry import register_project_directory
 from llama_index.core.node_parser import SentenceSplitter

 dotenv.load_dotenv()

@@ -69,14 +70,14 @@ class BaseRAGExample(ABC):
             "--embedding-model",
             type=str,
             default=embedding_model_default,
-            help=f"Embedding model to use (default: {embedding_model_default})",
+            help=f"Embedding model to use (default: {embedding_model_default}), we provide facebook/contriever, text-embedding-3-small, mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text",
         )
         embedding_group.add_argument(
             "--embedding-mode",
             type=str,
             default="sentence-transformers",
             choices=["sentence-transformers", "openai", "mlx", "ollama"],
-            help="Embedding backend mode (default: sentence-transformers)",
+            help="Embedding backend mode (default: sentence-transformers), we provide sentence-transformers, openai, mlx, or ollama",
         )

         # LLM parameters

@@ -86,13 +87,13 @@ class BaseRAGExample(ABC):
             type=str,
             default="openai",
             choices=["openai", "ollama", "hf", "simulated"],
-            help="LLM backend to use (default: openai)",
+            help="LLM backend: openai, ollama, or hf (default: openai)",
         )
         llm_group.add_argument(
             "--llm-model",
             type=str,
             default=None,
-            help="LLM model name (default: gpt-4o for openai, llama3.2:1b for ollama)",
+            help="Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct",
         )
         llm_group.add_argument(
             "--llm-host",

@@ -214,6 +215,11 @@ class BaseRAGExample(ABC):
         builder.build_index(index_path)
         print(f"Index saved to: {index_path}")

+        # Register project directory so leann list can discover this index
+        # The index is saved as args.index_dir/index_name.leann
+        # We want to register the current working directory where the app is run
+        register_project_directory(Path.cwd())
+
         return index_path

     async def run_interactive_chat(self, args, index_path: str):
benchmarks/benchmark_no_recompute.py (new file, 148 lines)

@@ -0,0 +1,148 @@
import argparse
import os
import time
from pathlib import Path

from leann import LeannBuilder, LeannSearcher


def _meta_exists(index_path: str) -> bool:
    p = Path(index_path)
    return (p.parent / f"{p.stem}.meta.json").exists()


def ensure_index(index_path: str, backend_name: str, num_docs: int, is_recompute: bool) -> None:
    # if _meta_exists(index_path):
    #     return
    kwargs = {}
    if backend_name == "hnsw":
        kwargs["is_compact"] = is_recompute
    builder = LeannBuilder(
        backend_name=backend_name,
        embedding_model=os.getenv("LEANN_EMBED_MODEL", "facebook/contriever"),
        embedding_mode=os.getenv("LEANN_EMBED_MODE", "sentence-transformers"),
        graph_degree=32,
        complexity=64,
        is_recompute=is_recompute,
        num_threads=4,
        **kwargs,
    )
    for i in range(num_docs):
        builder.add_text(
            f"This is a test document number {i}. It contains some repeated text for benchmarking."
        )
    builder.build_index(index_path)


def _bench_group(
    index_path: str,
    recompute: bool,
    query: str,
    repeats: int,
    complexity: int = 32,
    top_k: int = 10,
) -> float:
    # Independent searcher per group; fixed port when recompute
    searcher = LeannSearcher(index_path=index_path)

    # Warm-up once
    _ = searcher.search(
        query,
        top_k=top_k,
        complexity=complexity,
        recompute_embeddings=recompute,
    )

    def _once() -> float:
        t0 = time.time()
        _ = searcher.search(
            query,
            top_k=top_k,
            complexity=complexity,
            recompute_embeddings=recompute,
        )
        return time.time() - t0

    if repeats <= 1:
        t = _once()
    else:
        vals = [_once() for _ in range(repeats)]
        vals.sort()
        t = vals[len(vals) // 2]

    searcher.cleanup()
    return t


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-docs", type=int, default=5000)
    parser.add_argument("--repeats", type=int, default=3)
    parser.add_argument("--complexity", type=int, default=32)
    args = parser.parse_args()

    base = Path.cwd() / ".leann" / "indexes" / f"bench_n{args.num_docs}"
    base.parent.mkdir(parents=True, exist_ok=True)
    # ---------- Build HNSW variants ----------
    hnsw_r = str(base / f"hnsw_recompute_n{args.num_docs}.leann")
    hnsw_nr = str(base / f"hnsw_norecompute_n{args.num_docs}.leann")
    ensure_index(hnsw_r, "hnsw", args.num_docs, True)
    ensure_index(hnsw_nr, "hnsw", args.num_docs, False)

    # ---------- Build DiskANN variants ----------
    diskann_r = str(base / "diskann_r.leann")
    diskann_nr = str(base / "diskann_nr.leann")
    ensure_index(diskann_r, "diskann", args.num_docs, True)
    ensure_index(diskann_nr, "diskann", args.num_docs, False)

    # ---------- Helpers ----------
    def _size_for(prefix: str) -> int:
        p = Path(prefix)
        base_dir = p.parent
        stem = p.stem
        total = 0
        for f in base_dir.iterdir():
            if f.is_file() and f.name.startswith(stem):
                total += f.stat().st_size
        return total

    # ---------- HNSW benchmark ----------
    t_hnsw_r = _bench_group(
        hnsw_r, True, "test document number 42", repeats=args.repeats, complexity=args.complexity
    )
    t_hnsw_nr = _bench_group(
        hnsw_nr, False, "test document number 42", repeats=args.repeats, complexity=args.complexity
    )
    size_hnsw_r = _size_for(hnsw_r)
    size_hnsw_nr = _size_for(hnsw_nr)

    print("Benchmark results (HNSW):")
    print(f"  recompute=True: search_time={t_hnsw_r:.3f}s, size={size_hnsw_r / 1024 / 1024:.1f}MB")
    print(
        f"  recompute=False: search_time={t_hnsw_nr:.3f}s, size={size_hnsw_nr / 1024 / 1024:.1f}MB"
    )
    print("  Expectation: no-recompute should be faster but larger on disk.")

    # ---------- DiskANN benchmark ----------
    t_diskann_r = _bench_group(
        diskann_r, True, "DiskANN R test doc 123", repeats=args.repeats, complexity=args.complexity
    )
    t_diskann_nr = _bench_group(
        diskann_nr,
        False,
        "DiskANN NR test doc 123",
        repeats=args.repeats,
        complexity=args.complexity,
    )
    size_diskann_r = _size_for(diskann_r)
    size_diskann_nr = _size_for(diskann_nr)

    print("\nBenchmark results (DiskANN):")
    print(f"  build(recompute=True, partition): size={size_diskann_r / 1024 / 1024:.1f}MB")
    print(f"  build(recompute=False): size={size_diskann_nr / 1024 / 1024:.1f}MB")
    print(f"  search recompute=True (final rerank): {t_diskann_r:.3f}s")
    print(f"  search recompute=False (PQ only): {t_diskann_nr:.3f}s")


if __name__ == "__main__":
    main()
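Per the script's own argparse definition, the numbers quoted in the configuration-guide diff further down can be reproduced from the repository root with `uv run benchmarks/benchmark_no_recompute.py --num-docs 5000 --repeats 3 --complexity 32` (each group reports the median of the repeats).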
(file header missing; DiskANN vs HNSW speed-comparison benchmark)

@@ -10,6 +10,7 @@ This benchmark compares search performance between DiskANN and HNSW backends:
 """

 import gc
+import multiprocessing as mp
 import tempfile
 import time
 from pathlib import Path

@@ -17,6 +18,12 @@ from typing import Any

 import numpy as np

+# Prefer 'fork' start method to avoid POSIX semaphore leaks on macOS
+try:
+    mp.set_start_method("fork", force=True)
+except Exception:
+    pass
+

 def create_test_texts(n_docs: int) -> list[str]:
     """Create synthetic test documents for benchmarking."""

@@ -113,10 +120,10 @@ def benchmark_backend(
     ]
     score_validity_rate = len(valid_scores) / len(all_scores) if all_scores else 0

-    # Clean up
+    # Clean up (ensure embedding server shutdown and object GC)
     try:
-        if hasattr(searcher, "__del__"):
-            searcher.__del__()
+        if hasattr(searcher, "cleanup"):
+            searcher.cleanup()
         del searcher
         del builder
         gc.collect()

@@ -259,10 +266,21 @@ if __name__ == "__main__":
         print(f"\n❌ Benchmark failed: {e}")
         sys.exit(1)
     finally:
-        # Ensure clean exit
+        # Ensure clean exit (forceful to prevent rare hangs from atexit/threads)
         try:
             gc.collect()
             print("\n🧹 Cleanup completed")
+            # Flush stdio to ensure message is visible before hard-exit
+            try:
+                import sys as _sys
+
+                _sys.stdout.flush()
+                _sys.stderr.flush()
+            except Exception:
+                pass
         except Exception:
             pass
-        sys.exit(0)
+        # Use os._exit to bypass atexit handlers that may hang in rare cases
+        import os as _os
+
+        _os._exit(0)
(file header missing; diff touches class Benchmark)

@@ -183,6 +183,9 @@ class Benchmark:
         start_time = time.time()
         with torch.no_grad():
             self.model(input_ids=input_ids, attention_mask=attention_mask)
+        # mps sync
+        if torch.backends.mps.is_available():
+            torch.mps.synchronize()
         end_time = time.time()

         return end_time - start_time
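The added `torch.mps.synchronize()` is what makes this timing meaningful on Apple Silicon: MPS kernels are dispatched asynchronously, so without the sync `end_time` could be captured before the forward pass has actually finished on the GPU.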
(file header missing; embedding/backend configuration guide)

@@ -52,7 +52,7 @@ Based on our experience developing LEANN, embedding models fall into three categories
 ### Quick Start: Cloud and Local Embedding Options

 **OpenAI Embeddings (Fastest Setup)**
-For immediate testing without local model downloads:
+For immediate testing without local model downloads (also if you [do not have a GPU](https://github.com/yichuan-w/LEANN/issues/43) and are not too concerned about document privacy, use this: embeddings are computed and recomputed via the OpenAI API):
 ```bash
 # Set OpenAI embeddings (requires OPENAI_API_KEY)
 --embedding-mode openai --embedding-model text-embedding-3-small

@@ -97,29 +97,23 @@ ollama pull nomic-embed-text
 ```

 ### DiskANN
-**Best for**: Performance-critical applications and large datasets - **Production-ready with automatic graph partitioning**
+**Best for**: Large datasets, especially when you want `recompute=True`.

-**How it works:**
-- **Product Quantization (PQ) + Real-time Reranking**: Uses compressed PQ codes for fast graph traversal, then recomputes exact embeddings for final candidates
-- **Automatic Graph Partitioning**: When `is_recompute=True`, automatically partitions large indices and safely removes redundant files to save storage
-- **Superior Speed-Accuracy Trade-off**: Faster search than HNSW while maintaining high accuracy
-
-**Trade-offs compared to HNSW:**
-- ✅ **Faster search latency** (typically 2-8x speedup)
-- ✅ **Better scaling** for large datasets
-- ✅ **Smart storage management** with automatic partitioning
-- ✅ **Better graph locality** with `--ldg-times` parameter for SSD optimization
-- ⚠️ **Slightly larger index size** due to PQ tables and graph metadata
+**Key advantages:**
+- **Faster search** on large datasets (3x+ speedup vs HNSW in many cases)
+- **Smart storage**: `recompute=True` enables automatic graph partitioning for smaller indexes
+- **Better scaling**: Designed for 100k+ documents
+
+**Recompute behavior:**
+- `recompute=True` (recommended): Pure PQ traversal + final reranking - faster and enables partitioning
+- `recompute=False`: PQ + partial real distances during traversal - slower but higher accuracy

 ```bash
 # Recommended for most use cases
 --backend-name diskann --graph-degree 32 --build-complexity 64

 # For large-scale deployments
 --backend-name diskann --graph-degree 64 --build-complexity 128
 ```

-**Performance Benchmark**: Run `python benchmarks/diskann_vs_hnsw_speed_comparison.py` to compare DiskANN and HNSW on your system.
+**Performance Benchmark**: Run `uv run benchmarks/diskann_vs_hnsw_speed_comparison.py` to compare DiskANN and HNSW on your system.

 ## LLM Selection: Engine and Model Comparison

@@ -273,24 +267,114 @@ Every configuration choice involves trade-offs:

 The key is finding the right balance for your specific use case. Start small and simple, measure performance, then scale up only where needed.

-## Deep Dive: Critical Configuration Decisions
-
-### When to Disable Recomputation
-
-LEANN's recomputation feature provides exact distance calculations but can be disabled for extreme QPS requirements:
-
-```bash
---no-recompute # Disable selective recomputation
-```
-
-**Trade-offs**:
-- **With recomputation** (default): Exact distances, best quality, higher latency, minimal storage (only stores metadata, recomputes embeddings on-demand)
-- **Without recomputation**: Must store full embeddings, significantly higher memory and storage usage (10-100x more), but faster search
-
-**Disable when**:
-- You have abundant storage and memory
-- Need extremely low latency (< 100ms)
-- Running a read-heavy workload where storage cost is acceptable
+## Low-resource setups
+
+If you don’t have a local GPU or builds/searches are too slow, use one or more of the options below.
+
+### 1) Use OpenAI embeddings (no local compute)
+
+Fastest path with zero local GPU requirements. Set your API key and use OpenAI embeddings during build and search:
+
+```bash
+export OPENAI_API_KEY=sk-...
+
+# Build with OpenAI embeddings
+leann build my-index \
+  --embedding-mode openai \
+  --embedding-model text-embedding-3-small
+
+# Search with OpenAI embeddings (recompute at query time)
+leann search my-index "your query" \
+  --recompute
+```
+
+### 2) Run remote builds with SkyPilot (cloud GPU)
+
+Offload embedding generation and index building to a GPU VM using [SkyPilot](https://skypilot.readthedocs.io/en/latest/). A template is provided at `sky/leann-build.yaml`.
+
+```bash
+# One-time: install and configure SkyPilot
+pip install skypilot
+
+# Launch with defaults (L4:1) and mount ./data to ~/leann-data; the build runs automatically
+sky launch -c leann-gpu sky/leann-build.yaml
+
+# Override parameters via -e key=value (optional)
+sky launch -c leann-gpu sky/leann-build.yaml \
+  -e index_name=my-index \
+  -e backend=hnsw \
+  -e embedding_mode=sentence-transformers \
+  -e embedding_model=Qwen/Qwen3-Embedding-0.6B
+
+# Copy the built index back to your local .leann (use rsync)
+rsync -Pavz leann-gpu:~/.leann/indexes/my-index ./.leann/indexes/
+```
+
+### 3) Disable recomputation to trade storage for speed
+
+If you need lower latency and have more storage/memory, disable recomputation. This stores full embeddings and avoids recomputing at search time.
+
+```bash
+# Build without recomputation (HNSW requires non-compact in this mode)
+leann build my-index --no-recompute --no-compact
+
+# Search without recomputation
+leann search my-index "your query" --no-recompute
+```
+
+When to use:
+- Extreme low latency requirements (high QPS, interactive assistants)
+- Read-heavy workloads where storage is cheaper than latency
+- No always-available GPU
+
+Constraints:
+- HNSW: when `--no-recompute` is set, LEANN automatically disables compact mode during build
+- DiskANN: supported; `--no-recompute` skips selective recompute during search
+
+Storage impact:
+- Storing N embeddings of dimension D with float32 requires approximately N × D × 4 bytes
+- Example: 1,000,000 chunks × 768 dims × 4 bytes ≈ 2.86 GB (plus graph/metadata)
+
+Converting an existing index (rebuild required):
+```bash
+# Rebuild in-place (ensure you still have original docs or can regenerate chunks)
+leann build my-index --force --no-recompute --no-compact
+```
+
+Python API usage:
+```python
+from leann import LeannSearcher
+
+searcher = LeannSearcher("/path/to/my-index.leann")
+results = searcher.search("your query", top_k=10, recompute_embeddings=False)
+```
+
+Trade-offs:
+- Lower latency and fewer network hops at query time
+- Significantly higher storage (10–100× vs selective recomputation)
+- Slightly larger memory footprint during build and search
+
+Quick benchmark results (`benchmarks/benchmark_no_recompute.py` with 5k texts, complexity=32):
+
+- HNSW
+
+  ```text
+  recompute=True: search_time=0.818s, size=1.1MB
+  recompute=False: search_time=0.012s, size=16.6MB
+  ```
+
+- DiskANN
+
+  ```text
+  recompute=True: search_time=0.041s, size=5.9MB
+  recompute=False: search_time=0.013s, size=24.6MB
+  ```
+
+Conclusion:
+- **HNSW**: `no-recompute` is significantly faster (no embedding recomputation) but requires much more storage (stores all embeddings)
+- **DiskANN**: `no-recompute` uses PQ + partial real distances during traversal (slower but higher accuracy), while `recompute=True` uses pure PQ traversal + final reranking (faster traversal, enables build-time partitioning for smaller storage)

 ## Further Reading
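The storage estimate in the guide above is easy to verify; a quick sanity check in Python:

```python
# float32 embeddings: N chunks x D dims x 4 bytes each
n_chunks, dim = 1_000_000, 768
print(f"{n_chunks * dim * 4 / 2**30:.2f} GiB")  # -> 2.86 GiB, plus graph/metadata
```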
(file header missing; diff touches class DiskannSearcher)

@@ -441,9 +441,14 @@ class DiskannSearcher(BaseSearcher):
         else:  # "global"
             use_global_pruning = True

-        # Perform search with suppressed C++ output based on log level
-        use_deferred_fetch = kwargs.get("USE_DEFERRED_FETCH", True)
-        recompute_neighors = False
+        # Strategy:
+        # - Traversal always uses PQ distances
+        # - If recompute_embeddings=True, do a single final rerank via deferred fetch
+        #   (fetch embeddings for the final candidate set only)
+        # - Do not recompute neighbor distances along the path
+        use_deferred_fetch = True if recompute_embeddings else False
+        recompute_neighors = False  # Expected typo. For backward compatibility.

         with suppress_cpp_output_if_needed():
             labels, distances = self._index.batch_search(
                 query,
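From the caller's side, the two DiskANN modes described in the comment above are selected by the `recompute_embeddings` flag; a minimal sketch using the public API (index path is made up):

```python
from leann import LeannSearcher

searcher = LeannSearcher(index_path=".leann/indexes/bench/diskann_r.leann")  # hypothetical path

# PQ traversal + one final exact rerank over the candidate set (deferred fetch)
exact = searcher.search("test document", top_k=10, recompute_embeddings=True)

# PQ distances only, no final rerank
approx = searcher.search("test document", top_k=10, recompute_embeddings=False)

searcher.cleanup()  # stop the embedding server when done
```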
packages/leann-backend-diskann/pyproject.toml

@@ -4,8 +4,8 @@ build-backend = "scikit_build_core.build"

 [project]
 name = "leann-backend-diskann"
-version = "0.2.8"
-dependencies = ["leann-core==0.2.8", "numpy", "protobuf>=3.19.0"]
+version = "0.3.2"
+dependencies = ["leann-core==0.3.2", "numpy", "protobuf>=3.19.0"]

 [tool.scikit-build]
 # Key: simplified CMake path
Submodule packages/leann-backend-diskann/third_party/DiskANN updated: 04048bb302...c593831474
(file header missing; diff touches HNSWBuilder and HNSWSearcher)

@@ -54,12 +54,13 @@ class HNSWBuilder(LeannBackendBuilderInterface):
         self.efConstruction = self.build_params.setdefault("efConstruction", 200)
         self.distance_metric = self.build_params.setdefault("distance_metric", "mips")
         self.dimensions = self.build_params.get("dimensions")
-        if not self.is_recompute:
-            if self.is_compact:
-                # TODO: support this case @andy
-                raise ValueError(
-                    "is_recompute is False, but is_compact is True. This is not compatible now. change is compact to False and you can use the original HNSW index."
-                )
+        if not self.is_recompute and self.is_compact:
+            # Auto-correct: non-recompute requires non-compact storage for HNSW
+            logger.warning(
+                "is_recompute=False requires non-compact HNSW. Forcing is_compact=False."
+            )
+            self.is_compact = False
+            self.build_params["is_compact"] = False

     def build(self, data: np.ndarray, ids: list[str], index_path: str, **kwargs):
         from . import faiss  # type: ignore

@@ -184,9 +185,11 @@ class HNSWSearcher(BaseSearcher):
         """
         from . import faiss  # type: ignore

-        if not recompute_embeddings:
-            if self.is_pruned:
-                raise RuntimeError("Recompute is required for pruned index.")
+        if not recompute_embeddings and self.is_pruned:
+            raise RuntimeError(
+                "Recompute is required for pruned/compact HNSW index. "
+                "Re-run search with --recompute, or rebuild with --no-recompute and --no-compact."
+            )
         if recompute_embeddings:
             if zmq_port is None:
                 raise ValueError("zmq_port must be provided if recompute_embeddings is True")
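Taken together with the `LeannBuilder` normalization further down, the old hard error becomes a warning plus auto-correction. A minimal sketch of the new behavior under those assumptions (the index file name is made up):

```python
from leann import LeannBuilder

# Previously raised ValueError; now warns and forces is_compact=False
builder = LeannBuilder(backend_name="hnsw", is_recompute=False, is_compact=True)
builder.add_text("hello leann")
builder.build_index("demo_flat.leann")  # built non-compact, with embeddings stored
```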
packages/leann-backend-hnsw/pyproject.toml

@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"

 [project]
 name = "leann-backend-hnsw"
-version = "0.2.8"
+version = "0.3.2"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
 dependencies = [
-    "leann-core==0.2.8",
+    "leann-core==0.3.2",
     "numpy",
     "pyzmq>=23.0.0",
     "msgpack>=1.0.0",
packages/leann-core/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "leann-core"
-version = "0.2.8"
+version = "0.3.2"
 description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"
(file header missing; LEANN core API: compute_embeddings, LeannBuilder, LeannSearcher, LeannChat)

@@ -46,6 +46,7 @@ def compute_embeddings(
         - "sentence-transformers": Use sentence-transformers library (default)
         - "mlx": Use MLX backend for Apple Silicon
         - "openai": Use OpenAI embedding API
+        - "gemini": Use Google Gemini embedding API
         use_server: Whether to use embedding server (True for search, False for build)

     Returns:

@@ -204,6 +205,18 @@ class LeannBuilder:
         **backend_kwargs,
     ):
         self.backend_name = backend_name
+        # Normalize incompatible combinations early (for consistent metadata)
+        if backend_name == "hnsw":
+            is_recompute = backend_kwargs.get("is_recompute", True)
+            is_compact = backend_kwargs.get("is_compact", True)
+            if is_recompute is False and is_compact is True:
+                warnings.warn(
+                    "HNSW with is_recompute=False requires non-compact storage. Forcing is_compact=False.",
+                    UserWarning,
+                    stacklevel=2,
+                )
+                backend_kwargs["is_compact"] = False
+
         backend_factory: Optional[LeannBackendFactoryInterface] = BACKEND_REGISTRY.get(backend_name)
         if backend_factory is None:
             raise ValueError(f"Backend '{backend_name}' not found or not registered.")

@@ -294,6 +307,23 @@ class LeannBuilder:
     def build_index(self, index_path: str):
         if not self.chunks:
             raise ValueError("No chunks added.")

+        # Filter out invalid/empty text chunks early to keep passage and embedding counts aligned
+        valid_chunks: list[dict[str, Any]] = []
+        skipped = 0
+        for chunk in self.chunks:
+            text = chunk.get("text", "")
+            if isinstance(text, str) and text.strip():
+                valid_chunks.append(chunk)
+            else:
+                skipped += 1
+        if skipped > 0:
+            print(
+                f"Warning: Skipping {skipped} empty/invalid text chunk(s). Processing {len(valid_chunks)} valid chunks"
+            )
+        self.chunks = valid_chunks
+        if not self.chunks:
+            raise ValueError("All provided chunks are empty or invalid. Nothing to index.")
         if self.dimensions is None:
             self.dimensions = len(
                 compute_embeddings(

@@ -523,6 +553,7 @@ class LeannSearcher:
         self.embedding_model = self.meta_data["embedding_model"]
         # Support both old and new format
         self.embedding_mode = self.meta_data.get("embedding_mode", "sentence-transformers")
+        # Delegate portability handling to PassageManager
         self.passage_manager = PassageManager(
             self.meta_data.get("passage_sources", []), metadata_file_path=self.meta_path_str
         )

@@ -583,7 +614,7 @@ class LeannSearcher:
             zmq_port=zmq_port,
         )
         # logger.info(f"  Generated embedding shape: {query_embedding.shape}")
-        time.time() - start_time
+        # time.time() - start_time
         # logger.info(f"  Embedding time: {embedding_time} seconds")

         start_time = time.time()

@@ -649,8 +680,26 @@ class LeannSearcher:
         This method should be called after you're done using the searcher,
         especially in test environments or batch processing scenarios.
         """
-        if hasattr(self.backend_impl, "embedding_server_manager"):
-            self.backend_impl.embedding_server_manager.stop_server()
+        backend = getattr(self.backend_impl, "embedding_server_manager", None)
+        if backend is not None:
+            backend.stop_server()
+
+    # Enable automatic cleanup patterns
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc, tb):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
+
+    def __del__(self):
+        try:
+            self.cleanup()
+        except Exception:
+            # Avoid noisy errors during interpreter shutdown
+            pass


 class LeannChat:

@@ -730,3 +779,19 @@ class LeannChat:
         """
         if hasattr(self.searcher, "cleanup"):
             self.searcher.cleanup()
+
+    # Enable automatic cleanup patterns
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc, tb):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
+
+    def __del__(self):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
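With `__enter__`/`__exit__` in place, both `LeannSearcher` and `LeannChat` can be scoped so the embedding server is always stopped; a small usage sketch (the index path is hypothetical):

```python
from leann import LeannSearcher

with LeannSearcher(".leann/indexes/my-docs/documents.leann") as searcher:
    results = searcher.search("selective recomputation", top_k=5)
# __exit__ has invoked cleanup() here, even if search() raised
```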
(file header missing; LLM interfaces: LLMInterface, HFChat, GeminiChat, OpenAIChat, get_llm)

@@ -422,7 +422,6 @@ class LLMInterface(ABC):
         top_k=10,
         complexity=64,
         beam_width=8,
-        USE_DEFERRED_FETCH=True,
         skip_search_reorder=True,
         recompute_beighbor_embeddings=True,
         dedup_node_dis=True,

@@ -434,7 +433,6 @@ class LLMInterface(ABC):
     Supported kwargs:
         - complexity (int): Search complexity parameter (default: 32)
        - beam_width (int): Beam width for search (default: 4)
-        - USE_DEFERRED_FETCH (bool): Enable deferred fetch mode (default: False)
         - skip_search_reorder (bool): Skip search reorder step (default: False)
         - recompute_beighbor_embeddings (bool): Enable ZMQ embedding server for neighbor recomputation (default: False)
         - dedup_node_dis (bool): Deduplicate nodes by distance (default: False)

@@ -682,6 +680,60 @@ class HFChat(LLMInterface):
         return response.strip()


+class GeminiChat(LLMInterface):
+    """LLM interface for Google Gemini models."""
+
+    def __init__(self, model: str = "gemini-2.5-flash", api_key: Optional[str] = None):
+        self.model = model
+        self.api_key = api_key or os.getenv("GEMINI_API_KEY")
+
+        if not self.api_key:
+            raise ValueError(
+                "Gemini API key is required. Set GEMINI_API_KEY environment variable or pass api_key parameter."
+            )
+
+        logger.info(f"Initializing Gemini Chat with model='{model}'")
+
+        try:
+            import google.genai as genai
+
+            self.client = genai.Client(api_key=self.api_key)
+        except ImportError:
+            raise ImportError(
+                "The 'google-genai' library is required for Gemini models. Please install it with 'uv pip install google-genai'."
+            )
+
+    def ask(self, prompt: str, **kwargs) -> str:
+        logger.info(f"Sending request to Gemini with model {self.model}")
+
+        try:
+            from google.genai.types import GenerateContentConfig
+
+            generation_config = GenerateContentConfig(
+                temperature=kwargs.get("temperature", 0.7),
+                max_output_tokens=kwargs.get("max_tokens", 1000),
+            )
+
+            # Handle top_p parameter
+            if "top_p" in kwargs:
+                generation_config.top_p = kwargs["top_p"]
+
+            response = self.client.models.generate_content(
+                model=self.model,
+                contents=prompt,
+                config=generation_config,
+            )
+            # Handle potential None response text
+            response_text = response.text
+            if response_text is None:
+                logger.warning("Gemini returned None response text")
+                return ""
+            return response_text.strip()
+        except Exception as e:
+            logger.error(f"Error communicating with Gemini: {e}")
+            return f"Error: Could not get a response from Gemini. Details: {e}"
+
+
 class OpenAIChat(LLMInterface):
     """LLM interface for OpenAI models."""

@@ -795,6 +847,8 @@ def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
         return HFChat(model_name=model or "deepseek-ai/deepseek-llm-7b-chat")
     elif llm_type == "openai":
         return OpenAIChat(model=model or "gpt-4o", api_key=llm_config.get("api_key"))
+    elif llm_type == "gemini":
+        return GeminiChat(model=model or "gemini-2.5-flash", api_key=llm_config.get("api_key"))
     elif llm_type == "simulated":
         return SimulatedChat()
     else:
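A minimal sketch of the new Gemini path (requires `uv pip install google-genai` and a real `GEMINI_API_KEY`; the module path and prompt are assumptions, not confirmed by this diff):

```python
from leann.chat import GeminiChat  # module path assumed; the class itself is added above

chat = GeminiChat(model="gemini-2.5-flash")  # reads GEMINI_API_KEY from the environment
print(chat.ask("Summarize LEANN in one sentence.", temperature=0.2, max_tokens=128))
```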
@@ -1,13 +1,14 @@
|
||||
import argparse
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from typing import Optional, Union
|
||||
|
||||
from llama_index.core import SimpleDirectoryReader
|
||||
from llama_index.core.node_parser import SentenceSplitter
|
||||
from tqdm import tqdm
|
||||
|
||||
from .api import LeannBuilder, LeannChat, LeannSearcher
|
||||
from .registry import register_project_directory
|
||||
|
||||
|
||||
def extract_pdf_text_with_pymupdf(file_path: str) -> str:
|
||||
@@ -72,7 +73,7 @@ class LeannCLI:
|
||||
def create_parser(self) -> argparse.ArgumentParser:
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="leann",
|
||||
description="LEANN - Local Enhanced AI Navigation",
|
||||
description="The smallest vector index in the world. RAG Everything with LEANN!",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
Examples:
|
||||
@@ -84,6 +85,7 @@ Examples:
|
||||
leann search my-docs "query" # Search in my-docs index
|
||||
leann ask my-docs "question" # Ask my-docs index
|
||||
leann list # List all stored indexes
|
||||
leann remove my-docs # Remove an index (local first, then global)
|
||||
""",
|
||||
)
|
||||
|
||||
@@ -102,9 +104,18 @@ Examples:
|
||||
help="Documents directories and/or files (default: current directory)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--backend", type=str, default="hnsw", choices=["hnsw", "diskann"]
|
||||
"--backend",
|
||||
type=str,
|
||||
default="hnsw",
|
||||
choices=["hnsw", "diskann"],
|
||||
help="Backend to use (default: hnsw)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--embedding-model",
|
||||
type=str,
|
||||
default="facebook/contriever",
|
||||
help="Embedding model (default: facebook/contriever)",
|
||||
)
|
||||
build_parser.add_argument("--embedding-model", type=str, default="facebook/contriever")
|
||||
build_parser.add_argument(
|
||||
"--embedding-mode",
|
||||
type=str,
|
||||
@@ -112,36 +123,93 @@ Examples:
|
||||
choices=["sentence-transformers", "openai", "mlx", "ollama"],
|
||||
help="Embedding backend mode (default: sentence-transformers)",
|
||||
)
|
||||
build_parser.add_argument("--force", "-f", action="store_true", help="Force rebuild")
|
||||
build_parser.add_argument("--graph-degree", type=int, default=32)
|
||||
build_parser.add_argument("--complexity", type=int, default=64)
|
||||
build_parser.add_argument(
|
||||
"--force", "-f", action="store_true", help="Force rebuild existing index"
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--graph-degree", type=int, default=32, help="Graph degree (default: 32)"
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--complexity", type=int, default=64, help="Build complexity (default: 64)"
|
||||
)
|
||||
build_parser.add_argument("--num-threads", type=int, default=1)
|
||||
build_parser.add_argument("--compact", action="store_true", default=True)
|
||||
build_parser.add_argument("--recompute", action="store_true", default=True)
|
||||
build_parser.add_argument(
|
||||
"--compact",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Use compact storage (default: true). Must be `no-compact` for `no-recompute` build.",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--recompute",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Enable recomputation (default: true)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--file-types",
|
||||
type=str,
|
||||
help="Comma-separated list of file extensions to include (e.g., '.txt,.pdf,.pptx'). If not specified, uses default supported types.",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--include-hidden",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help="Include hidden files and directories (paths starting with '.') during indexing (default: false)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--doc-chunk-size",
|
||||
type=int,
|
||||
default=256,
|
||||
help="Document chunk size in tokens/characters (default: 256)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--doc-chunk-overlap",
|
||||
type=int,
|
||||
default=128,
|
||||
help="Document chunk overlap (default: 128)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--code-chunk-size",
|
||||
type=int,
|
||||
default=512,
|
||||
help="Code chunk size in tokens/lines (default: 512)",
|
||||
)
|
||||
build_parser.add_argument(
|
||||
"--code-chunk-overlap",
|
||||
type=int,
|
||||
default=50,
|
||||
help="Code chunk overlap (default: 50)",
|
||||
)
|
||||
|
||||
# Search command
|
||||
search_parser = subparsers.add_parser("search", help="Search documents")
|
||||
search_parser.add_argument("index_name", help="Index name")
|
||||
search_parser.add_argument("query", help="Search query")
|
||||
search_parser.add_argument("--top-k", type=int, default=5)
|
||||
search_parser.add_argument("--complexity", type=int, default=64)
|
||||
search_parser.add_argument(
|
||||
"--top-k", type=int, default=5, help="Number of results (default: 5)"
|
||||
)
|
||||
search_parser.add_argument(
|
||||
"--complexity", type=int, default=64, help="Search complexity (default: 64)"
|
||||
)
|
||||
search_parser.add_argument("--beam-width", type=int, default=1)
|
||||
search_parser.add_argument("--prune-ratio", type=float, default=0.0)
|
||||
search_parser.add_argument(
|
||||
"--recompute-embeddings",
|
||||
action="store_true",
|
||||
"--recompute",
|
||||
dest="recompute_embeddings",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Recompute embeddings (default: True)",
|
||||
help="Enable/disable embedding recomputation (default: enabled). Should not do a `no-recompute` search in a `recompute` build.",
|
||||
)
|
||||
search_parser.add_argument(
|
||||
"--pruning-strategy",
|
||||
choices=["global", "local", "proportional"],
|
||||
default="global",
|
||||
help="Pruning strategy (default: global)",
|
||||
)
|
||||
search_parser.add_argument(
|
||||
"--non-interactive",
|
||||
action="store_true",
|
||||
help="Non-interactive mode: automatically select index without prompting",
|
||||
)

        # Ask command
@@ -152,19 +220,27 @@ Examples:
            type=str,
            default="ollama",
            choices=["simulated", "ollama", "hf", "openai"],
            help="LLM provider (default: ollama)",
        )
-       ask_parser.add_argument("--model", type=str, default="qwen3:8b")
        ask_parser.add_argument(
            "--model", type=str, default="qwen3:8b", help="Model name (default: qwen3:8b)"
        )
        ask_parser.add_argument("--host", type=str, default="http://localhost:11434")
-       ask_parser.add_argument("--interactive", "-i", action="store_true")
-       ask_parser.add_argument("--top-k", type=int, default=20)
        ask_parser.add_argument(
            "--interactive", "-i", action="store_true", help="Interactive chat mode"
        )
        ask_parser.add_argument(
            "--top-k", type=int, default=20, help="Retrieval count (default: 20)"
        )
        ask_parser.add_argument("--complexity", type=int, default=32)
        ask_parser.add_argument("--beam-width", type=int, default=1)
        ask_parser.add_argument("--prune-ratio", type=float, default=0.0)
        ask_parser.add_argument(
            "--recompute-embeddings",
-           action="store_true",
-           help="Recompute embeddings (default: True)",
            "--recompute",
            dest="recompute_embeddings",
            action=argparse.BooleanOptionalAction,
            default=True,
            help="Enable/disable embedding recomputation during ask (default: enabled)",
        )
        ask_parser.add_argument(
            "--pruning-strategy",
@@ -182,35 +258,18 @@ Examples:
        # List command
        subparsers.add_parser("list", help="List all indexes")

        # Remove command
        remove_parser = subparsers.add_parser("remove", help="Remove an index")
        remove_parser.add_argument("index_name", help="Index name to remove")
        remove_parser.add_argument(
            "--force", "-f", action="store_true", help="Force removal without confirmation"
        )

        return parser

    def register_project_dir(self):
        """Register current project directory in global registry"""
-       global_registry = Path.home() / ".leann" / "projects.json"
-       global_registry.parent.mkdir(exist_ok=True)
-
-       current_dir = str(Path.cwd())
-
-       # Load existing registry
-       projects = []
-       if global_registry.exists():
-           try:
-               import json
-
-               with open(global_registry) as f:
-                   projects = json.load(f)
-           except Exception:
-               projects = []
-
-       # Add current directory if not already present
-       if current_dir not in projects:
-           projects.append(current_dir)
-
-       # Save registry
-       import json
-
-       with open(global_registry, "w") as f:
-           json.dump(projects, f, indent=2)
        register_project_directory()

    def _build_gitignore_parser(self, docs_dir: str):
        """Build gitignore parser using gitignore-parser library."""
@@ -270,8 +329,6 @@ Examples:
        return False

    def list_indexes(self):
-       print("Stored LEANN indexes:")
-
        # Get all project directories with .leann
        global_registry = Path.home() / ".leann" / "projects.json"
        all_projects = []
@@ -297,58 +354,485 @@ Examples:
        if (current_path / ".leann" / "indexes").exists() and current_path not in valid_projects:
            valid_projects.append(current_path)

        if not valid_projects:
            print(
                "No indexes found. Use 'leann build <name> --docs <dir> [<dir2> ...]' to create one."
            )
            return

-       total_indexes = 0
-       current_dir = Path.cwd()
-
-       for project_path in valid_projects:
-           indexes_dir = project_path / ".leann" / "indexes"
-           if not indexes_dir.exists():
-               continue
-
-           index_dirs = [d for d in indexes_dir.iterdir() if d.is_dir()]
-           if not index_dirs:
-               continue
-
-           # Show project header
-           if project_path == current_dir:
-               print(f"\n📁 Current project ({project_path}):")
-           else:
-               print(f"\n📂 {project_path}:")
-
-           for index_dir in index_dirs:
-               total_indexes += 1
-               index_name = index_dir.name
-               meta_file = index_dir / "documents.leann.meta.json"
-               status = "✓" if meta_file.exists() else "✗"
-               print(f"  {total_indexes}. {index_name} [{status}]")
-               if status == "✓":
-                   size_mb = sum(f.stat().st_size for f in index_dir.iterdir() if f.is_file()) / (
-                       1024 * 1024
-                   )
-                   print(f"     Size: {size_mb:.1f} MB")
-
-       if total_indexes > 0:
-           print(f"\nTotal: {total_indexes} indexes across {len(valid_projects)} projects")
-           print("\nUsage (current project only):")
-           # Show example from current project
-           current_indexes_dir = current_dir / ".leann" / "indexes"
-           if current_indexes_dir.exists():
-               current_index_dirs = [d for d in current_indexes_dir.iterdir() if d.is_dir()]
-               if current_index_dirs:
-                   example_name = current_index_dirs[0].name
-                   print(f'  leann search {example_name} "your query"')
-                   print(f"  leann ask {example_name} --interactive")
        # Separate current and other projects
        other_projects = []
        for project_path in valid_projects:
            if project_path != current_path:
                other_projects.append(project_path)

        print("📚 LEANN Indexes")
        print("=" * 50)

        total_indexes = 0
        current_indexes_count = 0

        # Show current project first (most important)
        print("\n🏠 Current Project")
        print(f"   {current_path}")
        print("   " + "─" * 45)

        current_indexes = self._discover_indexes_in_project(current_path)
        if current_indexes:
            for idx in current_indexes:
                total_indexes += 1
                current_indexes_count += 1
                type_icon = "📁" if idx["type"] == "cli" else "📄"
                print(f"   {current_indexes_count}. {type_icon} {idx['name']} {idx['status']}")
                if idx["size_mb"] > 0:
                    print(f"      📦 Size: {idx['size_mb']:.1f} MB")
        else:
            print("   📭 No indexes in current project")

        # Show other projects (reference information)
        if other_projects:
            print("\n\n🗂️ Other Projects")
            print("   " + "─" * 45)

            for project_path in other_projects:
                project_indexes = self._discover_indexes_in_project(project_path)
                if not project_indexes:
                    continue

                print(f"\n   📂 {project_path.name}")
                print(f"      {project_path}")

                for idx in project_indexes:
                    total_indexes += 1
                    type_icon = "📁" if idx["type"] == "cli" else "📄"
                    print(f"      • {type_icon} {idx['name']} {idx['status']}")
                    if idx["size_mb"] > 0:
                        print(f"         📦 {idx['size_mb']:.1f} MB")

        # Summary and usage info
        print("\n" + "=" * 50)
        if total_indexes == 0:
            print("💡 Get started:")
            print("   leann build my-docs --docs ./documents")
        else:
            # Count only projects that have at least one discoverable index
            projects_count = sum(
                1 for p in valid_projects if len(self._discover_indexes_in_project(p)) > 0
            )
            print(f"📊 Total: {total_indexes} indexes across {projects_count} projects")

        if current_indexes_count > 0:
            print("\n💫 Quick start (current project):")
            # Get first index from current project for example
            current_indexes_dir = current_path / ".leann" / "indexes"
            if current_indexes_dir.exists():
                current_index_dirs = [d for d in current_indexes_dir.iterdir() if d.is_dir()]
                if current_index_dirs:
                    example_name = current_index_dirs[0].name
                    print(f'  leann search {example_name} "your query"')
                    print(f"  leann ask {example_name} --interactive")
        else:
            print("\n💡 Create your first index:")
            print("   leann build my-docs --docs ./documents")

    def _discover_indexes_in_project(self, project_path: Path):
        """Discover all indexes in a project directory (both CLI and apps formats)"""
        indexes = []

        # 1. CLI format: .leann/indexes/index_name/
        cli_indexes_dir = project_path / ".leann" / "indexes"
        if cli_indexes_dir.exists():
            for index_dir in cli_indexes_dir.iterdir():
                if index_dir.is_dir():
                    meta_file = index_dir / "documents.leann.meta.json"
                    status = "✅" if meta_file.exists() else "❌"

                    size_mb = 0
                    if meta_file.exists():
                        try:
                            size_mb = sum(
                                f.stat().st_size for f in index_dir.iterdir() if f.is_file()
                            ) / (1024 * 1024)
                        except (OSError, PermissionError):
                            pass

                    indexes.append(
                        {
                            "name": index_dir.name,
                            "type": "cli",
                            "status": status,
                            "size_mb": size_mb,
                            "path": index_dir,
                        }
                    )

        # 2. Apps format: *.leann.meta.json files anywhere in the project
        cli_indexes_dir = project_path / ".leann" / "indexes"
        for meta_file in project_path.rglob("*.leann.meta.json"):
            if meta_file.is_file():
                # Skip CLI-built indexes (which store meta under .leann/indexes/<name>/)
                try:
                    if cli_indexes_dir.exists() and cli_indexes_dir in meta_file.parents:
                        continue
                except Exception:
                    pass
                # Use the parent directory name as the app index display name
                display_name = meta_file.parent.name
                # Extract the file base used to store files
                file_base = meta_file.name.replace(".leann.meta.json", "")

                # Apps indexes are considered complete if the .leann.meta.json file exists
                status = "✅"

                # Calculate total size of all related files (use file base)
                size_mb = 0
                try:
                    index_dir = meta_file.parent
                    for related_file in index_dir.glob(f"{file_base}.leann*"):
                        size_mb += related_file.stat().st_size / (1024 * 1024)
                except (OSError, PermissionError):
                    pass

                indexes.append(
                    {
                        "name": display_name,
                        "type": "app",
                        "status": status,
                        "size_mb": size_mb,
                        "path": meta_file,
                    }
                )

        return indexes
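
For orientation, a sketch of the two on-disk layouts this discovery covers; the concrete paths below are hypothetical examples, not fixed names:

```python
# CLI format - one directory per index, with a fixed meta file name:
#     <project>/.leann/indexes/my-docs/documents.leann.meta.json
#
# App format - a meta file named after its file base, anywhere in the tree:
#     <project>/notes/pdf_documents.leann.meta.json
#     -> display name "notes", file base "pdf_documents"
#
# Given a LeannCLI instance `cli`, discovery would be invoked as:
#     from pathlib import Path
#     for idx in cli._discover_indexes_in_project(Path.cwd()):
#         print(idx["type"], idx["name"], idx["status"], f"{idx['size_mb']:.1f} MB")
```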

    def remove_index(self, index_name: str, force: bool = False):
        """Safely remove an index - always show all matches for transparency"""

        # Always do a comprehensive search for safety
        print(f"🔍 Searching for all indexes named '{index_name}'...")
        all_matches = self._find_all_matching_indexes(index_name)

        if not all_matches:
            print(f"❌ Index '{index_name}' not found in any project.")
            return False

        if len(all_matches) == 1:
            return self._remove_single_match(all_matches[0], index_name, force)
        else:
            return self._remove_from_multiple_matches(all_matches, index_name, force)

    def _find_all_matching_indexes(self, index_name: str):
        """Find all indexes with the given name across all projects"""
        matches = []

        # Get all registered projects
        global_registry = Path.home() / ".leann" / "projects.json"
        all_projects = []

        if global_registry.exists():
            try:
                import json

                with open(global_registry) as f:
                    all_projects = json.load(f)
            except Exception:
                pass

        # Always include current project
        current_path = Path.cwd()
        if str(current_path) not in all_projects:
            all_projects.append(str(current_path))

        # Search across all projects
        for project_dir in all_projects:
            project_path = Path(project_dir)
            if not project_path.exists():
                continue

            # 1) CLI-format index under .leann/indexes/<name>
            index_dir = project_path / ".leann" / "indexes" / index_name
            if index_dir.exists():
                is_current = project_path == current_path
                matches.append(
                    {
                        "project_path": project_path,
                        "index_dir": index_dir,
                        "is_current": is_current,
                        "kind": "cli",
                    }
                )

            # 2) App-format indexes
            # We support two ways of addressing apps:
            #    a) by the file base (e.g., `pdf_documents`)
            #    b) by the parent directory name (e.g., `new_txt`)
            seen_app_meta = set()

            # 2a) by file base
            for meta_file in project_path.rglob(f"{index_name}.leann.meta.json"):
                if meta_file.is_file():
                    # Skip CLI-built indexes' meta under .leann/indexes
                    try:
                        cli_indexes_dir = project_path / ".leann" / "indexes"
                        if cli_indexes_dir.exists() and cli_indexes_dir in meta_file.parents:
                            continue
                    except Exception:
                        pass
                    is_current = project_path == current_path
                    key = (str(project_path), str(meta_file))
                    if key in seen_app_meta:
                        continue
                    seen_app_meta.add(key)
                    matches.append(
                        {
                            "project_path": project_path,
                            "files_dir": meta_file.parent,
                            "meta_file": meta_file,
                            "is_current": is_current,
                            "kind": "app",
                            "display_name": meta_file.parent.name,
                            "file_base": meta_file.name.replace(".leann.meta.json", ""),
                        }
                    )

            # 2b) by parent directory name
            for meta_file in project_path.rglob("*.leann.meta.json"):
                if meta_file.is_file() and meta_file.parent.name == index_name:
                    # Skip CLI-built indexes' meta under .leann/indexes
                    try:
                        cli_indexes_dir = project_path / ".leann" / "indexes"
                        if cli_indexes_dir.exists() and cli_indexes_dir in meta_file.parents:
                            continue
                    except Exception:
                        pass
                    is_current = project_path == current_path
                    key = (str(project_path), str(meta_file))
                    if key in seen_app_meta:
                        continue
                    seen_app_meta.add(key)
                    matches.append(
                        {
                            "project_path": project_path,
                            "files_dir": meta_file.parent,
                            "meta_file": meta_file,
                            "is_current": is_current,
                            "kind": "app",
                            "display_name": meta_file.parent.name,
                            "file_base": meta_file.name.replace(".leann.meta.json", ""),
                        }
                    )

        # Sort: current project first, then by project name
        matches.sort(key=lambda x: (not x["is_current"], x["project_path"].name))
        return matches

    def _remove_single_match(self, match, index_name: str, force: bool):
        """Handle removal when only one match is found"""
        project_path = match["project_path"]
        is_current = match["is_current"]
        kind = match.get("kind", "cli")

        if is_current:
            location_info = "current project"
            emoji = "🏠"
        else:
            location_info = f"other project '{project_path.name}'"
            emoji = "📂"

        print(f"✅ Found 1 index named '{index_name}':")
        print(f"   {emoji} Location: {location_info}")
        if kind == "cli":
            print(f"   📍 Path: {project_path / '.leann' / 'indexes' / index_name}")
        else:
            print(f"   📍 Meta: {match['meta_file']}")

        if not force:
            if not is_current:
                print("\n⚠️ CROSS-PROJECT REMOVAL!")
                print("   This will delete the index from another project.")

            response = input(f"   ❓ Confirm removal from {location_info}? (y/N): ").strip().lower()
            if response not in ["y", "yes"]:
                print("   ❌ Removal cancelled.")
                return False

        if kind == "cli":
            return self._delete_index_directory(
                match["index_dir"],
                index_name,
                project_path if not is_current else None,
                is_app=False,
            )
        else:
            return self._delete_index_directory(
                match["files_dir"],
                match.get("display_name", index_name),
                project_path if not is_current else None,
                is_app=True,
                meta_file=match.get("meta_file"),
                app_file_base=match.get("file_base"),
            )

    def _remove_from_multiple_matches(self, matches, index_name: str, force: bool):
        """Handle removal when multiple matches are found"""

        print(f"⚠️ Found {len(matches)} indexes named '{index_name}':")
        print("   " + "─" * 50)

        for i, match in enumerate(matches, 1):
            project_path = match["project_path"]
            is_current = match["is_current"]
            kind = match.get("kind", "cli")

            if is_current:
                print(f"   {i}. 🏠 Current project ({'CLI' if kind == 'cli' else 'APP'})")
            else:
                print(f"   {i}. 📂 {project_path.name} ({'CLI' if kind == 'cli' else 'APP'})")

            # Show path details
            if kind == "cli":
                print(f"      📍 {project_path / '.leann' / 'indexes' / index_name}")
            else:
                print(f"      📍 {match['meta_file']}")

            # Show size info
            try:
                if kind == "cli":
                    size_mb = sum(
                        f.stat().st_size for f in match["index_dir"].iterdir() if f.is_file()
                    ) / (1024 * 1024)
                else:
                    file_base = match.get("file_base")
                    size_mb = 0.0
                    if file_base:
                        size_mb = sum(
                            f.stat().st_size
                            for f in match["files_dir"].glob(f"{file_base}.leann*")
                            if f.is_file()
                        ) / (1024 * 1024)
                print(f"      📦 Size: {size_mb:.1f} MB")
            except (OSError, PermissionError):
                pass

        print("   " + "─" * 50)

        if force:
            print("   ❌ Multiple matches found, but --force specified.")
            print("   Please run without --force to choose which one to remove.")
            return False

        try:
            choice = input(
                f"   ❓ Which one to remove? (1-{len(matches)}, or 'c' to cancel): "
            ).strip()
            if choice.lower() == "c":
                print("   ❌ Removal cancelled.")
                return False

            choice_idx = int(choice) - 1
            if 0 <= choice_idx < len(matches):
                selected_match = matches[choice_idx]
                project_path = selected_match["project_path"]
                is_current = selected_match["is_current"]
                kind = selected_match.get("kind", "cli")

                location = "current project" if is_current else f"'{project_path.name}' project"
                print(f"   🎯 Selected: Remove from {location}")

                # Final confirmation for safety
                confirm = input(
                    f"   ❓ FINAL CONFIRMATION - Type '{index_name}' to proceed: "
                ).strip()
                if confirm != index_name:
                    print("   ❌ Confirmation failed. Removal cancelled.")
                    return False

                if kind == "cli":
                    return self._delete_index_directory(
                        selected_match["index_dir"],
                        index_name,
                        project_path if not is_current else None,
                        is_app=False,
                    )
                else:
                    return self._delete_index_directory(
                        selected_match["files_dir"],
                        selected_match.get("display_name", index_name),
                        project_path if not is_current else None,
                        is_app=True,
                        meta_file=selected_match.get("meta_file"),
                        app_file_base=selected_match.get("file_base"),
                    )
            else:
                print("   ❌ Invalid choice. Removal cancelled.")
                return False

        except (ValueError, KeyboardInterrupt):
            print("\n   ❌ Invalid input. Removal cancelled.")
            return False

    def _delete_index_directory(
        self,
        index_dir: Path,
        index_display_name: str,
        project_path: Optional[Path] = None,
        is_app: bool = False,
        meta_file: Optional[Path] = None,
        app_file_base: Optional[str] = None,
    ):
        """Delete a CLI index directory or APP index files safely."""
        try:
            if is_app:
                removed = 0
                errors = 0
                # Delete only files that belong to this app index (based on file base)
                pattern_base = app_file_base or ""
                for f in index_dir.glob(f"{pattern_base}.leann*"):
                    try:
                        f.unlink()
                        removed += 1
                    except Exception:
                        errors += 1
                # Best-effort: also remove the meta file if specified and still exists
                if meta_file and meta_file.exists():
                    try:
                        meta_file.unlink()
                        removed += 1
                    except Exception:
                        errors += 1

                if removed > 0 and errors == 0:
                    if project_path:
                        print(
                            f"✅ App index '{index_display_name}' removed from {project_path.name}"
                        )
                    else:
                        print(f"✅ App index '{index_display_name}' removed successfully")
                    return True
                elif removed > 0 and errors > 0:
                    print(
                        f"⚠️ App index '{index_display_name}' partially removed (some files couldn't be deleted)"
                    )
                    return True
                else:
                    print(
                        f"❌ No files found to remove for app index '{index_display_name}' in {index_dir}"
                    )
                    return False
            else:
                import shutil

                shutil.rmtree(index_dir)

                if project_path:
                    print(f"✅ Index '{index_display_name}' removed from {project_path.name}")
                else:
                    print(f"✅ Index '{index_display_name}' removed successfully")
                return True
        except Exception as e:
            print(f"❌ Error removing index '{index_display_name}': {e}")
            return False
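
App-index deletion above selects files by glob on the file base. A standalone illustration of which files that pattern would catch (the non-meta file names are hypothetical):

```python
from pathlib import Path

# Hypothetical app index with file base "pdf_documents" living in ./notes:
#   notes/pdf_documents.leann.meta.json
#   notes/pdf_documents.leann.index
#   notes/pdf_documents.leann.passages.jsonl
# The glob used above selects all of them, and nothing else in the directory:
for f in Path("notes").glob("pdf_documents.leann*"):
    print(f)  # every file that belongs to this app index
```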

    def load_documents(
-       self, docs_paths: Union[str, list], custom_file_types: Union[str, None] = None
        self,
        docs_paths: Union[str, list],
        custom_file_types: Union[str, None] = None,
        include_hidden: bool = False,
    ):
        # Handle both single path (string) and multiple paths (list) for backward compatibility
        if isinstance(docs_paths, str):
@@ -392,6 +876,10 @@ Examples:

        all_documents = []

        # Helper to detect hidden path components
        def _path_has_hidden_segment(p: Path) -> bool:
            return any(part.startswith(".") and part not in [".", ".."] for part in p.parts)
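
The helper treats any dot-prefixed path component as hidden, while exempting the `.` and `..` pseudo-segments. A quick standalone illustration (not LEANN code):

```python
from pathlib import Path

def _path_has_hidden_segment(p: Path) -> bool:
    return any(part.startswith(".") and part not in [".", ".."] for part in p.parts)

print(_path_has_hidden_segment(Path(".git/config")))     # True  - .git is hidden
print(_path_has_hidden_segment(Path("src/app.py")))      # False
print(_path_has_hidden_segment(Path("../src/app.py")))   # False - ".." is exempt
```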

        # First, process individual files if any
        if files:
            print(f"\n🔄 Processing {len(files)} individual file{'s' if len(files) > 1 else ''}...")
@@ -404,8 +892,12 @@ Examples:

            files_by_dir = defaultdict(list)
            for file_path in files:
-               parent_dir = str(Path(file_path).parent)
-               files_by_dir[parent_dir].append(file_path)
                file_path_obj = Path(file_path)
                if not include_hidden and _path_has_hidden_segment(file_path_obj):
                    print(f"   ⚠️ Skipping hidden file: {file_path}")
                    continue
                parent_dir = str(file_path_obj.parent)
                files_by_dir[parent_dir].append(str(file_path_obj))

            # Load files from each parent directory
            for parent_dir, file_list in files_by_dir.items():
@@ -416,6 +908,7 @@ Examples:
                file_docs = SimpleDirectoryReader(
                    parent_dir,
                    input_files=file_list,
                    # exclude_hidden only affects directory scans; input_files are explicit
                    filename_as_id=True,
                ).load_data()
                all_documents.extend(file_docs)
@@ -514,6 +1007,8 @@ Examples:
                # Check if file matches any exclude pattern
                try:
                    relative_path = file_path.relative_to(docs_path)
                    if not include_hidden and _path_has_hidden_segment(relative_path):
                        continue
                    if self._should_exclude_file(relative_path, gitignore_matches):
                        continue
                except ValueError:
@@ -541,6 +1036,7 @@ Examples:
                try:
                    default_docs = SimpleDirectoryReader(
                        str(file_path.parent),
                        exclude_hidden=not include_hidden,
                        filename_as_id=True,
                        required_exts=[file_path.suffix],
                    ).load_data()
@@ -569,6 +1065,7 @@ Examples:
                    encoding="utf-8",
                    required_exts=code_extensions,
                    file_extractor={},  # Use default extractors
                    exclude_hidden=not include_hidden,
                    filename_as_id=True,
                ).load_data(show_progress=True)

@@ -687,7 +1184,40 @@ Examples:
            print(f"Index '{index_name}' already exists. Use --force to rebuild.")
            return

-       all_texts = self.load_documents(docs_paths, args.file_types)
        # Configure chunking based on CLI args before loading documents
        # Guard against invalid configurations
        doc_chunk_size = max(1, int(args.doc_chunk_size))
        doc_chunk_overlap = max(0, int(args.doc_chunk_overlap))
        if doc_chunk_overlap >= doc_chunk_size:
            print(
                f"⚠️ Adjusting doc chunk overlap from {doc_chunk_overlap} to {doc_chunk_size - 1} (must be < chunk size)"
            )
            doc_chunk_overlap = doc_chunk_size - 1

        code_chunk_size = max(1, int(args.code_chunk_size))
        code_chunk_overlap = max(0, int(args.code_chunk_overlap))
        if code_chunk_overlap >= code_chunk_size:
            print(
                f"⚠️ Adjusting code chunk overlap from {code_chunk_overlap} to {code_chunk_size - 1} (must be < chunk size)"
            )
            code_chunk_overlap = code_chunk_size - 1

        self.node_parser = SentenceSplitter(
            chunk_size=doc_chunk_size,
            chunk_overlap=doc_chunk_overlap,
            separator=" ",
            paragraph_separator="\n\n",
        )
        self.code_parser = SentenceSplitter(
            chunk_size=code_chunk_size,
            chunk_overlap=code_chunk_overlap,
            separator="\n",
            paragraph_separator="\n\n",
        )
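
The guard above clamps the overlap to `chunk_size - 1`, so for example `--doc-chunk-size 256 --doc-chunk-overlap 300` becomes an overlap of 255. A standalone sketch of the same arithmetic:

```python
def clamp_overlap(chunk_size: int, chunk_overlap: int) -> tuple[int, int]:
    # Mirror of the guard above: size at least 1, overlap non-negative
    # and strictly smaller than the chunk size.
    chunk_size = max(1, int(chunk_size))
    chunk_overlap = max(0, int(chunk_overlap))
    if chunk_overlap >= chunk_size:
        chunk_overlap = chunk_size - 1
    return chunk_size, chunk_overlap

print(clamp_overlap(512, 50))   # (512, 50)  - valid config passes through
print(clamp_overlap(256, 300))  # (256, 255) - oversized overlap is clamped
```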

        all_texts = self.load_documents(
            docs_paths, args.file_types, include_hidden=args.include_hidden
        )
        if not all_texts:
            print("No documents found")
            return
@@ -719,13 +1249,101 @@ Examples:
    async def search_documents(self, args):
        index_name = args.index_name
        query = args.query
-       index_path = self.get_index_path(index_name)
-
-       if not self.index_exists(index_name):
-           print(
-               f"Index '{index_name}' not found. Use 'leann build {index_name} --docs <dir> [<dir2> ...]' to create it."
-           )
-           return
        # First try to find the index in current project
        index_path = self.get_index_path(index_name)
        if self.index_exists(index_name):
            # Found in current project, use it
            pass
        else:
            # Search across all registered projects (like list_indexes does)
            all_matches = self._find_all_matching_indexes(index_name)
            if not all_matches:
                print(
                    f"Index '{index_name}' not found. Use 'leann build {index_name} --docs <dir> [<dir2> ...]' to create it."
                )
                return
            elif len(all_matches) == 1:
                # Found exactly one match, use it
                match = all_matches[0]
                if match["kind"] == "cli":
                    index_path = str(match["index_dir"] / "documents.leann")
                else:
                    # App format: use the meta file to construct the path
                    meta_file = match["meta_file"]
                    file_base = match["file_base"]
                    index_path = str(meta_file.parent / f"{file_base}.leann")

                project_info = (
                    "current project"
                    if match["is_current"]
                    else f"project '{match['project_path'].name}'"
                )
                print(f"Using index '{index_name}' from {project_info}")
            else:
                # Multiple matches found
                if args.non_interactive:
                    # Non-interactive mode: automatically select the best match
                    # Priority: current project first, then first available
                    current_matches = [m for m in all_matches if m["is_current"]]
                    if current_matches:
                        match = current_matches[0]
                        location_desc = "current project"
                    else:
                        match = all_matches[0]
                        location_desc = f"project '{match['project_path'].name}'"

                    if match["kind"] == "cli":
                        index_path = str(match["index_dir"] / "documents.leann")
                    else:
                        meta_file = match["meta_file"]
                        file_base = match["file_base"]
                        index_path = str(meta_file.parent / f"{file_base}.leann")

                    print(
                        f"Found {len(all_matches)} indexes named '{index_name}', using index from {location_desc}"
                    )
                else:
                    # Interactive mode: ask user to choose
                    print(f"Found {len(all_matches)} indexes named '{index_name}':")
                    for i, match in enumerate(all_matches, 1):
                        project_path = match["project_path"]
                        is_current = match["is_current"]
                        kind = match.get("kind", "cli")

                        if is_current:
                            print(
                                f"  {i}. 🏠 Current project ({'CLI' if kind == 'cli' else 'APP'})"
                            )
                        else:
                            print(
                                f"  {i}. 📂 {project_path.name} ({'CLI' if kind == 'cli' else 'APP'})"
                            )

                    try:
                        choice = input(f"Which index to search? (1-{len(all_matches)}): ").strip()
                        choice_idx = int(choice) - 1
                        if 0 <= choice_idx < len(all_matches):
                            match = all_matches[choice_idx]
                            if match["kind"] == "cli":
                                index_path = str(match["index_dir"] / "documents.leann")
                            else:
                                meta_file = match["meta_file"]
                                file_base = match["file_base"]
                                index_path = str(meta_file.parent / f"{file_base}.leann")

                            project_info = (
                                "current project"
                                if match["is_current"]
                                else f"project '{match['project_path'].name}'"
                            )
                            print(f"Using index '{index_name}' from {project_info}")
                        else:
                            print("Invalid choice. Aborting search.")
                            return
                    except (ValueError, KeyboardInterrupt):
                        print("Invalid input. Aborting search.")
                        return

        searcher = LeannSearcher(index_path=index_path)
        results = searcher.search(
@@ -824,6 +1442,8 @@ Examples:

        if args.command == "list":
            self.list_indexes()
        elif args.command == "remove":
            self.remove_index(args.index_name, args.force)
        elif args.command == "build":
            await self.build_index(args)
        elif args.command == "search":
@@ -835,10 +1455,15 @@ Examples:


def main():
    import logging

    import dotenv

    dotenv.load_dotenv()

    # Set clean logging for CLI usage
    logging.getLogger().setLevel(logging.WARNING)  # Only show warnings and errors

    cli = LeannCLI()
    asyncio.run(cli.run())


@@ -57,6 +57,8 @@ def compute_embeddings(
        return compute_embeddings_mlx(texts, model_name)
    elif mode == "ollama":
        return compute_embeddings_ollama(texts, model_name, is_build=is_build)
    elif mode == "gemini":
        return compute_embeddings_gemini(texts, model_name, is_build=is_build)
    else:
        raise ValueError(f"Unsupported embedding mode: {mode}")

@@ -244,6 +246,16 @@ def compute_embeddings_openai(texts: list[str], model_name: str) -> np.ndarray:
    except ImportError as e:
        raise ImportError(f"OpenAI package not installed: {e}")

    # Validate input list
    if not texts:
        raise ValueError("Cannot compute embeddings for empty text list")
    # Extra validation: abort early if any item is empty/whitespace
    invalid_count = sum(1 for t in texts if not isinstance(t, str) or not t.strip())
    if invalid_count > 0:
        raise ValueError(
            f"Found {invalid_count} empty/invalid text(s) in input. Upstream should filter before calling OpenAI."
        )
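
If the build pipeline can produce empty chunks (for example from zero-byte `__init__.py` files), filtering them upstream avoids this error. A standalone sketch of such a filter:

```python
def drop_empty_texts(texts: list[str]) -> list[str]:
    # Keep only real, non-whitespace strings; mirrors the validation above.
    return [t for t in texts if isinstance(t, str) and t.strip()]

texts = ["def foo(): ...", "", "   ", "second chunk"]
print(drop_empty_texts(texts))  # ['def foo(): ...', 'second chunk']
```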

    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY environment variable not set")
@@ -263,8 +275,16 @@ def compute_embeddings_openai(texts: list[str], model_name: str) -> np.ndarray:
    print(f"len of texts: {len(texts)}")

    # OpenAI has limits on batch size and input length
-   max_batch_size = 1000  # Conservative batch size
    max_batch_size = 800  # Conservative batch size because the token limit is 300K
    all_embeddings = []
    # Estimate the average text length to pick a batch size
    avg_len = sum(len(text) for text in texts) / len(texts)
    print(f"avg len of texts: {avg_len}")
    # If texts are long on average, shrink the batch to stay under the token limit
    if avg_len > 300:
        max_batch_size = 500
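
As a rough sanity check on the heuristic: at about 4 characters per token, 800 texts averaging 300 characters is around 60K tokens per request, comfortably below a 300K-token cap; longer texts get the smaller batch. A standalone sketch:

```python
def pick_batch_size(texts: list[str], base: int = 800) -> int:
    # Heuristic mirroring the code above: shrink the batch when the
    # average text is long, to stay well under the per-request token cap.
    avg_len = sum(len(t) for t in texts) / len(texts)
    return 500 if avg_len > 300 else base

short_texts = ["hello world"] * 10
long_texts = ["x" * 1000] * 10
print(pick_batch_size(short_texts))  # 800
print(pick_batch_size(long_texts))   # 500
```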

    try:
        from tqdm import tqdm
@@ -650,3 +670,83 @@ def compute_embeddings_ollama(
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

    return embeddings


def compute_embeddings_gemini(
    texts: list[str], model_name: str = "text-embedding-004", is_build: bool = False
) -> np.ndarray:
    """
    Compute embeddings using Google Gemini API.

    Args:
        texts: List of texts to compute embeddings for
        model_name: Gemini model name (default: "text-embedding-004")
        is_build: Whether this is a build operation (shows progress bar)

    Returns:
        Embeddings array, shape: (len(texts), embedding_dim)
    """
    try:
        import os

        import google.genai as genai
    except ImportError as e:
        raise ImportError(f"Google GenAI package not installed: {e}")

    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise RuntimeError("GEMINI_API_KEY environment variable not set")

    # Cache Gemini client
    cache_key = "gemini_client"
    if cache_key in _model_cache:
        client = _model_cache[cache_key]
    else:
        client = genai.Client(api_key=api_key)
        _model_cache[cache_key] = client
        logger.info("Gemini client cached")

    logger.info(
        f"Computing embeddings for {len(texts)} texts using Gemini API, model: '{model_name}'"
    )

    # Gemini supports batch embedding
    max_batch_size = 100  # Conservative batch size for Gemini
    all_embeddings = []

    try:
        from tqdm import tqdm

        total_batches = (len(texts) + max_batch_size - 1) // max_batch_size
        batch_range = range(0, len(texts), max_batch_size)
        batch_iterator = tqdm(
            batch_range, desc="Computing embeddings", unit="batch", total=total_batches
        )
    except ImportError:
        # Fallback when tqdm is not available
        batch_iterator = range(0, len(texts), max_batch_size)

    for i in batch_iterator:
        batch_texts = texts[i : i + max_batch_size]

        try:
            # Use the embed_content method from the new Google GenAI SDK
            response = client.models.embed_content(
                model=model_name,
                contents=batch_texts,
                config=genai.types.EmbedContentConfig(
                    task_type="RETRIEVAL_DOCUMENT"  # For document embedding
                ),
            )

            # Extract embeddings from response
            for embedding_data in response.embeddings:
                all_embeddings.append(embedding_data.values)
        except Exception as e:
            logger.error(f"Batch {i} failed: {e}")
            raise

    embeddings = np.array(all_embeddings, dtype=np.float32)
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

    return embeddings
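
Usage would go through the module-level dispatch shown earlier; the exact import path and keyword names below are assumptions based on that dispatch, not confirmed API:

```python
import os

# Hypothetical import path; adjust to the actual module location.
from leann.embedding_compute import compute_embeddings

os.environ.setdefault("GEMINI_API_KEY", "<your key>")
vecs = compute_embeddings(
    ["LEANN keeps the index small", "RAG everything"],
    model_name="text-embedding-004",
    mode="gemini",
)
print(vecs.shape)  # (2, embedding_dim)
```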

@@ -268,8 +272,12 @@ class EmbeddingServerManager:
                f"Terminating server process (PID: {self.server_process.pid}) for backend {self.backend_module_name}..."
            )

-           # Use simple termination - our improved server shutdown should handle this properly
-           self.server_process.terminate()
            # Use simple termination first; if the server installed signal handlers,
            # it will exit cleanly. Otherwise escalate to kill after a short wait.
            try:
                self.server_process.terminate()
            except Exception:
                pass

            try:
                self.server_process.wait(timeout=5)  # Give more time for graceful shutdown
@@ -278,7 +282,10 @@ class EmbeddingServerManager:
                logger.warning(
                    f"Server process {self.server_process.pid} did not terminate within 5 seconds, force killing..."
                )
-               self.server_process.kill()
                try:
                    self.server_process.kill()
                except Exception:
                    pass
                try:
                    self.server_process.wait(timeout=2)
                    logger.info(f"Server process {self.server_process.pid} killed successfully.")
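
The terminate-then-kill escalation here is a standard subprocess pattern; a self-contained sketch with plain `subprocess` (POSIX example):

```python
import subprocess

def stop_process(proc: subprocess.Popen, grace: float = 5.0) -> None:
    """Ask nicely first (SIGTERM), then force-kill if the process lingers."""
    try:
        proc.terminate()  # graceful: lets signal handlers run
    except Exception:
        pass
    try:
        proc.wait(timeout=grace)
    except subprocess.TimeoutExpired:
        try:
            proc.kill()  # forceful: SIGKILL, cannot be trapped
        except Exception:
            pass
        proc.wait(timeout=2)

proc = subprocess.Popen(["sleep", "60"])
stop_process(proc)
print(proc.returncode)  # negative signal number on POSIX, e.g. -15
```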

@@ -64,19 +64,6 @@ def handle_request(request):
                    "required": ["index_name", "query"],
                },
            },
-           {
-               "name": "leann_status",
-               "description": "📊 Check the health and stats of your code indexes - like a medical checkup for your codebase knowledge!",
-               "inputSchema": {
-                   "type": "object",
-                   "properties": {
-                       "index_name": {
-                           "type": "string",
-                           "description": "Optional: Name of specific index to check. If not provided, shows status of all indexes.",
-                       }
-                   },
-               },
-           },
            {
                "name": "leann_list",
                "description": "📋 Show all your indexed codebases - your personal code library! Use this to see what's available for search.",
@@ -107,7 +94,7 @@ def handle_request(request):
            },
        }

-       # Build simplified command
        # Build simplified command with non-interactive flag for MCP compatibility
        cmd = [
            "leann",
            "search",
@@ -115,18 +102,10 @@ def handle_request(request):
            args["query"],
            f"--top-k={args.get('top_k', 5)}",
            f"--complexity={args.get('complexity', 32)}",
            "--non-interactive",
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)

-   elif tool_name == "leann_status":
-       if args.get("index_name"):
-           # Check specific index status - for now, we'll use leann list and filter
-           result = subprocess.run(["leann", "list"], capture_output=True, text=True)
-           # We could enhance this to show more detailed status per index
-       else:
-           # Show all indexes status
-           result = subprocess.run(["leann", "list"], capture_output=True, text=True)
-
    elif tool_name == "leann_list":
        result = subprocess.run(["leann", "list"], capture_output=True, text=True)


@@ -2,11 +2,17 @@

import importlib
import importlib.metadata
-from typing import TYPE_CHECKING
import json
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union

if TYPE_CHECKING:
    from leann.interface import LeannBackendFactoryInterface

# Set up logger for this module
logger = logging.getLogger(__name__)

BACKEND_REGISTRY: dict[str, "LeannBackendFactoryInterface"] = {}


@@ -14,7 +20,7 @@ def register_backend(name: str):
    """A decorator to register a new backend class."""

    def decorator(cls):
-       print(f"INFO: Registering backend '{name}'")
        logger.debug(f"Registering backend '{name}'")
        BACKEND_REGISTRY[name] = cls
        return cls
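
Used as a class decorator, registration happens at import time. A sketch in the context of this module, with a hypothetical backend class name:

```python
# Hypothetical example; HnswBackend is illustrative, not a real class name.
@register_backend("hnsw")
class HnswBackend:
    """Factory registered under the key 'hnsw'."""

# Once the defining module is imported, the registry resolves it by name:
backend_cls = BACKEND_REGISTRY["hnsw"]
print(backend_cls.__name__)  # HnswBackend
```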

@@ -39,3 +45,54 @@ def autodiscover_backends():
            # print(f"WARN: Could not import backend module '{backend_module_name}': {e}")
            pass
    # print("INFO: Backend auto-discovery finished.")


def register_project_directory(project_dir: Optional[Union[str, Path]] = None):
    """
    Register a project directory in the global LEANN registry.

    This allows `leann list` to discover indexes created by apps or other tools.

    Args:
        project_dir: Directory to register. If None, uses current working directory.
    """
    if project_dir is None:
        project_dir = Path.cwd()
    else:
        project_dir = Path(project_dir)

    # Only register directories that have some kind of LEANN content
    # Either .leann/indexes/ (CLI format) or *.leann.meta.json files (apps format)
    has_cli_indexes = (project_dir / ".leann" / "indexes").exists()
    has_app_indexes = any(project_dir.rglob("*.leann.meta.json"))

    if not (has_cli_indexes or has_app_indexes):
        # Don't register if there are no LEANN indexes
        return

    global_registry = Path.home() / ".leann" / "projects.json"
    global_registry.parent.mkdir(exist_ok=True)

    project_str = str(project_dir.resolve())

    # Load existing registry
    projects = []
    if global_registry.exists():
        try:
            with open(global_registry) as f:
                projects = json.load(f)
        except Exception:
            logger.debug("Could not load existing project registry")
            projects = []

    # Add project if not already present
    if project_str not in projects:
        projects.append(project_str)

    # Save updated registry
    try:
        with open(global_registry, "w") as f:
            json.dump(projects, f, indent=2)
        logger.debug(f"Registered project directory: {project_str}")
    except Exception as e:
        logger.warning(f"Could not save project registry: {e}")
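
`~/.leann/projects.json` is, per the code above, a flat JSON list of absolute project paths. A sketch of its contents and of how an app would register itself (the paths are hypothetical):

```python
# Hypothetical registry contents at ~/.leann/projects.json:
#   [
#     "/home/alice/work/my-repo",
#     "/home/alice/notes"
#   ]

# An app that just built an index can make it visible to `leann list`:
register_project_directory("/home/alice/notes")  # explicit path
register_project_directory()                     # or the current directory
```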

@@ -4,27 +4,29 @@ Transform your development workflow with intelligent code assistance using LEANN

## Prerequisites

-**Step 1:** First, complete the basic LEANN installation following the [📦 Installation guide](../../README.md#installation) in the root README:
Install LEANN globally for MCP integration (with default backend):

```bash
-uv venv
-source .venv/bin/activate
-uv pip install leann
uv tool install leann-core --with leann
```

-**Step 2:** Install LEANN globally for MCP integration:
-```bash
-uv tool install leann-core
-```
-
-This makes the `leann` command available system-wide, which `leann_mcp` requires.
This installs the `leann` CLI into an isolated tool environment and includes both backends, so `leann build` works out of the box.

## 🚀 Quick Setup

-Add the LEANN MCP server to Claude Code:
Add the LEANN MCP server to Claude Code. Choose the scope based on how widely you want it available. The command below installs it globally; if you prefer a per-project install, skip this step:

```bash
-claude mcp add leann-server -- leann_mcp
# Global (recommended): available in all projects for your user
claude mcp add --scope user leann-server -- leann_mcp
```

- `leann-server`: the display name of the MCP server in Claude Code (you can change it).
- `leann_mcp`: the Python entry point installed with LEANN that starts the MCP server.

Verify it is registered globally:

```bash
claude mcp list | cat
```

## 🛠️ Available Tools
@@ -33,27 +35,36 @@ Once connected, you'll have access to these powerful semantic search tools in Cl

- **`leann_list`** - List all available indexes across your projects
- **`leann_search`** - Perform semantic searches across code and documents
- **`leann_ask`** - Ask natural language questions and get AI-powered answers from your codebase


## 🎯 Quick Start Example

```bash
# Add locally if you did not add it globally (current folder only; default if --scope is omitted)
claude mcp add leann-server -- leann_mcp

-# Build an index for your project (change to your actual path)
-leann build my-project --docs ./
# See the advanced examples below for more ways to configure indexing
# Set the index name (replace 'my-project' with your own)
leann build my-project --docs $(git ls-files)

# Start Claude Code
claude
```

-## 🚀 Advanced Usage Examples
## 🚀 Advanced Usage Examples for Building the Index

### Index Entire Git Repository
```bash
-# Index all tracked files in your git repository, note right now we will skip submodules, but we can add it back easily if you want
# Index all tracked files in your Git repository.
# Note: submodules are currently skipped; we can add them back if needed.
leann build my-repo --docs $(git ls-files) --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

-# Index only specific file types from git
# Index only tracked Python files from Git.
leann build my-python-code --docs $(git ls-files "*.py") --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# If you encounter empty requests caused by empty files (e.g., __init__.py), exclude zero-byte files. Thanks to @ww2283 for pointing [that](https://github.com/yichuan-w/LEANN/issues/48) out.
leann build leann-prospec-lig --docs $(find ./src -name "*.py" -not -empty) --embedding-mode openai --embedding-model text-embedding-3-small
```

### Multiple Directories and Files
@@ -81,7 +92,7 @@ leann build docs-and-configs --docs $(git ls-files "*.md" "*.yml" "*.yaml" "*.js
```


-**Try this in Claude Code:**
## Try this in Claude Code
```
Help me understand this codebase. List available indexes and search for authentication patterns.
```
@@ -90,6 +101,7 @@ Help me understand this codebase. List available indexes and search for authenti
<img src="../../assets/claude_code_leann.png" alt="LEANN in Claude Code" width="80%">
</p>

If you see a prompt asking whether to proceed with LEANN, you can now use it in your chat!

## 🧠 How It Works

@@ -125,3 +137,11 @@ To remove LEANN
```
uv pip uninstall leann leann-backend-hnsw leann-core
```

To remove the global LEANN install (e.g., before a version upgrade):
```
uv tool list | cat
uv tool uninstall leann-core
command -v leann || echo "leann gone"
command -v leann_mcp || echo "leann_mcp gone"
```

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "leann"
-version = "0.2.8"
version = "0.3.2"
description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
readme = "README.md"
requires-python = ">=3.9"

packages/wechat-exporter/__init__.py (new file)
@@ -0,0 +1 @@
__all__ = []

@@ -136,5 +136,9 @@ def export_sqlite(
    connection.commit()


-if __name__ == "__main__":
def main():
    app()


if __name__ == "__main__":
    main()

@@ -10,11 +10,10 @@ requires-python = ">=3.9"
dependencies = [
    "leann-core",
    "leann-backend-hnsw",
    "typer>=0.12.3",
    "numpy>=1.26.0",
    "torch",
    "tqdm",
    "flask",
    "flask_compress",
    "datasets>=2.15.0",
    "evaluate",
    "colorama",
@@ -65,9 +64,7 @@ test = [
    "pytest>=7.0",
    "pytest-timeout>=2.0",
    "llama-index-core>=0.12.0",
    "llama-index-readers-file>=0.4.0",
    "python-dotenv>=1.0.0",
    "sentence-transformers>=2.2.0",
]

diskann = [
@@ -84,6 +81,11 @@ documents = [

[tool.setuptools]
py-modules = []
packages = ["wechat_exporter"]
package-dir = { "wechat_exporter" = "packages/wechat-exporter" }

[project.scripts]
wechat-exporter = "wechat_exporter.main:main"


[tool.uv.sources]
@@ -94,13 +96,8 @@ leann-backend-hnsw = { path = "packages/leann-backend-hnsw", editable = true }
[tool.ruff]
target-version = "py39"
line-length = 100
-extend-exclude = [
-    "third_party",
-    "*.egg-info",
-    "__pycache__",
-    ".git",
-    ".venv",
-]
extend-exclude = ["third_party"]


[tool.ruff.lint]
select = [
@@ -123,21 +120,12 @@ ignore = [
    "RUF012", # mutable class attributes should be annotated with typing.ClassVar
]

[tool.ruff.lint.per-file-ignores]
"test/**/*.py" = ["E402"] # module level import not at top of file (common in tests)
"examples/**/*.py" = ["E402"] # module level import not at top of file (common in examples)

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
skip-magic-trailing-comma = false
line-ending = "auto"

[dependency-groups]
dev = [
    "ruff>=0.12.4",
]

[tool.lychee]
accept = ["200", "403", "429", "503"]
timeout = 20

sky/leann-build.yaml (new file)
@@ -0,0 +1,76 @@
name: leann-build

resources:
  # Choose a GPU for fast embeddings (examples: L4, A10G, A100). CPU also works but is slower.
  accelerators: L4:1
  # Optionally pin a cloud, otherwise SkyPilot will auto-select
  # cloud: aws
  disk_size: 100

envs:
  # Build parameters (override with: sky launch -c leann-gpu sky/leann-build.yaml -e key=value)
  index_name: my-index
  docs: ./data
  backend: hnsw  # hnsw | diskann
  complexity: 64
  graph_degree: 32
  num_threads: 8
  # Embedding selection
  embedding_mode: sentence-transformers  # sentence-transformers | openai | mlx | ollama
  embedding_model: facebook/contriever
  # Storage/latency knobs
  recompute: true  # true => selective recomputation (recommended)
  compact: true  # for HNSW only
  # Optional pass-through
  extra_args: ""
  # Rebuild control
  force: true

# Sync local paths to the remote VM. Adjust as needed.
file_mounts:
  # Example: mount your local data directory used for building
  ~/leann-data: ${docs}

setup: |
  set -e
  # Install uv (package manager)
  curl -LsSf https://astral.sh/uv/install.sh | sh
  export PATH="$HOME/.local/bin:$PATH"

  # Ensure modern libstdc++ for FAISS (GLIBCXX >= 3.4.30)
  sudo apt-get update -y
  sudo apt-get install -y libstdc++6 libgomp1
  # Also upgrade conda's libstdc++ in base env (SkyPilot images include conda)
  if command -v conda >/dev/null 2>&1; then
    conda install -y -n base -c conda-forge libstdcxx-ng
  fi

  # Install LEANN CLI and backends into the user environment
  uv pip install --upgrade pip
  uv pip install leann-core leann-backend-hnsw leann-backend-diskann

run: |
  export PATH="$HOME/.local/bin:$PATH"
  # Derive flags from env
  recompute_flag=""
  if [ "${recompute}" = "false" ] || [ "${recompute}" = "0" ]; then
    recompute_flag="--no-recompute"
  fi
  force_flag=""
  if [ "${force}" = "true" ] || [ "${force}" = "1" ]; then
    force_flag="--force"
  fi

  # Build command
  python -m leann.cli build ${index_name} \
    --docs ~/leann-data \
    --backend ${backend} \
    --complexity ${complexity} \
    --graph-degree ${graph_degree} \
    --num-threads ${num_threads} \
    --embedding-mode ${embedding_mode} \
    --embedding-model ${embedding_model} \
    ${recompute_flag} ${force_flag} ${extra_args}

  # Print where the index is stored for downstream rsync
  echo "INDEX_OUT_DIR=~/.leann/indexes/${index_name}"