Compare commits

1 commit

Author: yichuan520030910320
SHA1: 2c3824e7b6
Date: 2025-08-12 02:04:44 -07:00

feat: support multiple input formats for --docs argument
- Add support for multiple directories: --docs ./src ./tests ./config
- Add support for individual files: --docs ./file1.py ./file2.txt
- Add support for mixed files and directories: --docs ./README.md ./src/ ./config.json
- Add git ls-files integration: --docs $(git ls-files)
- Add git submodule detection and skip logic to avoid indexing third-party dependencies
- Add comprehensive error handling for path resolution issues
- Update MCP README with advanced usage examples including git integration
- Fix ruff linting issues with closure variable binding

Breaking changes: none; fully backward compatible with existing single-directory usage

Examples:
  leann build my-repo --docs $(git ls-files) --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw
  leann build my-code --docs ./src ./tests ./docs --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw
  leann build my-configs --docs ./package.json ./tsconfig.json ./webpack.config.js --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw
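The submodule-skip bullet above is the least obvious piece of the change. Here is a minimal, hypothetical sketch of how such detection can work by parsing `.gitmodules`; the helper names `submodule_paths` and `should_index` are invented for illustration and are not the commit's actual code:

```python
# Hypothetical sketch of submodule detection for --docs path filtering.
# Not the commit's implementation; helper names are illustrative only.
from pathlib import Path


def submodule_paths(repo_root: Path) -> set[Path]:
    """Collect submodule directories declared in .gitmodules, if any."""
    paths: set[Path] = set()
    gitmodules = repo_root / ".gitmodules"
    if not gitmodules.exists():
        return paths
    for line in gitmodules.read_text().splitlines():
        key, _, value = line.strip().partition("=")
        if key.strip() == "path":
            paths.add((repo_root / value.strip()).resolve())
    return paths


def should_index(candidate: Path, repo_root: Path) -> bool:
    """Skip any file or directory that lives inside a submodule."""
    resolved = candidate.resolve()
    skips = submodule_paths(repo_root)
    return not any(skip == resolved or skip in resolved.parents for skip in skips)
```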
11 changed files with 83 additions and 336 deletions

View File

@@ -64,16 +64,6 @@ jobs:
             python: '3.12'
           - os: macos-14
             python: '3.13'
-          - os: macos-15
-            python: '3.9'
-          - os: macos-15
-            python: '3.10'
-          - os: macos-15
-            python: '3.11'
-          - os: macos-15
-            python: '3.12'
-          - os: macos-15
-            python: '3.13'
           - os: macos-13
             python: '3.9'
           - os: macos-13
@@ -157,14 +147,7 @@
            # Use system clang for better compatibility
            export CC=clang
            export CXX=clang++
-           # Homebrew libraries on each macOS version require matching minimum version
-           if [[ "${{ matrix.os }}" == "macos-13" ]]; then
-             export MACOSX_DEPLOYMENT_TARGET=13.0
-           elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
-             export MACOSX_DEPLOYMENT_TARGET=14.0
-           elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
-             export MACOSX_DEPLOYMENT_TARGET=15.0
-           fi
+           export MACOSX_DEPLOYMENT_TARGET=11.0
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
          else
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
@@ -178,14 +161,7 @@
            export CC=clang
            export CXX=clang++
            # DiskANN requires macOS 13.3+ for sgesdd_ LAPACK function
-           # But Homebrew libraries on each macOS version require matching minimum version
-           if [[ "${{ matrix.os }}" == "macos-13" ]]; then
-             export MACOSX_DEPLOYMENT_TARGET=13.3
-           elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
-             export MACOSX_DEPLOYMENT_TARGET=14.0
-           elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
-             export MACOSX_DEPLOYMENT_TARGET=15.0
-           fi
+           export MACOSX_DEPLOYMENT_TARGET=13.3
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
          else
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
@@ -221,24 +197,10 @@
      - name: Repair wheels (macOS)
        if: runner.os == 'macOS'
        run: |
-         # Determine deployment target based on runner OS
-         # Must match the Homebrew libraries for each macOS version
-         if [[ "${{ matrix.os }}" == "macos-13" ]]; then
-           HNSW_TARGET="13.0"
-           DISKANN_TARGET="13.3"
-         elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
-           HNSW_TARGET="14.0"
-           DISKANN_TARGET="14.0"
-         elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
-           HNSW_TARGET="15.0"
-           DISKANN_TARGET="15.0"
-         fi
          # Repair HNSW wheel
          cd packages/leann-backend-hnsw
          if [ -d dist ]; then
-           export MACOSX_DEPLOYMENT_TARGET=$HNSW_TARGET
-           delocate-wheel -w dist_repaired -v --require-target-macos-version $HNSW_TARGET dist/*.whl
+           delocate-wheel -w dist_repaired -v dist/*.whl
            rm -rf dist
            mv dist_repaired dist
          fi
@@ -247,8 +209,7 @@
          # Repair DiskANN wheel
          cd packages/leann-backend-diskann
          if [ -d dist ]; then
-           export MACOSX_DEPLOYMENT_TARGET=$DISKANN_TARGET
-           delocate-wheel -w dist_repaired -v --require-target-macos-version $DISKANN_TARGET dist/*.whl
+           delocate-wheel -w dist_repaired -v dist/*.whl
            rm -rf dist
            mv dist_repaired dist
          fi
@@ -288,8 +249,8 @@
          # Activate virtual environment
          source .venv/bin/activate || source .venv/Scripts/activate
-         # Run tests
-         pytest -v tests/
+         # Run all tests
+         pytest tests/
      - name: Run sanity checks (optional)
        run: |

View File

@@ -71,8 +71,6 @@ source .venv/bin/activate
 uv pip install leann
 ```
-
-> Low-resource? See “Low-resource setups” in the [Configuration Guide](docs/configuration-guide.md#low-resource-setups).
 <details>
 <summary>
 <strong>🔧 Build from Source (Recommended for development)</strong>

View File

@@ -259,80 +259,24 @@ Every configuration choice involves trade-offs:
 The key is finding the right balance for your specific use case. Start small and simple, measure performance, then scale up only where needed.
 
-## Low-resource setups
-
-If you don't have a local GPU or builds/searches are too slow, use one or more of the options below.
-
-### 1) Use OpenAI embeddings (no local compute)
-
-Fastest path with zero local GPU requirements. Set your API key and use OpenAI embeddings during build and search:
-
-```bash
-export OPENAI_API_KEY=sk-...
-
-# Build with OpenAI embeddings
-leann build my-index \
-  --embedding-mode openai \
-  --embedding-model text-embedding-3-small
-
-# Search with OpenAI embeddings (recompute at query time)
-leann search my-index "your query" \
-  --recompute-embeddings
-```
-
-### 2) Run remote builds with SkyPilot (cloud GPU)
-
-Offload embedding generation and index building to a GPU VM using SkyPilot. A template is provided at `sky/leann-build.yaml`.
-
-```bash
-# One-time: install and configure SkyPilot
-pip install skypilot
-sky launch -c leann-gpu sky/leann-build.yaml
-
-# Build remotely (template installs uv + leann CLI)
-sky exec leann-gpu -- "leann build my-index --docs ~/leann-data --backend hnsw --complexity 64 --graph-degree 32"
-```
-
-Details: see “Running Builds on SkyPilot (Optional)” below.
-
-### 3) Disable recomputation to trade storage for speed
-
-If you need lower latency and have more storage/memory, disable recomputation. This stores full embeddings and avoids recomputing at search time.
-
-```bash
-# Build without recomputation (HNSW requires non-compact in this mode)
-leann build my-index --no-recompute --no-compact
-
-# Search without recomputation
-leann search my-index "your query" --no-recompute
-```
-
-Trade-offs: lower query-time latency, but significantly higher storage usage.
-
-## Running Builds on SkyPilot (Optional)
-
-You can offload embedding generation and index building to a cloud GPU VM using SkyPilot, without changing any LEANN code. This is useful when your local machine lacks a GPU or you want faster throughput.
-
-### Quick Start
-
-1) Install SkyPilot by following their docs (`pip install skypilot`), then configure cloud credentials.
-
-2) Use the provided SkyPilot template:
-
-```bash
-sky launch -c leann-gpu sky/leann-build.yaml
-```
-
-3) On the remote, either put your data under the mounted path or adjust `file_mounts` in `sky/leann-build.yaml`. Then run the LEANN build:
-
-```bash
-sky exec leann-gpu -- "leann build my-index --docs ~/leann-data --backend hnsw --complexity 64 --graph-degree 32"
-```
-
-Notes:
-- The template installs `uv` and the `leann` CLI globally on the remote instance.
-- Change the `accelerators` and `cloud` settings in `sky/leann-build.yaml` to match your budget/availability (e.g., `A10G:1`, `A100:1`, or CPU-only if you prefer).
-- You can also build with `diskann` by switching `--backend diskann`.
+## Deep Dive: Critical Configuration Decisions
+
+### When to Disable Recomputation
+
+LEANN's recomputation feature provides exact distance calculations but can be disabled for extreme QPS requirements:
+
+```bash
+--no-recompute  # Disable selective recomputation
+```
+
+**Trade-offs**:
+- **With recomputation** (default): Exact distances, best quality, higher latency, minimal storage (only stores metadata, recomputes embeddings on-demand)
+- **Without recomputation**: Must store full embeddings, significantly higher memory and storage usage (10-100x more), but faster search
+
+**Disable when**:
+- You have abundant storage and memory
+- Need extremely low latency (< 100ms)
+- Running a read-heavy workload where storage cost is acceptable
 
 ## Further Reading
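To make the storage trade-off in the hunk above concrete, here is a quick back-of-the-envelope estimate; the corpus size and embedding dimension are illustrative assumptions, not numbers from the diff:

```python
# Rough storage cost of --no-recompute (storing full embeddings).
# Corpus size and embedding dimension are assumptions for illustration.
num_passages = 1_000_000
dim = 384                 # e.g. a MiniLM-class embedding model
bytes_per_value = 4       # float32

total_bytes = num_passages * dim * bytes_per_value
print(f"{total_bytes / 1e9:.2f} GB of raw embeddings")  # ~1.54 GB
```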

View File

@@ -4,8 +4,8 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "leann-backend-diskann"
-version = "0.2.9"
-dependencies = ["leann-core==0.2.9", "numpy", "protobuf>=3.19.0"]
+version = "0.2.7"
+dependencies = ["leann-core==0.2.7", "numpy", "protobuf>=3.19.0"]
 
 [tool.scikit-build]
 # Key: simplified CMake path

View File

@@ -95,8 +95,6 @@ def create_hnsw_embedding_server(
                 passage_sources.append(source_copy)
     passages = PassageManager(passage_sources)
-    # Use index dimensions from metadata for shaping fallback responses
-    embedding_dim: int = int(meta.get("dimensions", 0))
     logger.info(
         f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
     )
@@ -111,9 +109,6 @@
        socket.setsockopt(zmq.RCVTIMEO, 300000)
        socket.setsockopt(zmq.SNDTIMEO, 300000)
-       # Track last request type for safe fallback responses on exceptions
-       last_request_type = "unknown"  # one of: 'text', 'distance', 'embedding', 'unknown'
-       last_request_length = 0
        while True:
            try:
                message_bytes = socket.recv()
@@ -126,8 +121,6 @@
                if isinstance(request_payload, list) and len(request_payload) > 0:
                    # Check if this is a direct text request (list of strings)
                    if all(isinstance(item, str) for item in request_payload):
-                       last_request_type = "text"
-                       last_request_length = len(request_payload)
                        logger.info(
                            f"Processing direct text embedding request for {len(request_payload)} texts in {embedding_mode} mode"
                        )
@@ -152,66 +145,43 @@
                    ):
                        node_ids = request_payload[0]
                        query_vector = np.array(request_payload[1], dtype=np.float32)
-                       last_request_type = "distance"
-                       last_request_length = len(node_ids)
                        logger.debug("Distance calculation request received")
                        logger.debug(f"  Node IDs: {node_ids}")
                        logger.debug(f"  Query vector dim: {len(query_vector)}")
-                       # Get embeddings for node IDs, tolerate missing IDs
-                       texts: list[str] = []
-                       found_indices: list[int] = []
-                       for idx, nid in enumerate(node_ids):
+                       # Get embeddings for node IDs
+                       texts = []
+                       for nid in node_ids:
                            try:
                                passage_data = passages.get_passage(str(nid))
-                               txt = passage_data.get("text", "")
-                               if isinstance(txt, str) and len(txt) > 0:
-                                   texts.append(txt)
-                                   found_indices.append(idx)
-                               else:
-                                   logger.error(f"Empty text for passage ID {nid}")
+                               txt = passage_data["text"]
+                               texts.append(txt)
                            except KeyError:
                                logger.error(f"Passage ID {nid} not found")
+                               raise RuntimeError(f"FATAL: Passage with ID {nid} not found")
                            except Exception as e:
                                logger.error(f"Exception looking up passage ID {nid}: {e}")
+                               raise
-                       # Prepare full-length response distances with safe fallbacks
-                       large_distance = 1e9
-                       response_distances = [large_distance] * len(node_ids)
-                       if texts:
-                           try:
-                               # Process embeddings only for found indices
-                               embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
-                               logger.info(
-                                   f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
-                               )
-                               # Calculate distances for found embeddings only
-                               if distance_metric == "l2":
-                                   partial_distances = np.sum(
-                                       np.square(embeddings - query_vector.reshape(1, -1)), axis=1
-                                   )
-                               else:  # mips or cosine
-                                   partial_distances = -np.dot(embeddings, query_vector)
-                               # Place computed distances back into the full response array
-                               for pos, dval in zip(
-                                   found_indices, partial_distances.flatten().tolist()
-                               ):
-                                   response_distances[pos] = float(dval)
-                           except Exception as e:
-                               logger.error(
-                                   f"Distance computation error, falling back to large distances: {e}"
-                               )
-                       # Always reply with exactly len(node_ids) distances
-                       response_bytes = msgpack.packb([response_distances], use_single_float=True)
-                       logger.debug(
-                           f"Sending distance response with {len(response_distances)} distances (found={len(found_indices)})"
-                       )
+                       # Process embeddings
+                       embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
+                       logger.info(
+                           f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
+                       )
+                       # Calculate distances
+                       if distance_metric == "l2":
+                           distances = np.sum(
+                               np.square(embeddings - query_vector.reshape(1, -1)), axis=1
+                           )
+                       else:  # mips or cosine
+                           distances = -np.dot(embeddings, query_vector)
+                       response_payload = distances.flatten().tolist()
+                       response_bytes = msgpack.packb([response_payload], use_single_float=True)
+                       logger.debug(f"Sending distance response with {len(distances)} distances")
                        socket.send(response_bytes)
                        e2e_end = time.time()
                        logger.info(f"⏱️ Distance calculation E2E time: {e2e_end - e2e_start:.6f}s")
@@ -231,61 +201,40 @@ def create_hnsw_embedding_server(
                        node_ids = request_payload[0]
                        logger.debug(f"Request for {len(node_ids)} node embeddings")
-                       last_request_type = "embedding"
-                       last_request_length = len(node_ids)
-                       # Allocate output buffer (B, D) and fill with zeros for robustness
-                       if embedding_dim <= 0:
-                           logger.error("Embedding dimension unknown; cannot serve embedding request")
-                           dims = [0, 0]
-                           data = []
-                       else:
-                           dims = [len(node_ids), embedding_dim]
-                           data = [0.0] * (dims[0] * dims[1])
-                       # Look up texts by node IDs; compute embeddings where available
-                       texts: list[str] = []
-                       found_indices: list[int] = []
-                       for idx, nid in enumerate(node_ids):
+                       # Look up texts by node IDs
+                       texts = []
+                       for nid in node_ids:
                            try:
                                passage_data = passages.get_passage(str(nid))
-                               txt = passage_data.get("text", "")
-                               if isinstance(txt, str) and len(txt) > 0:
-                                   texts.append(txt)
-                                   found_indices.append(idx)
-                               else:
-                                   logger.error(f"Empty text for passage ID {nid}")
+                               txt = passage_data["text"]
+                               if not txt:
+                                   raise RuntimeError(f"FATAL: Empty text for passage ID {nid}")
+                               texts.append(txt)
                            except KeyError:
-                               logger.error(f"Passage with ID {nid} not found")
+                               raise RuntimeError(f"FATAL: Passage with ID {nid} not found")
                            except Exception as e:
                                logger.error(f"Exception looking up passage ID {nid}: {e}")
+                               raise
-                       if texts:
-                           try:
-                               # Process embeddings for found texts only
-                               embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
-                               logger.info(
-                                   f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
-                               )
-                               if np.isnan(embeddings).any() or np.isinf(embeddings).any():
-                                   logger.error(
-                                       f"NaN or Inf detected in embeddings! Requested IDs: {node_ids[:5]}..."
-                                   )
-                                   dims = [0, embedding_dim]
-                                   data = []
-                               else:
-                                   # Copy computed embeddings into the correct positions
-                                   emb_f32 = np.ascontiguousarray(embeddings, dtype=np.float32)
-                                   flat = emb_f32.flatten().tolist()
-                                   for j, pos in enumerate(found_indices):
-                                       start = pos * embedding_dim
-                                       end = start + embedding_dim
-                                       data[start:end] = flat[j * embedding_dim : (j + 1) * embedding_dim]
-                           except Exception as e:
-                               logger.error(f"Embedding computation error, returning zeros: {e}")
-                       response_payload = [dims, data]
+                       # Process embeddings
+                       embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
+                       logger.info(
+                           f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
+                       )
+                       # Serialization and response
+                       if np.isnan(embeddings).any() or np.isinf(embeddings).any():
+                           logger.error(
+                               f"NaN or Inf detected in embeddings! Requested IDs: {node_ids[:5]}..."
+                           )
+                           raise AssertionError()
+                       hidden_contiguous_f32 = np.ascontiguousarray(embeddings, dtype=np.float32)
+                       response_payload = [
+                           list(hidden_contiguous_f32.shape),
+                           hidden_contiguous_f32.flatten().tolist(),
+                       ]
                        response_bytes = msgpack.packb(response_payload, use_single_float=True)
                        socket.send(response_bytes)
@@ -300,22 +249,7 @@ def create_hnsw_embedding_server(
                import traceback

                traceback.print_exc()
-               # Fallback to a safe, minimal-structure response to avoid client crashes
-               if last_request_type == "distance":
-                   # Return a vector of large distances with the expected length
-                   fallback_len = max(0, int(last_request_length))
-                   large_distance = 1e9
-                   safe_response = [[large_distance] * fallback_len]
-               elif last_request_type == "embedding":
-                   # Return an empty embedding block with known dimension if available
-                   if embedding_dim > 0:
-                       safe_response = [[0, embedding_dim], []]
-                   else:
-                       safe_response = [[0, 0], []]
-               else:
-                   # Unknown request type: default to empty embedding structure
-                   safe_response = [[0, int(embedding_dim) if embedding_dim > 0 else 0], []]
-               socket.send(msgpack.packb(safe_response, use_single_float=True))
+               socket.send(msgpack.packb([[], []]))

    zmq_thread = threading.Thread(target=zmq_server_thread, daemon=True)
    zmq_thread.start()
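For orientation, the wire format implied by the hunks above is msgpack over ZMQ: distance responses are a single-element list wrapping a distance list, and embedding responses are `[dims, flat_values]`. A minimal client-side sketch follows; the endpoint, port, socket type, and request shapes are inferred from the diff and should be treated as assumptions:

```python
# Illustrative client for the embedding server's msgpack-over-ZMQ protocol.
# Endpoint, socket type, and request shapes are inferred, not confirmed.
import msgpack
import numpy as np
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect("tcp://127.0.0.1:5557")  # port is an assumption

# Distance request: [node_ids, query_vector] -> response: [[d0, d1, ...]]
query = np.random.rand(384).astype(np.float32)
sock.send(msgpack.packb([["12", "47"], query.tolist()]))
(distances,) = msgpack.unpackb(sock.recv())

# Embedding request: [node_ids] -> response: [[B, D], flat_float32_values]
sock.send(msgpack.packb([["12", "47"]]))
dims, flat = msgpack.unpackb(sock.recv())
embeddings = np.asarray(flat, dtype=np.float32).reshape(dims)
```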

View File

@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "leann-backend-hnsw"
-version = "0.2.9"
+version = "0.2.7"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
 dependencies = [
-    "leann-core==0.2.9",
+    "leann-core==0.2.7",
     "numpy",
     "pyzmq>=23.0.0",
     "msgpack>=1.0.0",

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "leann-core"
-version = "0.2.9"
+version = "0.2.7"
 description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"

View File

@@ -117,19 +117,7 @@ Examples:
    build_parser.add_argument("--complexity", type=int, default=64)
    build_parser.add_argument("--num-threads", type=int, default=1)
    build_parser.add_argument("--compact", action="store_true", default=True)
-   build_parser.add_argument(
-       "--no-compact",
-       dest="compact",
-       action="store_false",
-       help="Disable compact index storage (store full embeddings; higher storage)",
-   )
    build_parser.add_argument("--recompute", action="store_true", default=True)
-   build_parser.add_argument(
-       "--no-recompute",
-       dest="recompute",
-       action="store_false",
-       help="Disable embedding recomputation (store full embeddings; lower query latency)",
-   )
    build_parser.add_argument(
        "--file-types",
        type=str,
@@ -150,18 +138,6 @@ Examples:
        default=True,
        help="Recompute embeddings (default: True)",
    )
-   search_parser.add_argument(
-       "--no-recompute-embeddings",
-       dest="recompute_embeddings",
-       action="store_false",
-       help="Disable embedding recomputation during search",
-   )
-   search_parser.add_argument(
-       "--no-recompute",
-       dest="recompute_embeddings",
-       action="store_false",
-       help="Alias for --no-recompute-embeddings",
-   )
    search_parser.add_argument(
        "--pruning-strategy",
        choices=["global", "local", "proportional"],
@@ -190,18 +166,6 @@ Examples:
        default=True,
        help="Recompute embeddings (default: True)",
    )
-   ask_parser.add_argument(
-       "--no-recompute-embeddings",
-       dest="recompute_embeddings",
-       action="store_false",
-       help="Disable embedding recomputation during ask",
-   )
-   ask_parser.add_argument(
-       "--no-recompute",
-       dest="recompute_embeddings",
-       action="store_false",
-       help="Alias for --no-recompute-embeddings",
-   )
    ask_parser.add_argument(
        "--pruning-strategy",
        choices=["global", "local", "proportional"],

View File

@@ -4,12 +4,20 @@ Transform your development workflow with intelligent code assistance using LEANN
 
 ## Prerequisites
 
-Install LEANN globally for MCP integration (with default backend):
+**Step 1:** First, complete the basic LEANN installation following the [📦 Installation guide](../../README.md#installation) in the root README:
 
 ```bash
-uv tool install leann-core --with leann
+uv venv
+source .venv/bin/activate
+uv pip install leann
 ```
 
-This installs the `leann` CLI into an isolated tool environment and includes both backends so `leann build` works out-of-the-box.
+**Step 2:** Install LEANN globally for MCP integration:
+
+```bash
+uv tool install leann-core
+```
+
+This makes the `leann` command available system-wide, which `leann_mcp` requires.
 
 ## 🚀 Quick Setup

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "leann"
-version = "0.2.9"
+version = "0.2.7"
 description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
 readme = "README.md"
 requires-python = ">=3.9"

View File

@@ -1,62 +0,0 @@
-name: leann-build
-
-resources:
-  # Choose a GPU for fast embeddings (examples: L4, A10G, A100). CPU also works but is slower.
-  accelerators: L4:1
-  # Optionally pin a cloud, otherwise SkyPilot will auto-select
-  # cloud: aws
-  disk_size: 100
-
-env:
-  # Build parameters (override with: sky launch -c leann-gpu sky/leann-build.yaml -e key=value)
-  index_name: my-index
-  docs: ./data
-  backend: hnsw  # hnsw | diskann
-  complexity: 64
-  graph_degree: 32
-  num_threads: 8
-  # Embedding selection
-  embedding_mode: sentence-transformers  # sentence-transformers | openai | mlx | ollama
-  embedding_model: facebook/contriever
-  # Storage/latency knobs
-  recompute: true  # true => selective recomputation; false => store full embeddings
-  compact: true  # for HNSW only: false when recompute=false
-  # Optional pass-through
-  extra_args: ""
-
-# Sync local paths to the remote VM. Adjust as needed.
-file_mounts:
-  # Example: mount your local data directory used for building
-  ~/leann-data: ${docs}
-
-setup: |
-  set -e
-  # Install uv (package manager)
-  curl -LsSf https://astral.sh/uv/install.sh | sh
-  export PATH="$HOME/.local/bin:$PATH"
-  # Install the LEANN CLI globally on the remote machine
-  uv tool install leann
-
-run: |
-  export PATH="$HOME/.local/bin:$PATH"
-  # Derive flags from env
-  recompute_flag=""
-  if [ "${recompute}" = "false" ] || [ "${recompute}" = "0" ]; then
-    recompute_flag="--no-recompute"
-  fi
-  compact_flag=""
-  if [ "${compact}" = "false" ] || [ "${compact}" = "0" ]; then
-    compact_flag="--no-compact"
-  fi
-  # Build command
-  leann build ${index_name} \
-    --docs ~/leann-data \
-    --backend ${backend} \
-    --complexity ${complexity} \
-    --graph-degree ${graph_degree} \
-    --num-threads ${num_threads} \
-    --embedding-mode ${embedding_mode} \
-    --embedding-model ${embedding_model} \
-    ${recompute_flag} ${compact_flag} ${extra_args}