Compare commits


5 Commits

Author SHA1 Message Date
yichuan-w
5637270f02 fix: format colqwen_forward.py to pass pre-commit checks 2025-12-03 09:06:29 +00:00
yichuan-w
1c690e4a8a reproduce docvqa results and add debug file 2025-12-03 08:54:55 +00:00
yichuan-w
07afe546ea reproduce docvqa results 2025-11-14 10:22:42 +00:00
yichuan-w
ae3b8af3df update vidore 2025-11-14 07:31:24 +00:00
yichuan-w
a9c014df9e Add timing instrumentation and multi-dataset support for multi-vector retrieval
- Add timing measurements for search operations (load and core time)
- Increase embedding batch size from 1 to 32 for better performance
- Add explicit memory cleanup with del all_embeddings
- Support loading and merging multiple datasets with different splits
- Add CLI arguments for search method selection (ann/exact/exact-all)
- Auto-detect image field names across different dataset structures
- Print candidate doc counts for performance monitoring

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-10 21:13:17 +00:00
54 changed files with 1339 additions and 4756 deletions

View File

@@ -28,36 +28,15 @@ jobs:
run: |
uv run --only-group lint pre-commit run --all-files --show-diff-on-failure
type-check:
name: Type Check with ty
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
submodules: recursive
- name: Install uv and Python
uses: astral-sh/setup-uv@v6
with:
python-version: '3.11'
- name: Install ty
run: uv tool install ty
- name: Run ty type checker
run: |
# Run ty on core packages, apps, and tests
ty check packages/leann-core/src apps tests
build:
needs: [lint, type-check]
needs: lint
name: Build ${{ matrix.os }} Python ${{ matrix.python }}
strategy:
matrix:
include:
# Note: Python 3.9 dropped - uses PEP 604 union syntax (str | None)
# which requires Python 3.10+
- os: ubuntu-22.04
python: '3.9'
- os: ubuntu-22.04
python: '3.10'
- os: ubuntu-22.04
@@ -67,6 +46,8 @@ jobs:
- os: ubuntu-22.04
python: '3.13'
# ARM64 Linux builds
- os: ubuntu-24.04-arm
python: '3.9'
- os: ubuntu-24.04-arm
python: '3.10'
- os: ubuntu-24.04-arm
@@ -75,6 +56,8 @@ jobs:
python: '3.12'
- os: ubuntu-24.04-arm
python: '3.13'
- os: macos-14
python: '3.9'
- os: macos-14
python: '3.10'
- os: macos-14
@@ -83,6 +66,8 @@ jobs:
python: '3.12'
- os: macos-14
python: '3.13'
- os: macos-15
python: '3.9'
- os: macos-15
python: '3.10'
- os: macos-15
@@ -91,24 +76,16 @@ jobs:
python: '3.12'
- os: macos-15
python: '3.13'
# Intel Mac builds (x86_64) - replaces deprecated macos-13
# Note: Python 3.13 excluded - PyTorch has no wheels for macOS x86_64 + Python 3.13
# (PyTorch <=2.4.1 lacks cp313, PyTorch >=2.5.0 dropped Intel Mac support)
- os: macos-15-intel
- os: macos-13
python: '3.9'
- os: macos-13
python: '3.10'
- os: macos-15-intel
- os: macos-13
python: '3.11'
- os: macos-15-intel
- os: macos-13
python: '3.12'
# macOS 26 (beta) - arm64
- os: macos-26
python: '3.10'
- os: macos-26
python: '3.11'
- os: macos-26
python: '3.12'
- os: macos-26
python: '3.13'
# Note: macos-13 + Python 3.13 excluded due to PyTorch compatibility
# (PyTorch 2.5+ supports Python 3.13 but not Intel Mac x86_64)
runs-on: ${{ matrix.os }}
steps:
@@ -227,16 +204,13 @@ jobs:
# Use system clang for better compatibility
export CC=clang
export CXX=clang++
# Set deployment target based on runner
# macos-15-intel runs macOS 15, so target 15.0 (system libraries require it)
if [[ "${{ matrix.os }}" == "macos-15-intel" ]]; then
export MACOSX_DEPLOYMENT_TARGET=15.0
elif [[ "${{ matrix.os }}" == macos-14* ]]; then
# Homebrew libraries on each macOS version require matching minimum version
if [[ "${{ matrix.os }}" == "macos-13" ]]; then
export MACOSX_DEPLOYMENT_TARGET=13.0
elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
export MACOSX_DEPLOYMENT_TARGET=14.0
elif [[ "${{ matrix.os }}" == macos-15* ]]; then
elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
export MACOSX_DEPLOYMENT_TARGET=15.0
elif [[ "${{ matrix.os }}" == macos-26* ]]; then
export MACOSX_DEPLOYMENT_TARGET=26.0
fi
uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
else
@@ -250,16 +224,14 @@ jobs:
# Use system clang for better compatibility
export CC=clang
export CXX=clang++
# Set deployment target based on runner
# macos-15-intel runs macOS 15, so target 15.0 (system libraries require it)
if [[ "${{ matrix.os }}" == "macos-15-intel" ]]; then
export MACOSX_DEPLOYMENT_TARGET=15.0
elif [[ "${{ matrix.os }}" == macos-14* ]]; then
# DiskANN requires macOS 13.3+ for sgesdd_ LAPACK function
# But Homebrew libraries on each macOS version require matching minimum version
if [[ "${{ matrix.os }}" == "macos-13" ]]; then
export MACOSX_DEPLOYMENT_TARGET=13.3
elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
export MACOSX_DEPLOYMENT_TARGET=14.0
elif [[ "${{ matrix.os }}" == macos-15* ]]; then
elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
export MACOSX_DEPLOYMENT_TARGET=15.0
elif [[ "${{ matrix.os }}" == macos-26* ]]; then
export MACOSX_DEPLOYMENT_TARGET=26.0
fi
uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
else
@@ -297,19 +269,16 @@ jobs:
if: runner.os == 'macOS'
run: |
# Determine deployment target based on runner OS
# macos-15-intel runs macOS 15, so target 15.0 (system libraries require it)
if [[ "${{ matrix.os }}" == "macos-15-intel" ]]; then
HNSW_TARGET="15.0"
DISKANN_TARGET="15.0"
elif [[ "${{ matrix.os }}" == macos-14* ]]; then
# Must match the Homebrew libraries for each macOS version
if [[ "${{ matrix.os }}" == "macos-13" ]]; then
HNSW_TARGET="13.0"
DISKANN_TARGET="13.3"
elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
HNSW_TARGET="14.0"
DISKANN_TARGET="14.0"
elif [[ "${{ matrix.os }}" == macos-15* ]]; then
elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
HNSW_TARGET="15.0"
DISKANN_TARGET="15.0"
elif [[ "${{ matrix.os }}" == macos-26* ]]; then
HNSW_TARGET="26.0"
DISKANN_TARGET="26.0"
fi
# Repair HNSW wheel
@@ -365,15 +334,12 @@ jobs:
PY_TAG=$($UV_PY -c "import sys; print(f'cp{sys.version_info[0]}{sys.version_info[1]}')")
if [[ "$RUNNER_OS" == "macOS" ]]; then
# macos-15-intel runs macOS 15, so target 15.0 (system libraries require it)
if [[ "${{ matrix.os }}" == "macos-15-intel" ]]; then
export MACOSX_DEPLOYMENT_TARGET=15.0
elif [[ "${{ matrix.os }}" == macos-14* ]]; then
if [[ "${{ matrix.os }}" == "macos-13" ]]; then
export MACOSX_DEPLOYMENT_TARGET=13.3
elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
export MACOSX_DEPLOYMENT_TARGET=14.0
elif [[ "${{ matrix.os }}" == macos-15* ]]; then
elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
export MACOSX_DEPLOYMENT_TARGET=15.0
elif [[ "${{ matrix.os }}" == macos-26* ]]; then
export MACOSX_DEPLOYMENT_TARGET=26.0
fi
fi

View File

@@ -14,6 +14,6 @@ jobs:
- uses: actions/checkout@v4
- uses: lycheeverse/lychee-action@v2
with:
args: --no-progress --insecure --user-agent 'curl/7.68.0' --exclude '.*api\.star-history\.com.*' --accept 200,201,202,203,204,205,206,207,208,226,300,301,302,303,304,305,306,307,308,503 README.md docs/ apps/ examples/ benchmarks/
args: --no-progress --insecure --user-agent 'curl/7.68.0' README.md docs/ apps/ examples/ benchmarks/
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -16,27 +16,15 @@
</a>
</p>
<div align="center">
<a href="https://forms.gle/rDbZf864gMNxhpTq8">
<img src="https://img.shields.io/badge/📣_Community_Survey-Help_Shape_v0.4-007ec6?style=for-the-badge&logo=google-forms&logoColor=white" alt="Take Survey">
</a>
<p>
We track <b>zero telemetry</b>. This survey is the ONLY way to tell us if you want <br>
<b>GPU Acceleration</b> or <b>More Integrations</b> next.<br>
👉 <a href="https://forms.gle/rDbZf864gMNxhpTq8"><b>Click here to cast your vote (2 mins)</b></a>
</p>
</div>
<h2 align="center" tabindex="-1" class="heading-element" dir="auto">
The smallest vector index in the world. RAG Everything with LEANN!
</h2>
LEANN is an innovative vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.
LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration Fig →](#-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
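A minimal sketch of that idea, with hypothetical names (`embed`, `neighbors`, `search`) rather than LEANN's actual API: only the text and a pruned proximity graph are stored, and node embeddings are recomputed as the search visits them.

```python
# Illustrative only: graph search that recomputes embeddings on demand.
import numpy as np

def embed(text: str, dim: int = 8) -> np.ndarray:
    # Stand-in for a real embedding model call.
    rng = np.random.default_rng(abs(hash(text)) % (2**32))
    v = rng.standard_normal(dim)
    return v / np.linalg.norm(v)

texts = ["graph search", "vector index", "storage savings", "recomputation"]
neighbors = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}  # pruned graph

def search(query: str, entry: int = 0, steps: int = 3) -> int:
    q = embed(query)
    best, cache = entry, {}
    for _ in range(steps):
        candidates = [best] + neighbors[best]
        for c in candidates:
            if c not in cache:  # recompute instead of reading a stored matrix
                cache[c] = embed(texts[c])
        best = max(candidates, key=lambda c: float(q @ cache[c]))
    return best

print(texts[search("index storage")])
```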
**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can semantically search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)** ([WeChat](#-wechat-detective-unlock-your-golden-memories), [iMessage](#-imessage-history-your-personal-conversation-archive)), **[agent memory](#-chatgpt-chat-history-your-personal-ai-conversation-archive)** ([ChatGPT](#-chatgpt-chat-history-your-personal-ai-conversation-archive), [Claude](#-claude-chat-history-your-personal-ai-conversation-archive)), **[live data](#mcp-integration-rag-on-live-data-from-any-platform)** ([Slack](#slack-messages-search-your-team-conversations), [Twitter](#-twitter-bookmarks-your-personal-tweet-library)), **[codebase](#-claude-code-integration-transform-your-development-workflow)**\*, or external knowledge bases (e.g., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can semantically search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)** ([WeChat](#-wechat-detective-unlock-your-golden-memories), [iMessage](#-imessage-history-your-personal-conversation-archive)), **[agent memory](#-chatgpt-chat-history-your-personal-ai-conversation-archive)** ([ChatGPT](#-chatgpt-chat-history-your-personal-ai-conversation-archive), [Claude](#-claude-chat-history-your-personal-ai-conversation-archive)), **[live data](#mcp-integration-rag-on-live-data-from-any-platform)** ([Slack](#mcp-integration-rag-on-live-data-from-any-platform), [Twitter](#mcp-integration-rag-on-live-data-from-any-platform)), **[codebase](#-claude-code-integration-transform-your-development-workflow)**\*, or external knowledge bases (e.g., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
\* Claude Code only supports basic `grep`-style keyword search. **LEANN** is a drop-in **semantic search MCP service fully compatible with Claude Code**, unlocking intelligent retrieval without changing your workflow. 🔥 Check out [the easy setup →](packages/leann-mcp/README.md)
@@ -201,7 +189,7 @@ LEANN supports RAG on various data sources including documents (`.pdf`, `.txt`,
#### LLM Backend
LEANN supports many LLM providers for text generation (HuggingFace, Ollama, Anthropic, and any OpenAI-compatible API).
LEANN supports many LLM providers for text generation (HuggingFace, Ollama, and any OpenAI-compatible API).
<details>
@@ -269,7 +257,6 @@ Below is a list of base URLs for common providers to get you started.
| **SiliconFlow** | `https://api.siliconflow.cn/v1` |
| **Zhipu (BigModel)** | `https://open.bigmodel.cn/api/paas/v4/` |
| **Mistral AI** | `https://api.mistral.ai/v1` |
| **Anthropic** | `https://api.anthropic.com/v1` |
@@ -329,7 +316,7 @@ All RAG examples share these common parameters. **Interactive mode** is availabl
--embedding-mode MODE # sentence-transformers, openai, mlx, or ollama
# LLM Parameters (Text generation models)
--llm TYPE # LLM backend: openai, ollama, hf, or anthropic (default: openai)
--llm TYPE # LLM backend: openai, ollama, or hf (default: openai)
--llm-model MODEL # Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct
--thinking-budget LEVEL # Thinking budget for reasoning models: low/medium/high (supported by o3, o3-mini, GPT-Oss:20b, and other reasoning models)
@@ -392,54 +379,6 @@ python -m apps.code_rag --repo-dir "./my_codebase" --query "How does authenticat
</details>
### 🎨 ColQwen: Multimodal PDF Retrieval with Vision-Language Models
Search through PDFs using both text and visual understanding with ColQwen2/ColPali models. Perfect for research papers, technical documents, and any PDFs with complex layouts, figures, or diagrams.
> **🍎 Mac Users**: ColQwen is optimized for Apple Silicon with MPS acceleration for faster inference!
```bash
# Build index from PDFs
python -m apps.colqwen_rag build --pdfs ./my_papers/ --index research_papers
# Search with text queries
python -m apps.colqwen_rag search research_papers "How does attention mechanism work?"
# Interactive Q&A
python -m apps.colqwen_rag ask research_papers --interactive
```
<details>
<summary><strong>📋 Click to expand: ColQwen Setup & Usage</strong></summary>
#### Prerequisites
```bash
# Install dependencies
uv pip install colpali_engine pdf2image pillow matplotlib qwen_vl_utils einops seaborn
brew install poppler # macOS only, for PDF processing
```
#### Build Index
```bash
python -m apps.colqwen_rag build \
--pdfs ./pdf_directory/ \
--index my_index \
--model colqwen2 # or colpali
```
#### Search
```bash
python -m apps.colqwen_rag search my_index "your question here" --top-k 5
```
#### Models
- **ColQwen2** (`colqwen2`): Latest vision-language model with improved performance
- **ColPali** (`colpali`): Proven multimodal retriever
For detailed usage, see the [ColQwen Guide](docs/COLQWEN_GUIDE.md).
</details>
### 📧 Your Personal Email Secretary: RAG on Apple Mail!
> **Note:** The examples below currently support macOS only. Windows support coming soon.
@@ -1106,10 +1045,10 @@ Options:
leann ask INDEX_NAME [OPTIONS]
Options:
--llm {ollama,openai,hf,anthropic} LLM provider (default: ollama)
--model MODEL Model name (default: qwen3:8b)
--interactive Interactive chat mode
--top-k N Retrieval count (default: 20)
--llm {ollama,openai,hf} LLM provider (default: ollama)
--model MODEL Model name (default: qwen3:8b)
--interactive Interactive chat mode
--top-k N Retrieval count (default: 20)
```
**List Command:**

View File

@@ -257,8 +257,8 @@ class BaseRAGExample(ABC):
pass
@abstractmethod
async def load_data(self, args) -> list[dict[str, Any]]:
"""Load data from the source. Returns list of text chunks as dicts with 'text' and 'metadata' keys."""
async def load_data(self, args) -> list[str]:
"""Load data from the source. Returns list of text chunks."""
pass
def get_llm_config(self, args) -> dict[str, Any]:
@@ -282,8 +282,8 @@ class BaseRAGExample(ABC):
return config
async def build_index(self, args, texts: list[dict[str, Any]]) -> str:
"""Build LEANN index from text chunks (dicts with 'text' and 'metadata' keys)."""
async def build_index(self, args, texts: list[str]) -> str:
"""Build LEANN index from texts."""
index_path = str(Path(args.index_dir) / f"{self.default_index_name}.leann")
print(f"\n[Building Index] Creating {self.name} index...")
@@ -314,14 +314,8 @@ class BaseRAGExample(ABC):
batch_size = 1000
for i in range(0, len(texts), batch_size):
batch = texts[i : i + batch_size]
for item in batch:
# Handle both dict format (from create_text_chunks) and plain strings
if isinstance(item, dict):
text = item.get("text", "")
metadata = item.get("metadata")
builder.add_text(text, metadata)
else:
builder.add_text(item)
for text in batch:
builder.add_text(text)
print(f"Added {min(i + batch_size, len(texts))}/{len(texts)} texts...")
print("Building index structure...")

View File

@@ -6,7 +6,6 @@ Supports Chrome browser history.
import os
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -86,7 +85,7 @@ class BrowserRAG(BaseRAGExample):
return profiles
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load browser history and convert to text chunks."""
# Determine Chrome profiles
if args.chrome_profile and not args.auto_find_profiles:

View File

@@ -5,7 +5,6 @@ Supports ChatGPT export data from chat.html files.
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -81,7 +80,7 @@ class ChatGPTRAG(BaseRAGExample):
return export_files
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load ChatGPT export data and convert to text chunks."""
export_path = Path(args.export_path)

View File

@@ -5,7 +5,6 @@ Supports Claude export data from JSON files.
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -81,7 +80,7 @@ class ClaudeRAG(BaseRAGExample):
return export_files
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load Claude export data and convert to text chunks."""
export_path = Path(args.export_path)

View File

@@ -6,7 +6,6 @@ optimized chunking parameters.
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -78,7 +77,7 @@ class CodeRAG(BaseRAGExample):
help="Try to preserve import statements in chunks (default: True)",
)
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load code files and convert to AST-aware chunks."""
print(f"🔍 Scanning code repository: {args.repo_dir}")
print(f"📁 Including extensions: {args.include_extensions}")
@@ -89,6 +88,14 @@ class CodeRAG(BaseRAGExample):
if not repo_path.exists():
raise ValueError(f"Repository directory not found: {args.repo_dir}")
# Load code files with filtering
reader_kwargs = {
"recursive": True,
"encoding": "utf-8",
"required_exts": args.include_extensions,
"exclude_hidden": True,
}
# Create exclusion filter
def file_filter(file_path: str) -> bool:
"""Filter out unwanted files and directories."""
@@ -113,11 +120,8 @@ class CodeRAG(BaseRAGExample):
# Load documents with file filtering
documents = SimpleDirectoryReader(
args.repo_dir,
file_extractor=None,
recursive=True,
encoding="utf-8",
required_exts=args.include_extensions,
exclude_hidden=True,
file_extractor=None, # Use default extractors
**reader_kwargs,
).load_data(show_progress=True)
# Apply custom filtering
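A sketch of that post-load filtering step, with a stand-in `Document` in place of llama_index's class and an illustrative block list:

```python
from dataclasses import dataclass, field

@dataclass
class Document:
    """Stand-in for a llama_index Document, which exposes a metadata dict."""
    text: str
    metadata: dict = field(default_factory=dict)

def file_filter(file_path: str) -> bool:
    """Drop files under unwanted directories (block list is illustrative)."""
    blocked = (".git", "node_modules", "__pycache__")
    return not any(part in file_path for part in blocked)

documents = [
    Document("code", {"file_path": "src/app.py"}),
    Document("dep", {"file_path": "node_modules/pkg/index.js"}),
]
documents = [d for d in documents if file_filter(d.metadata.get("file_path", ""))]
print([d.metadata["file_path"] for d in documents])  # ['src/app.py']
```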

View File

@@ -1,364 +0,0 @@
#!/usr/bin/env python3
"""
ColQwen RAG - Easy-to-use multimodal PDF retrieval with ColQwen2/ColPali
Usage:
python -m apps.colqwen_rag build --pdfs ./my_pdfs/ --index my_index
python -m apps.colqwen_rag search my_index "How does attention work?"
python -m apps.colqwen_rag ask my_index --interactive
"""
import argparse
import os
import sys
from pathlib import Path
from typing import Optional, cast
# Add LEANN packages to path
_repo_root = Path(__file__).resolve().parents[1]
_leann_core_src = _repo_root / "packages" / "leann-core" / "src"
_leann_hnsw_pkg = _repo_root / "packages" / "leann-backend-hnsw"
if str(_leann_core_src) not in sys.path:
sys.path.append(str(_leann_core_src))
if str(_leann_hnsw_pkg) not in sys.path:
sys.path.append(str(_leann_hnsw_pkg))
import torch # noqa: E402
from colpali_engine import ColPali, ColPaliProcessor, ColQwen2, ColQwen2Processor # noqa: E402
from colpali_engine.utils.torch_utils import ListDataset # noqa: E402
from pdf2image import convert_from_path # noqa: E402
from PIL import Image # noqa: E402
from torch.utils.data import DataLoader # noqa: E402
from tqdm import tqdm # noqa: E402
# Import the existing multi-vector implementation
sys.path.append(str(_repo_root / "apps" / "multimodal" / "vision-based-pdf-multi-vector"))
from leann_multi_vector import LeannMultiVector # noqa: E402
class ColQwenRAG:
"""Easy-to-use ColQwen RAG system for multimodal PDF retrieval."""
def __init__(self, model_type: str = "colpali"):
"""
Initialize ColQwen RAG system.
Args:
model_type: "colqwen2" or "colpali"
"""
self.model_type = model_type
self.device = self._get_device()
# Use float32 on MPS to avoid memory issues, float16 on CUDA, bfloat16 on CPU
if self.device.type == "mps":
self.dtype = torch.float32
elif self.device.type == "cuda":
self.dtype = torch.float16
else:
self.dtype = torch.bfloat16
print(f"🚀 Initializing {model_type.upper()} on {self.device} with {self.dtype}")
# Load model and processor with MPS-optimized settings
try:
if model_type == "colqwen2":
self.model_name = "vidore/colqwen2-v1.0"
if self.device.type == "mps":
# For MPS, load on CPU first then move to avoid memory allocation issues
self.model = ColQwen2.from_pretrained(
self.model_name,
torch_dtype=self.dtype,
device_map="cpu",
low_cpu_mem_usage=True,
).eval()
self.model = self.model.to(self.device)
else:
self.model = ColQwen2.from_pretrained(
self.model_name,
torch_dtype=self.dtype,
device_map=self.device,
low_cpu_mem_usage=True,
).eval()
self.processor = ColQwen2Processor.from_pretrained(self.model_name)
else: # colpali
self.model_name = "vidore/colpali-v1.2"
if self.device.type == "mps":
# For MPS, load on CPU first then move to avoid memory allocation issues
self.model = ColPali.from_pretrained(
self.model_name,
torch_dtype=self.dtype,
device_map="cpu",
low_cpu_mem_usage=True,
).eval()
self.model = self.model.to(self.device)
else:
self.model = ColPali.from_pretrained(
self.model_name,
torch_dtype=self.dtype,
device_map=self.device,
low_cpu_mem_usage=True,
).eval()
self.processor = ColPaliProcessor.from_pretrained(self.model_name)
except Exception as e:
if "memory" in str(e).lower() or "offload" in str(e).lower():
print(f"⚠️ Memory constraint on {self.device}, using CPU with optimizations...")
self.device = torch.device("cpu")
self.dtype = torch.float32
if model_type == "colqwen2":
self.model = ColQwen2.from_pretrained(
self.model_name,
torch_dtype=self.dtype,
device_map="cpu",
low_cpu_mem_usage=True,
).eval()
else:
self.model = ColPali.from_pretrained(
self.model_name,
torch_dtype=self.dtype,
device_map="cpu",
low_cpu_mem_usage=True,
).eval()
else:
raise
def _get_device(self):
"""Auto-select best available device."""
if torch.cuda.is_available():
return torch.device("cuda")
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
return torch.device("mps")
else:
return torch.device("cpu")
def build_index(self, pdf_paths: list[str], index_name: str, pages_dir: Optional[str] = None):
"""
Build multimodal index from PDF files.
Args:
pdf_paths: List of PDF file paths
index_name: Name for the index
pages_dir: Directory to save page images (optional)
"""
print(f"Building index '{index_name}' from {len(pdf_paths)} PDFs...")
# Convert PDFs to images
all_images = []
all_metadata = []
if pages_dir:
os.makedirs(pages_dir, exist_ok=True)
for pdf_path in tqdm(pdf_paths, desc="Converting PDFs"):
try:
images = convert_from_path(pdf_path, dpi=150)
pdf_name = Path(pdf_path).stem
for i, image in enumerate(images):
# Save image if pages_dir specified
if pages_dir:
image_path = Path(pages_dir) / f"{pdf_name}_page_{i + 1}.png"
image.save(image_path)
all_images.append(image)
all_metadata.append(
{
"pdf_path": pdf_path,
"pdf_name": pdf_name,
"page_number": i + 1,
"image_path": str(image_path) if pages_dir else None,
}
)
except Exception as e:
print(f"❌ Error processing {pdf_path}: {e}")
continue
print(f"📄 Converted {len(all_images)} pages from {len(pdf_paths)} PDFs")
print(f"All metadata: {all_metadata}")
# Generate embeddings
print("🧠 Generating embeddings...")
embeddings = self._embed_images(all_images)
# Build LEANN index
print("🔍 Building LEANN index...")
leann_mv = LeannMultiVector(
index_path=index_name,
dim=embeddings.shape[-1],
embedding_model_name=self.model_type,
)
# Create collection and insert data
leann_mv.create_collection()
for i, (embedding, metadata) in enumerate(zip(embeddings, all_metadata)):
data = {
"doc_id": i,
"filepath": metadata.get("image_path", ""),
"colbert_vecs": embedding.numpy(), # Convert tensor to numpy
}
leann_mv.insert(data)
# Build the index
leann_mv.create_index()
print(f"✅ Index '{index_name}' built successfully!")
return leann_mv
def search(self, index_name: str, query: str, top_k: int = 5):
"""
Search the index with a text query.
Args:
index_name: Name of the index to search
query: Text query
top_k: Number of results to return
"""
print(f"🔍 Searching '{index_name}' for: '{query}'")
# Load index
leann_mv = LeannMultiVector(
index_path=index_name,
dim=128, # Will be updated when loading
embedding_model_name=self.model_type,
)
# Generate query embedding
query_embedding = self._embed_query(query)
# Search (returns list of (score, doc_id) tuples)
search_results = leann_mv.search(query_embedding.numpy(), topk=top_k)
# Display results
print(f"\n📋 Top {len(search_results)} results:")
for i, (score, doc_id) in enumerate(search_results, 1):
# Get metadata for this doc_id (we need to load the metadata)
print(f"{i}. Score: {score:.3f} | Doc ID: {doc_id}")
return search_results
def ask(self, index_name: str, interactive: bool = False):
"""
Interactive Q&A with the indexed documents.
Args:
index_name: Name of the index to query
interactive: Whether to run in interactive mode
"""
print(f"💬 ColQwen Chat with '{index_name}'")
if interactive:
print("Type 'quit' to exit, 'help' for commands")
while True:
try:
query = input("\n🤔 Your question: ").strip()
if query.lower() in ["quit", "exit", "q"]:
break
elif query.lower() == "help":
print("Commands: quit/exit/q (exit), help (this message)")
continue
elif not query:
continue
self.search(index_name, query, top_k=3)
# TODO: Add answer generation with Qwen-VL
print("\n💡 For detailed answers, we can integrate Qwen-VL here!")
except KeyboardInterrupt:
print("\n👋 Goodbye!")
break
else:
query = input("🤔 Your question: ").strip()
if query:
self.search(index_name, query)
def _embed_images(self, images: list[Image.Image]) -> torch.Tensor:
"""Generate embeddings for a list of images."""
dataset = ListDataset(images)
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=lambda x: x)
embeddings = []
with torch.no_grad():
for batch in tqdm(dataloader, desc="Embedding images"):
batch_images = cast(list, batch)
batch_inputs = self.processor.process_images(batch_images).to(self.device)
batch_embeddings = self.model(**batch_inputs)
embeddings.append(batch_embeddings.cpu())
return torch.cat(embeddings, dim=0)
def _embed_query(self, query: str) -> torch.Tensor:
"""Generate embedding for a text query."""
with torch.no_grad():
query_inputs = self.processor.process_queries([query]).to(self.device)
query_embedding = self.model(**query_inputs)
return query_embedding.cpu()
def main():
parser = argparse.ArgumentParser(description="ColQwen RAG - Easy multimodal PDF retrieval")
subparsers = parser.add_subparsers(dest="command", help="Available commands")
# Build command
build_parser = subparsers.add_parser("build", help="Build index from PDFs")
build_parser.add_argument("--pdfs", required=True, help="Directory containing PDF files")
build_parser.add_argument("--index", required=True, help="Index name")
build_parser.add_argument(
"--model", choices=["colqwen2", "colpali"], default="colqwen2", help="Model to use"
)
build_parser.add_argument("--pages-dir", help="Directory to save page images")
# Search command
search_parser = subparsers.add_parser("search", help="Search the index")
search_parser.add_argument("index", help="Index name")
search_parser.add_argument("query", help="Search query")
search_parser.add_argument("--top-k", type=int, default=5, help="Number of results")
search_parser.add_argument(
"--model", choices=["colqwen2", "colpali"], default="colqwen2", help="Model to use"
)
# Ask command
ask_parser = subparsers.add_parser("ask", help="Interactive Q&A")
ask_parser.add_argument("index", help="Index name")
ask_parser.add_argument("--interactive", action="store_true", help="Interactive mode")
ask_parser.add_argument(
"--model", choices=["colqwen2", "colpali"], default="colqwen2", help="Model to use"
)
args = parser.parse_args()
if not args.command:
parser.print_help()
return
# Initialize ColQwen RAG
if args.command == "build":
colqwen = ColQwenRAG(args.model)
# Get PDF files
pdf_dir = Path(args.pdfs)
if pdf_dir.is_file() and pdf_dir.suffix.lower() == ".pdf":
pdf_paths = [str(pdf_dir)]
elif pdf_dir.is_dir():
pdf_paths = [str(p) for p in pdf_dir.glob("*.pdf")]
else:
print(f"❌ Invalid PDF path: {args.pdfs}")
return
if not pdf_paths:
print(f"❌ No PDF files found in {args.pdfs}")
return
colqwen.build_index(pdf_paths, args.index, args.pages_dir)
elif args.command == "search":
colqwen = ColQwenRAG(args.model)
colqwen.search(args.index, args.query, args.top_k)
elif args.command == "ask":
colqwen = ColQwenRAG(args.model)
colqwen.ask(args.index, args.interactive)
if __name__ == "__main__":
main()
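For background on the retrieval step this file wraps: late-interaction retrievers such as ColPali/ColQwen score a page by MaxSim, matching each query token vector against all of the page's patch vectors. A self-contained numpy sketch of that scoring (illustration only, not the repo's `LeannMultiVector` implementation):

```python
import numpy as np

def maxsim(query_vecs: np.ndarray, doc_vecs: np.ndarray) -> float:
    """query_vecs: (q, d) query token vectors; doc_vecs: (n, d) page patch
    vectors. Rows are assumed L2-normalized."""
    sims = query_vecs @ doc_vecs.T          # (q, n) token-to-patch similarities
    return float(sims.max(axis=1).sum())    # best match per query token, summed

rng = np.random.default_rng(0)
q = rng.standard_normal((4, 128))
q /= np.linalg.norm(q, axis=1, keepdims=True)
d = rng.standard_normal((900, 128))
d /= np.linalg.norm(d, axis=1, keepdims=True)
print(f"MaxSim score: {maxsim(q, d):.3f}")
```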

View File

@@ -5,7 +5,6 @@ Supports PDF, TXT, MD, and other document formats.
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -52,7 +51,7 @@ class DocumentRAG(BaseRAGExample):
help="Enable AST-aware chunking for code files in the data directory",
)
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load documents and convert to text chunks."""
print(f"Loading documents from: {args.data_dir}")
if args.file_types:
@@ -66,12 +65,16 @@ class DocumentRAG(BaseRAGExample):
raise ValueError(f"Data directory not found: {args.data_dir}")
# Load documents
documents = SimpleDirectoryReader(
args.data_dir,
recursive=True,
encoding="utf-8",
required_exts=args.file_types if args.file_types else None,
).load_data(show_progress=True)
reader_kwargs = {
"recursive": True,
"encoding": "utf-8",
}
if args.file_types:
reader_kwargs["required_exts"] = args.file_types
documents = SimpleDirectoryReader(args.data_dir, **reader_kwargs).load_data(
show_progress=True
)
if not documents:
print(f"No documents found in {args.data_dir} with extensions {args.file_types}")

View File

@@ -127,12 +127,11 @@ class EmlxMboxReader(MboxReader):
def load_data(
self,
file: Path, # Note: for EmlxMboxReader, this is actually a directory
directory: Path,
extra_info: dict | None = None,
fs: AbstractFileSystem | None = None,
) -> list[Document]:
"""Parse .emlx files from directory into strings using MboxReader logic."""
directory = file # Rename for clarity - this is a directory of .emlx files
import os
import tempfile

View File

@@ -5,7 +5,6 @@ Supports Apple Mail on macOS.
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -65,7 +64,7 @@ class EmailRAG(BaseRAGExample):
return messages_dirs
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load emails and convert to text chunks."""
# Determine mail directories
if args.mail_path:

View File

@@ -86,7 +86,7 @@ class WeChatHistoryReader(BaseReader):
text=True,
timeout=5,
)
return result.returncode == 0 and bool(result.stdout.strip())
return result.returncode == 0 and result.stdout.strip()
except Exception:
return False
@@ -314,9 +314,7 @@ class WeChatHistoryReader(BaseReader):
return concatenated_groups
def _create_concatenated_content(
self, message_group: dict, contact_name: str
) -> tuple[str, str]:
def _create_concatenated_content(self, message_group: dict, contact_name: str) -> str:
"""
Create concatenated content from a group of messages.

View File

@@ -1,219 +0,0 @@
#!/usr/bin/env python3
"""
CLIP Image RAG Application
This application enables RAG (Retrieval-Augmented Generation) on images using CLIP embeddings.
You can index a directory of images and search them using text queries.
Usage:
python -m apps.image_rag --image-dir ./my_images/ --query "a sunset over mountains"
python -m apps.image_rag --image-dir ./my_images/ --interactive
"""
import argparse
import pickle
import tempfile
from pathlib import Path
from typing import Any
import numpy as np
from PIL import Image
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
from apps.base_rag_example import BaseRAGExample
class ImageRAG(BaseRAGExample):
"""
RAG application for images using CLIP embeddings.
This class provides a complete RAG pipeline for image data, including
CLIP embedding generation, indexing, and text-based image search.
"""
def __init__(self):
super().__init__(
name="Image RAG",
description="RAG application for images using CLIP embeddings",
default_index_name="image_index",
)
# Override default embedding model to use CLIP
self.embedding_model_default = "clip-ViT-L-14"
self.embedding_mode_default = "sentence-transformers"
self._image_data: list[dict] = []
def _add_specific_arguments(self, parser: argparse.ArgumentParser):
"""Add image-specific arguments."""
image_group = parser.add_argument_group("Image Parameters")
image_group.add_argument(
"--image-dir",
type=str,
required=True,
help="Directory containing images to index",
)
image_group.add_argument(
"--image-extensions",
type=str,
nargs="+",
default=[".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"],
help="Image file extensions to process (default: .jpg .jpeg .png .gif .bmp .webp)",
)
image_group.add_argument(
"--batch-size",
type=int,
default=32,
help="Batch size for CLIP embedding generation (default: 32)",
)
async def load_data(self, args) -> list[dict[str, Any]]:
"""Load images, generate CLIP embeddings, and return text descriptions."""
self._image_data = self._load_images_and_embeddings(args)
return [entry["text"] for entry in self._image_data]
def _load_images_and_embeddings(self, args) -> list[dict]:
"""Helper to process images and produce embeddings/metadata."""
image_dir = Path(args.image_dir)
if not image_dir.exists():
raise ValueError(f"Image directory does not exist: {image_dir}")
print(f"📸 Loading images from {image_dir}...")
# Find all image files
image_files = []
for ext in args.image_extensions:
image_files.extend(image_dir.rglob(f"*{ext}"))
image_files.extend(image_dir.rglob(f"*{ext.upper()}"))
if not image_files:
raise ValueError(
f"No images found in {image_dir} with extensions {args.image_extensions}"
)
print(f"✅ Found {len(image_files)} images")
# Limit if max_items is set
if args.max_items > 0:
image_files = image_files[: args.max_items]
print(f"📊 Processing {len(image_files)} images (limited by --max-items)")
# Load CLIP model
print("🔍 Loading CLIP model...")
model = SentenceTransformer(self.embedding_model_default)
# Process images and generate embeddings
print("🖼️ Processing images and generating embeddings...")
image_data = []
batch_images = []
batch_paths = []
for image_path in tqdm(image_files, desc="Processing images"):
try:
image = Image.open(image_path).convert("RGB")
batch_images.append(image)
batch_paths.append(image_path)
# Process in batches
if len(batch_images) >= args.batch_size:
embeddings = model.encode(
batch_images,
convert_to_numpy=True,
normalize_embeddings=True,
batch_size=args.batch_size,
show_progress_bar=False,
)
for img_path, embedding in zip(batch_paths, embeddings):
image_data.append(
{
"text": f"Image: {img_path.name}\nPath: {img_path}",
"metadata": {
"image_path": str(img_path),
"image_name": img_path.name,
"image_dir": str(image_dir),
},
"embedding": embedding.astype(np.float32),
}
)
batch_images = []
batch_paths = []
except Exception as e:
print(f"⚠️ Failed to process {image_path}: {e}")
continue
# Process remaining images
if batch_images:
embeddings = model.encode(
batch_images,
convert_to_numpy=True,
normalize_embeddings=True,
batch_size=len(batch_images),
show_progress_bar=False,
)
for img_path, embedding in zip(batch_paths, embeddings):
image_data.append(
{
"text": f"Image: {img_path.name}\nPath: {img_path}",
"metadata": {
"image_path": str(img_path),
"image_name": img_path.name,
"image_dir": str(image_dir),
},
"embedding": embedding.astype(np.float32),
}
)
print(f"✅ Processed {len(image_data)} images")
return image_data
async def build_index(self, args, texts: list[dict[str, Any]]) -> str:
"""Build index using pre-computed CLIP embeddings."""
from leann.api import LeannBuilder
if not self._image_data or len(self._image_data) != len(texts):
raise RuntimeError("No image data found. Make sure load_data() ran successfully.")
print("🔨 Building LEANN index with CLIP embeddings...")
builder = LeannBuilder(
backend_name=args.backend_name,
embedding_model=self.embedding_model_default,
embedding_mode=self.embedding_mode_default,
is_recompute=False,
distance_metric="cosine",
graph_degree=args.graph_degree,
build_complexity=args.build_complexity,
is_compact=not args.no_compact,
)
for text, data in zip(texts, self._image_data):
builder.add_text(text=text, metadata=data["metadata"])
ids = [str(i) for i in range(len(self._image_data))]
embeddings = np.array([data["embedding"] for data in self._image_data], dtype=np.float32)
with tempfile.NamedTemporaryFile(mode="wb", suffix=".pkl", delete=False) as f:
pickle.dump((ids, embeddings), f)
pkl_path = f.name
try:
index_path = str(Path(args.index_dir) / f"{self.default_index_name}.leann")
builder.build_index_from_embeddings(index_path, pkl_path)
print(f"✅ Index built successfully at {index_path}")
return index_path
finally:
Path(pkl_path).unlink()
def main():
"""Main entry point for the image RAG application."""
import asyncio
app = ImageRAG()
asyncio.run(app.run())
if __name__ == "__main__":
main()
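The app above works because CLIP embeds text and images into one vector space, so a text query can rank images directly. A sketch using the same model name as the app, with a brute-force cosine scan standing in for the LEANN index (downloads the model on first run):

```python
import numpy as np
from PIL import Image
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("clip-ViT-L-14")  # same model as image_rag above
images = [Image.new("RGB", (224, 224), c) for c in ("red", "blue")]
img_vecs = model.encode(images, normalize_embeddings=True)
query_vec = model.encode(["a red square"], normalize_embeddings=True)[0]
# Cosine similarity reduces to a dot product on normalized vectors; the
# highest-scoring image (likely index 0 here) is the best match.
print("best match:", int(np.argmax(img_vecs @ query_vec)))
```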

View File

@@ -6,7 +6,6 @@ This example demonstrates how to build a RAG system on your iMessage conversatio
import asyncio
from pathlib import Path
from typing import Any
from leann.chunking_utils import create_text_chunks
@@ -57,7 +56,7 @@ class IMessageRAG(BaseRAGExample):
help="Overlap between text chunks (default: 200)",
)
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load iMessage history and convert to text chunks."""
print("Loading iMessage conversation history...")

View File

@@ -1,7 +1,5 @@
import concurrent.futures
import glob
import json
import logging
import os
import re
import sys
@@ -13,8 +11,6 @@ import numpy as np
from PIL import Image
from tqdm import tqdm
logger = logging.getLogger(__name__)
def _ensure_repo_paths_importable(current_file: str) -> None:
"""Make local leann packages importable without installing (mirrors multi-vector-leann.py)."""
@@ -100,63 +96,12 @@ def _natural_sort_key(name: str) -> int:
return int(m.group()) if m else 0
def _load_images_from_dir(
pages_dir: str, recursive: bool = False
) -> tuple[list[str], list[Image.Image]]:
"""
Load images from a directory.
Args:
pages_dir: Directory path containing images
recursive: If True, recursively search subdirectories (default: False)
Returns:
Tuple of (filepaths, images)
"""
# Supported image extensions
extensions = ("*.png", "*.jpg", "*.jpeg", "*.PNG", "*.JPG", "*.JPEG", "*.webp", "*.WEBP")
if recursive:
# Recursive search
filepaths = []
for ext in extensions:
pattern = os.path.join(pages_dir, "**", ext)
filepaths.extend(glob.glob(pattern, recursive=True))
else:
# Non-recursive search (only top-level directory)
filepaths = []
for ext in extensions:
pattern = os.path.join(pages_dir, ext)
filepaths.extend(glob.glob(pattern))
# Sort files naturally
filepaths = sorted(filepaths, key=lambda x: _natural_sort_key(os.path.basename(x)))
# Load images with error handling
images = []
valid_filepaths = []
failed_count = 0
for filepath in filepaths:
try:
img = Image.open(filepath)
# Convert to RGB if necessary (handles RGBA, P, etc.)
if img.mode != "RGB":
img = img.convert("RGB")
images.append(img)
valid_filepaths.append(filepath)
except Exception as e:
failed_count += 1
print(f"Warning: Failed to load image {filepath}: {e}")
continue
if failed_count > 0:
print(
f"Warning: Failed to load {failed_count} image(s) out of {len(filepaths)} total files"
)
return valid_filepaths, images
def _load_images_from_dir(pages_dir: str) -> tuple[list[str], list[Image.Image]]:
filenames = [n for n in os.listdir(pages_dir) if n.lower().endswith((".png", ".jpg", ".jpeg"))]
filenames = sorted(filenames, key=_natural_sort_key)
filepaths = [os.path.join(pages_dir, n) for n in filenames]
images = [Image.open(p) for p in filepaths]
return filepaths, images
def _maybe_convert_pdf_to_images(pdf_path: Optional[str], pages_dir: str, dpi: int = 200) -> None:
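The body of this helper is cut off at the hunk boundary; a plausible sketch of what it does, using `pdf2image.convert_from_path` as the rest of this diff does (the skip-if-already-rendered check is an assumption):

```python
import os
from typing import Optional

from pdf2image import convert_from_path

def _maybe_convert_pdf_to_images(pdf_path: Optional[str], pages_dir: str, dpi: int = 200) -> None:
    # Skip when there is no PDF, or when pages_dir already holds rendered pages.
    if pdf_path is None or (os.path.isdir(pages_dir) and os.listdir(pages_dir)):
        return
    os.makedirs(pages_dir, exist_ok=True)
    for i, image in enumerate(convert_from_path(pdf_path, dpi=dpi), start=1):
        image.save(os.path.join(pages_dir, f"page_{i}.png"))
```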
@@ -206,99 +151,36 @@ def _select_device_and_dtype():
def _load_colvision(model_choice: str):
import os
import torch
from colpali_engine.models import (
ColPali,
ColQwen2,
ColQwen2_5,
ColQwen2_5_Processor,
ColQwen2Processor,
)
from colpali_engine.models import ColPali, ColQwen2, ColQwen2Processor
from colpali_engine.models.paligemma.colpali.processing_colpali import ColPaliProcessor
from transformers.utils.import_utils import is_flash_attn_2_available
# Force HuggingFace Hub to use HF endpoint, avoid Google Drive
# Set environment variables to ensure models are downloaded from HuggingFace
os.environ.setdefault("HF_ENDPOINT", "https://huggingface.co")
os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
# Log model loading info
logger.info(f"Loading ColVision model: {model_choice}")
logger.info(f"HF_ENDPOINT: {os.environ.get('HF_ENDPOINT', 'not set')}")
logger.info("Models will be downloaded from HuggingFace Hub, not Google Drive")
device_str, device, dtype = _select_device_and_dtype()
# Determine model name and type
# IMPORTANT: Check colqwen2.5 BEFORE colqwen2 to avoid false matches
model_choice_lower = model_choice.lower()
if model_choice == "colqwen2":
model_name = "vidore/colqwen2-v1.0"
model_type = "colqwen2"
elif model_choice == "colqwen2.5" or model_choice == "colqwen25":
model_name = "vidore/colqwen2.5-v0.2"
model_type = "colqwen2.5"
elif model_choice == "colpali":
model_name = "vidore/colpali-v1.2"
model_type = "colpali"
elif (
"colqwen2.5" in model_choice_lower
or "colqwen25" in model_choice_lower
or "colqwen2_5" in model_choice_lower
):
# Handle HuggingFace model names like "vidore/colqwen2.5-v0.2"
model_name = model_choice
model_type = "colqwen2.5"
elif "colqwen2" in model_choice_lower and "colqwen2-v1.0" in model_choice_lower:
# Handle HuggingFace model names like "vidore/colqwen2-v1.0" (but not colqwen2.5)
model_name = model_choice
model_type = "colqwen2"
elif "colpali" in model_choice_lower:
# Handle HuggingFace model names like "vidore/colpali-v1.2"
model_name = model_choice
model_type = "colpali"
else:
# Default to colpali for backward compatibility
model_name = "vidore/colpali-v1.2"
model_type = "colpali"
# Load model based on type
attn_implementation = (
"flash_attention_2" if (device_str == "cuda" and is_flash_attn_2_available()) else "eager"
)
# Load model from HuggingFace Hub (not Google Drive)
# Use local_files_only=False to ensure download from HF if not cached
if model_type == "colqwen2.5":
model = ColQwen2_5.from_pretrained(
model_name,
torch_dtype=torch.bfloat16,
device_map=device,
attn_implementation=attn_implementation,
local_files_only=False, # Ensure download from HuggingFace Hub
).eval()
processor = ColQwen2_5_Processor.from_pretrained(model_name, local_files_only=False)
elif model_type == "colqwen2":
# On CPU/MPS we must avoid flash-attn and stay eager; on CUDA prefer flash-attn if available
attn_implementation = (
"flash_attention_2"
if (device_str == "cuda" and is_flash_attn_2_available())
else "eager"
)
model = ColQwen2.from_pretrained(
model_name,
torch_dtype=torch.bfloat16,
device_map=device,
attn_implementation=attn_implementation,
local_files_only=False, # Ensure download from HuggingFace Hub
).eval()
processor = ColQwen2Processor.from_pretrained(model_name, local_files_only=False)
else: # colpali
processor = ColQwen2Processor.from_pretrained(model_name)
else:
model_name = "vidore/colpali-v1.2"
model = ColPali.from_pretrained(
model_name,
torch_dtype=torch.bfloat16,
device_map=device,
local_files_only=False, # Ensure download from HuggingFace Hub
).eval()
processor = cast(
ColPaliProcessor, ColPaliProcessor.from_pretrained(model_name, local_files_only=False)
)
processor = cast(ColPaliProcessor, ColPaliProcessor.from_pretrained(model_name))
return model_name, model, processor, device_str, device, dtype

View File

@@ -18,11 +18,10 @@ _repo_root = Path(__file__).resolve().parents[3]
_leann_core_src = _repo_root / "packages" / "leann-core" / "src"
_leann_hnsw_pkg = _repo_root / "packages" / "leann-backend-hnsw"
if str(_leann_core_src) not in sys.path:
sys.path.insert(0, str(_leann_core_src))
sys.path.append(str(_leann_core_src))
if str(_leann_hnsw_pkg) not in sys.path:
sys.path.insert(0, str(_leann_hnsw_pkg))
sys.path.append(str(_leann_hnsw_pkg))
from leann_multi_vector import LeannMultiVector
import torch
from colpali_engine.models import ColPali
@@ -94,9 +93,9 @@ for batch_doc in tqdm(dataloader):
print(ds[0].shape)
# %%
# Build HNSW index via LeannMultiVector primitives and run search
# Build HNSW index via LeannRetriever primitives and run search
index_path = "./indexes/colpali.leann"
retriever = LeannMultiVector(index_path=index_path, dim=int(ds[0].shape[-1]))
retriever = LeannRetriever(index_path=index_path, dim=int(ds[0].shape[-1]))
retriever.create_collection()
filepaths = [os.path.join("./pages", name) for name in page_filenames]
for i in range(len(filepaths)):

View File

@@ -5,7 +5,7 @@ import argparse
import faulthandler
import os
import time
from typing import Any, Optional, cast
from typing import Any, Optional
import numpy as np
from PIL import Image
@@ -62,7 +62,7 @@ DATASET_NAME: str = "weaviate/arXiv-AI-papers-multi-vector"
# DATASET_NAMES: Optional[list[str | tuple[str, Optional[str]]]] = None
DATASET_NAMES = [
"weaviate/arXiv-AI-papers-multi-vector",
# ("lmms-lab/DocVQA", "DocVQA"), # Specify config name for datasets with multiple configs
("lmms-lab/DocVQA", "DocVQA"), # Specify config name for datasets with multiple configs
]
# Load multiple splits to get more data (e.g., ["train", "test", "validation"])
# Set to None to try loading all available splits automatically
@@ -75,11 +75,6 @@ MAX_DOCS: Optional[int] = None # limit number of pages to index; None = all
# Local pages (used when USE_HF_DATASET == False)
PDF: Optional[str] = None # e.g., "./pdfs/2004.12832v2.pdf"
PAGES_DIR: str = "./pages"
# Custom folder path (takes precedence over USE_HF_DATASET and PAGES_DIR)
# If set, images will be loaded directly from this folder
CUSTOM_FOLDER_PATH: Optional[str] = None # e.g., "/home/ubuntu/dr-tulu/agent/screenshots"
# Whether to recursively search subdirectories when loading from custom folder
CUSTOM_FOLDER_RECURSIVE: bool = False # Set to True to search subdirectories
# Index + retrieval settings
# Use a different index path for larger dataset to avoid overwriting existing index
@@ -88,7 +83,7 @@ INDEX_PATH: str = "./indexes/colvision_large.leann"
# These are now command-line arguments (see CLI overrides section)
TOPK: int = 3
FIRST_STAGE_K: int = 500
REBUILD_INDEX: bool = False # Set to True to force rebuild even if index exists
REBUILD_INDEX: bool = True
# Artifacts
SAVE_TOP_IMAGE: Optional[str] = "./figures/retrieved_page.png"
@@ -133,33 +128,12 @@ parser.add_argument(
default=TOPK,
help=f"Number of top results to retrieve. Default: {TOPK}",
)
parser.add_argument(
"--custom-folder",
type=str,
default=None,
help="Path to a custom folder containing images to search. Takes precedence over dataset loading. Default: None",
)
parser.add_argument(
"--recursive",
action="store_true",
default=False,
help="Recursively search subdirectories when loading images from custom folder. Default: False",
)
parser.add_argument(
"--rebuild-index",
action="store_true",
default=False,
help="Force rebuild the index even if it already exists. Default: False (reuse existing index if available)",
)
cli_args, _unknown = parser.parse_known_args()
SEARCH_METHOD: str = cli_args.search_method
QUERY = cli_args.query # Override QUERY with CLI argument if provided
USE_FAST_PLAID: bool = cli_args.use_fast_plaid
FAST_PLAID_INDEX_PATH: str = cli_args.fast_plaid_index_path
TOPK: int = cli_args.topk # Override TOPK with CLI argument if provided
CUSTOM_FOLDER_PATH = cli_args.custom_folder if cli_args.custom_folder else CUSTOM_FOLDER_PATH # Override with CLI argument if provided
CUSTOM_FOLDER_RECURSIVE = cli_args.recursive if cli_args.recursive else CUSTOM_FOLDER_RECURSIVE # Override with CLI argument if provided
REBUILD_INDEX = cli_args.rebuild_index # Override REBUILD_INDEX with CLI argument
# %%
@@ -206,24 +180,8 @@ else:
# Step 2: Load data only if we need to build the index
if need_to_build_index:
print("Loading dataset...")
# Check for custom folder path first (takes precedence)
if CUSTOM_FOLDER_PATH:
if not os.path.isdir(CUSTOM_FOLDER_PATH):
raise RuntimeError(f"Custom folder path does not exist: {CUSTOM_FOLDER_PATH}")
print(f"Loading images from custom folder: {CUSTOM_FOLDER_PATH}")
if CUSTOM_FOLDER_RECURSIVE:
print(" (recursive mode: searching subdirectories)")
filepaths, images = _load_images_from_dir(CUSTOM_FOLDER_PATH, recursive=CUSTOM_FOLDER_RECURSIVE)
print(f" Found {len(filepaths)} image files")
if not images:
raise RuntimeError(
f"No images found in {CUSTOM_FOLDER_PATH}. Ensure the folder contains image files (.png, .jpg, .jpeg, .webp)."
)
print(f" Successfully loaded {len(images)} images")
# Use filenames as identifiers instead of full paths for cleaner metadata
filepaths = [os.path.basename(fp) for fp in filepaths]
elif USE_HF_DATASET:
from datasets import Dataset, DatasetDict, concatenate_datasets, load_dataset
if USE_HF_DATASET:
from datasets import load_dataset, concatenate_datasets, DatasetDict
# Determine which datasets to load
if DATASET_NAMES is not None:
@@ -281,12 +239,12 @@ if need_to_build_index:
splits_to_load = DATASET_SPLITS
# Load and concatenate multiple splits for this dataset
datasets_to_concat: list[Dataset] = []
datasets_to_concat = []
for split in splits_to_load:
if split not in dataset_dict:
print(f" Warning: Split '{split}' not found in dataset. Available splits: {list(dataset_dict.keys())}")
continue
split_dataset = cast(Dataset, dataset_dict[split])
split_dataset = dataset_dict[split]
print(f" Loaded split '{split}': {len(split_dataset)} pages")
datasets_to_concat.append(split_dataset)
@@ -663,6 +621,7 @@ else:
except Exception:
print(f"Saved retrieved page (rank {rank}) to: {out_path}")
## TODO: strange result: second page of DeepSeek-V2 is retrieved rather than the first page
# %%
# Step 6: Similarity maps for top-K results

View File

@@ -25,9 +25,9 @@ Usage:
import argparse
import json
import os
from typing import Any, Optional, cast
from typing import Optional
from datasets import Dataset, load_dataset
from datasets import load_dataset
from leann_multi_vector import (
ViDoReBenchmarkEvaluator,
_ensure_repo_paths_importable,
@@ -90,51 +90,6 @@ VIDORE_V1_TASKS = {
},
}
# Task name aliases (short names -> full names)
TASK_ALIASES = {
"arxivqa": "VidoreArxivQARetrieval",
"docvqa": "VidoreDocVQARetrieval",
"infovqa": "VidoreInfoVQARetrieval",
"tabfquad": "VidoreTabfquadRetrieval",
"tatdqa": "VidoreTatdqaRetrieval",
"shiftproject": "VidoreShiftProjectRetrieval",
"syntheticdocqa_ai": "VidoreSyntheticDocQAAIRetrieval",
"syntheticdocqa_energy": "VidoreSyntheticDocQAEnergyRetrieval",
"syntheticdocqa_government": "VidoreSyntheticDocQAGovernmentReportsRetrieval",
"syntheticdocqa_healthcare": "VidoreSyntheticDocQAHealthcareIndustryRetrieval",
}
def normalize_task_name(task_name: str) -> str:
"""Normalize task name (handle aliases)."""
task_name_lower = task_name.lower()
if task_name in VIDORE_V1_TASKS:
return task_name
if task_name_lower in TASK_ALIASES:
return TASK_ALIASES[task_name_lower]
# Try partial match
for alias, full_name in TASK_ALIASES.items():
if alias in task_name_lower or task_name_lower in alias:
return full_name
return task_name
def get_safe_model_name(model_name: str) -> str:
"""Get a safe model name for use in file paths."""
import hashlib
import os
# If it's a path, use basename or hash
if os.path.exists(model_name) and os.path.isdir(model_name):
# Use basename if it's reasonable, otherwise use hash
basename = os.path.basename(model_name.rstrip("/"))
if basename and len(basename) < 100 and not basename.startswith("."):
return basename
# Use hash for very long or problematic paths
return hashlib.md5(model_name.encode()).hexdigest()[:16]
# For HuggingFace model names, replace / with _
return model_name.replace("/", "_").replace(":", "_")
def load_vidore_v1_data(
dataset_path: str,
@@ -151,43 +106,40 @@ def load_vidore_v1_data(
"""
print(f"Loading dataset: {dataset_path} (split={split})")
# Load queries - cast to Dataset since we know split returns Dataset not DatasetDict
query_ds = cast(Dataset, load_dataset(dataset_path, "queries", split=split, revision=revision))
# Load queries
query_ds = load_dataset(dataset_path, "queries", split=split, revision=revision)
queries: dict[str, str] = {}
queries = {}
for row in query_ds:
row_dict = cast(dict[str, Any], row)
query_id = f"query-{split}-{row_dict['query-id']}"
queries[query_id] = row_dict["query"]
query_id = f"query-{split}-{row['query-id']}"
queries[query_id] = row["query"]
# Load corpus (images) - cast to Dataset
corpus_ds = cast(Dataset, load_dataset(dataset_path, "corpus", split=split, revision=revision))
# Load corpus (images)
corpus_ds = load_dataset(dataset_path, "corpus", split=split, revision=revision)
corpus: dict[str, Any] = {}
corpus = {}
for row in corpus_ds:
row_dict = cast(dict[str, Any], row)
corpus_id = f"corpus-{split}-{row_dict['corpus-id']}"
corpus_id = f"corpus-{split}-{row['corpus-id']}"
# Extract image from the dataset row
if "image" in row_dict:
corpus[corpus_id] = row_dict["image"]
elif "page_image" in row_dict:
corpus[corpus_id] = row_dict["page_image"]
if "image" in row:
corpus[corpus_id] = row["image"]
elif "page_image" in row:
corpus[corpus_id] = row["page_image"]
else:
raise ValueError(
f"No image field found in corpus. Available fields: {list(row_dict.keys())}"
f"No image field found in corpus. Available fields: {list(row.keys())}"
)
# Load qrels (relevance judgments) - cast to Dataset
qrels_ds = cast(Dataset, load_dataset(dataset_path, "qrels", split=split, revision=revision))
# Load qrels (relevance judgments)
qrels_ds = load_dataset(dataset_path, "qrels", split=split, revision=revision)
qrels: dict[str, dict[str, int]] = {}
qrels = {}
for row in qrels_ds:
row_dict = cast(dict[str, Any], row)
query_id = f"query-{split}-{row_dict['query-id']}"
corpus_id = f"corpus-{split}-{row_dict['corpus-id']}"
query_id = f"query-{split}-{row['query-id']}"
corpus_id = f"corpus-{split}-{row['corpus-id']}"
if query_id not in qrels:
qrels[query_id] = {}
qrels[query_id][corpus_id] = int(row_dict["score"])
qrels[query_id][corpus_id] = int(row["score"])
print(
f"Loaded {len(queries)} queries, {len(corpus)} corpus items, {len(qrels)} query-relevance mappings"
@@ -229,16 +181,13 @@ def evaluate_task(
print(f"Evaluating task: {task_name}")
print(f"{'=' * 80}")
# Normalize task name (handle aliases)
task_name = normalize_task_name(task_name)
# Get task config
if task_name not in VIDORE_V1_TASKS:
raise ValueError(f"Unknown task: {task_name}. Available: {list(VIDORE_V1_TASKS.keys())}")
task_config = VIDORE_V1_TASKS[task_name]
dataset_path = str(task_config["dataset_path"])
revision = str(task_config["revision"])
dataset_path = task_config["dataset_path"]
revision = task_config["revision"]
# Load data
corpus, queries, qrels = load_vidore_v1_data(
@@ -274,13 +223,11 @@ def evaluate_task(
)
# Build or load index
# Use safe model name for index path (different models need different indexes)
safe_model_name = get_safe_model_name(model_name)
index_path_full = index_path if not use_fast_plaid else fast_plaid_index_path
if index_path_full is None:
index_path_full = f"./indexes/{task_name}_{safe_model_name}"
index_path_full = f"./indexes/{task_name}_{model_name}"
if use_fast_plaid:
index_path_full = f"./indexes/{task_name}_{safe_model_name}_fastplaid"
index_path_full = f"./indexes/{task_name}_{model_name}_fastplaid"
index_or_retriever, corpus_ids_ordered = evaluator.build_index_from_corpus(
corpus=corpus,
@@ -289,7 +236,7 @@ def evaluate_task(
)
# Search queries
task_prompt = cast(Optional[dict[str, str]], task_config.get("prompt"))
task_prompt = task_config.get("prompt")
results = evaluator.search_queries(
queries=queries,
corpus_ids=corpus_ids_ordered,
@@ -334,7 +281,8 @@ def main():
"--model",
type=str,
default="colqwen2",
help="Model to use: 'colqwen2', 'colpali', or path to a model directory (supports LoRA adapters)",
choices=["colqwen2", "colpali"],
help="Model to use",
)
parser.add_argument(
"--task",
@@ -402,11 +350,11 @@ def main():
# Determine tasks to evaluate
if args.task:
tasks_to_eval = [normalize_task_name(args.task)]
tasks_to_eval = [args.task]
elif args.tasks.lower() == "all":
tasks_to_eval = list(VIDORE_V1_TASKS.keys())
else:
tasks_to_eval = [normalize_task_name(t.strip()) for t in args.tasks.split(",")]
tasks_to_eval = [t.strip() for t in args.tasks.split(",")]
print(f"Tasks to evaluate: {tasks_to_eval}")

View File

@@ -25,9 +25,9 @@ Usage:
import argparse
import json
import os
from typing import Any, Optional, cast
from typing import Optional
from datasets import Dataset, load_dataset
from datasets import load_dataset
from leann_multi_vector import (
ViDoReBenchmarkEvaluator,
_ensure_repo_paths_importable,
@@ -91,8 +91,8 @@ def load_vidore_v2_data(
"""
print(f"Loading dataset: {dataset_path} (split={split}, language={language})")
# Load queries - cast to Dataset since we know split returns Dataset not DatasetDict
query_ds = cast(Dataset, load_dataset(dataset_path, "queries", split=split, revision=revision))
# Load queries
query_ds = load_dataset(dataset_path, "queries", split=split, revision=revision)
# Check if dataset has language field before filtering
has_language_field = len(query_ds) > 0 and "language" in query_ds.column_names
@@ -112,9 +112,8 @@ def load_vidore_v2_data(
if len(query_ds_filtered) == 0:
# Try to get a sample to see actual language values
try:
sample_ds = cast(
Dataset,
load_dataset(dataset_path, "queries", split=split, revision=revision),
sample_ds = load_dataset(
dataset_path, "queries", split=split, revision=revision
)
if len(sample_ds) > 0 and "language" in sample_ds.column_names:
sample_langs = set(sample_ds["language"])
@@ -127,40 +126,37 @@ def load_vidore_v2_data(
)
query_ds = query_ds_filtered
queries: dict[str, str] = {}
queries = {}
for row in query_ds:
row_dict = cast(dict[str, Any], row)
query_id = f"query-{split}-{row_dict['query-id']}"
queries[query_id] = row_dict["query"]
query_id = f"query-{split}-{row['query-id']}"
queries[query_id] = row["query"]
# Load corpus (images) - cast to Dataset
corpus_ds = cast(Dataset, load_dataset(dataset_path, "corpus", split=split, revision=revision))
# Load corpus (images)
corpus_ds = load_dataset(dataset_path, "corpus", split=split, revision=revision)
corpus: dict[str, Any] = {}
corpus = {}
for row in corpus_ds:
row_dict = cast(dict[str, Any], row)
corpus_id = f"corpus-{split}-{row_dict['corpus-id']}"
corpus_id = f"corpus-{split}-{row['corpus-id']}"
# Extract image from the dataset row
if "image" in row_dict:
corpus[corpus_id] = row_dict["image"]
elif "page_image" in row_dict:
corpus[corpus_id] = row_dict["page_image"]
if "image" in row:
corpus[corpus_id] = row["image"]
elif "page_image" in row:
corpus[corpus_id] = row["page_image"]
else:
raise ValueError(
f"No image field found in corpus. Available fields: {list(row_dict.keys())}"
f"No image field found in corpus. Available fields: {list(row.keys())}"
)
# Load qrels (relevance judgments) - cast to Dataset
qrels_ds = cast(Dataset, load_dataset(dataset_path, "qrels", split=split, revision=revision))
# Load qrels (relevance judgments)
qrels_ds = load_dataset(dataset_path, "qrels", split=split, revision=revision)
qrels: dict[str, dict[str, int]] = {}
qrels = {}
for row in qrels_ds:
row_dict = cast(dict[str, Any], row)
query_id = f"query-{split}-{row_dict['query-id']}"
corpus_id = f"corpus-{split}-{row_dict['corpus-id']}"
query_id = f"query-{split}-{row['query-id']}"
corpus_id = f"corpus-{split}-{row['corpus-id']}"
if query_id not in qrels:
qrels[query_id] = {}
qrels[query_id][corpus_id] = int(row_dict["score"])
qrels[query_id][corpus_id] = int(row["score"])
print(
f"Loaded {len(queries)} queries, {len(corpus)} corpus items, {len(qrels)} query-relevance mappings"
@@ -208,13 +204,13 @@ def evaluate_task(
raise ValueError(f"Unknown task: {task_name}. Available: {list(VIDORE_V2_TASKS.keys())}")
task_config = VIDORE_V2_TASKS[task_name]
dataset_path = str(task_config["dataset_path"])
revision = str(task_config["revision"])
dataset_path = task_config["dataset_path"]
revision = task_config["revision"]
# Determine language
if language is None:
# Use first language if multiple available
languages = cast(Optional[list[str]], task_config.get("languages"))
languages = task_config.get("languages")
if languages is None:
# Task doesn't support language filtering (e.g., Vidore2ESGReportsHLRetrieval)
language = None
@@ -273,7 +269,7 @@ def evaluate_task(
)
# Search queries
task_prompt = cast(Optional[dict[str, str]], task_config.get("prompt"))
task_prompt = task_config.get("prompt")
results = evaluator.search_queries(
queries=queries,
corpus_ids=corpus_ids_ordered,

View File

@@ -7,7 +7,6 @@ for indexing in LEANN. It supports various Slack MCP server implementations and
flexible message processing options.
"""
import ast
import asyncio
import json
import logging
@@ -147,16 +146,16 @@ class SlackMCPReader:
match = re.search(r"'error':\s*(\{[^}]+\})", str(e))
if match:
try:
error_dict = ast.literal_eval(match.group(1))
except (ValueError, SyntaxError):
error_dict = eval(match.group(1))
except (ValueError, SyntaxError, NameError):
pass
else:
# Try alternative format
match = re.search(r"Failed to fetch messages:\s*(\{[^}]+\})", str(e))
if match:
try:
error_dict = ast.literal_eval(match.group(1))
except (ValueError, SyntaxError):
error_dict = eval(match.group(1))
except (ValueError, SyntaxError, NameError):
pass
if self._is_cache_sync_error(error_dict):
@@ -177,9 +176,7 @@ class SlackMCPReader:
break
# If we get here, all retries failed or it's not a retryable error
if last_exception is not None:
raise last_exception
raise RuntimeError("Unexpected error: no exception captured during retry loop")
raise last_exception
async def fetch_slack_messages(
self, channel: Optional[str] = None, limit: int = 100
@@ -269,10 +266,7 @@ class SlackMCPReader:
messages = json.loads(content["text"])
except json.JSONDecodeError:
# If not JSON, try to parse as CSV format (Slack MCP server format)
text_content = content.get("text", "")
messages = self._parse_csv_messages(
text_content if text_content else "", channel or "unknown"
)
messages = self._parse_csv_messages(content["text"], channel)
else:
messages = result["content"]
else:

View File

@@ -11,7 +11,6 @@ Usage:
import argparse
import asyncio
from typing import Any
from apps.base_rag_example import BaseRAGExample
from apps.slack_data.slack_mcp_reader import SlackMCPReader
@@ -140,7 +139,7 @@ class SlackMCPRAG(BaseRAGExample):
print("4. Try running the MCP server command directly to test it")
return False
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load Slack messages via MCP server."""
print(f"Connecting to Slack MCP server: {args.mcp_server}")
@@ -189,8 +188,7 @@ class SlackMCPRAG(BaseRAGExample):
print(sample_text)
print("-" * 40)
# Convert strings to dict format expected by base class
return [{"text": text, "metadata": {"source": "slack"}} for text in texts]
return texts
except Exception as e:
print(f"Error loading Slack data: {e}")

View File

@@ -11,7 +11,6 @@ Usage:
import argparse
import asyncio
from typing import Any
from apps.base_rag_example import BaseRAGExample
from apps.twitter_data.twitter_mcp_reader import TwitterMCPReader
@@ -117,7 +116,7 @@ class TwitterMCPRAG(BaseRAGExample):
print("5. Try running the MCP server command directly to test it")
return False
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load Twitter bookmarks via MCP server."""
print(f"Connecting to Twitter MCP server: {args.mcp_server}")
@@ -157,8 +156,7 @@ class TwitterMCPRAG(BaseRAGExample):
print(sample_text)
print("-" * 50)
# Convert strings to dict format expected by base class
return [{"text": text, "metadata": {"source": "twitter"}} for text in texts]
return texts
except Exception as e:
print(f"❌ Error loading Twitter bookmarks: {e}")

View File

@@ -6,7 +6,6 @@ Supports WeChat chat history export and search.
import subprocess
import sys
from pathlib import Path
from typing import Any
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
@@ -92,7 +91,7 @@ class WeChatRAG(BaseRAGExample):
print(f"Export error: {e}")
return False
async def load_data(self, args) -> list[dict[str, Any]]:
async def load_data(self, args) -> list[str]:
"""Load WeChat history and convert to text chunks."""
# Initialize WeChat reader with export capabilities
reader = WeChatHistoryReader()

View File

@@ -1,200 +0,0 @@
# ColQwen Integration Guide
Easy-to-use multimodal PDF retrieval with ColQwen2/ColPali models.
## Quick Start
> **🍎 Mac Users**: ColQwen is optimized for Apple Silicon with MPS acceleration for faster inference!
### 1. Install Dependencies
```bash
uv pip install colpali_engine pdf2image pillow matplotlib qwen_vl_utils einops seaborn
brew install poppler # macOS only, for PDF processing
```
### 2. Basic Usage
```bash
# Build index from PDFs
python -m apps.colqwen_rag build --pdfs ./my_papers/ --index research_papers
# Search with text queries
python -m apps.colqwen_rag search research_papers "How does attention mechanism work?"
# Interactive Q&A
python -m apps.colqwen_rag ask research_papers --interactive
```
## Commands
### Build Index
```bash
python -m apps.colqwen_rag build \
--pdfs ./pdf_directory/ \
--index my_index \
--model colqwen2 \
--pages-dir ./page_images/ # Optional: save page images
```
**Options:**
- `--pdfs`: Directory containing PDF files (or single PDF path)
- `--index`: Name for the index (required)
- `--model`: `colqwen2` (default) or `colpali`
- `--pages-dir`: Directory to save page images (optional)
### Search Index
```bash
python -m apps.colqwen_rag search my_index "your question here" --top-k 5
```
**Options:**
- `--top-k`: Number of results to return (default: 5)
- `--model`: Model used for search (should match build model)
### Interactive Q&A
```bash
python -m apps.colqwen_rag ask my_index --interactive
```
**Commands in interactive mode:**
- Type your questions naturally
- `help`: Show available commands
- `quit`/`exit`/`q`: Exit interactive mode
## 🧪 Test & Reproduce Results
Run the reproduction test for issue #119:
```bash
python test_colqwen_reproduction.py
```
This will:
1. ✅ Check dependencies
2. 📥 Download sample PDF (Attention Is All You Need paper)
3. 🏗️ Build test index
4. 🔍 Run sample queries
5. 📊 Show how to generate similarity maps
## 🎨 Advanced: Similarity Maps
For visual similarity analysis, use the existing advanced script:
```bash
cd apps/multimodal/vision-based-pdf-multi-vector/
python multi-vector-leann-similarity-map.py
```
Edit the script to customize:
- `QUERY`: Your question
- `MODEL`: "colqwen2" or "colpali"
- `USE_HF_DATASET`: Use HuggingFace dataset or local PDFs
- `SIMILARITY_MAP`: Generate heatmaps
- `ANSWER`: Enable Qwen-VL answer generation
## 🔧 How It Works
### ColQwen2 vs ColPali
- **ColQwen2** (`vidore/colqwen2-v1.0`): Latest vision-language model
- **ColPali** (`vidore/colpali-v1.2`): Proven multimodal retriever
### Architecture
1. **PDF → Images**: Convert PDF pages to images (150 DPI)
2. **Vision Encoding**: Process images with ColQwen2/ColPali
3. **Multi-Vector Index**: Build LEANN HNSW index with multiple embeddings per page
4. **Query Processing**: Encode text queries with same model
5. **Similarity Search**: Find most relevant pages/regions
6. **Visual Maps**: Generate attention heatmaps (optional)
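The first two steps map directly onto `pdf2image` and the `colpali_engine` model classes. A minimal sketch, assuming a CUDA machine and the model name used above (the PDF path is illustrative):

```python
import torch
from pdf2image import convert_from_path
from colpali_engine.models import ColQwen2, ColQwen2Processor

# Step 1: PDF -> page images at 150 DPI (requires poppler, see Troubleshooting)
pages = convert_from_path("paper.pdf", dpi=150)

# Step 2: encode pages into multi-vector embeddings (one set of vectors per page)
model = ColQwen2.from_pretrained(
    "vidore/colqwen2-v1.0", torch_dtype=torch.bfloat16
).to("cuda").eval()
processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v1.0")

batch = processor.process_images(pages).to(model.device)
with torch.no_grad():
    page_embeddings = model(**batch)
```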
### Device Support
- **CUDA**: Best performance with GPU acceleration
- **MPS**: Apple Silicon Mac support
- **CPU**: Fallback for any system (slower)
Auto-detection: CUDA > MPS > CPU
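The auto-detection order can be reproduced in a few lines (a sketch; the helper name is ours, not part of the app):

```python
import torch

def pick_device() -> str:
    """Prefer CUDA, then Apple Silicon MPS, then CPU."""
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"
```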
## 📊 Performance Tips
### For Best Performance:
```bash
# Use ColQwen2 for latest features
--model colqwen2
# Save page images for reuse
--pages-dir ./cached_pages/
# Adjust batch size based on GPU memory
# (automatically handled)
```
### For Large Document Sets:
- Process PDFs in batches
- Use SSD storage for index files
- Consider using CUDA if available
## 🔗 Related Resources
- **Fast-PLAID**: https://github.com/lightonai/fast-plaid
- **Pylate**: https://github.com/lightonai/pylate
- **ColBERT**: https://github.com/stanford-futuredata/ColBERT
- **ColPali Paper**: Vision-Language Models for Document Retrieval
- **Issue #119**: https://github.com/yichuan-w/LEANN/issues/119
## 🐛 Troubleshooting
### PDF Conversion Issues (macOS)
```bash
# Install poppler
brew install poppler
which pdfinfo && pdfinfo -v
```
### Memory Issues
- Reduce batch size (automatically handled)
- Use CPU instead of GPU: `export CUDA_VISIBLE_DEVICES=""`
- Process fewer PDFs at once
### Model Download Issues
- Ensure internet connection for first run
- Models are cached after first download
- Use HuggingFace mirrors if needed
### Import Errors
```bash
# Ensure all dependencies installed
uv pip install colpali_engine pdf2image pillow matplotlib qwen_vl_utils einops seaborn
# Check PyTorch installation
python -c "import torch; print(torch.__version__)"
```
## 💡 Examples
### Research Paper Analysis
```bash
# Index your research papers
python -m apps.colqwen_rag build --pdfs ~/Papers/AI/ --index ai_papers
# Ask research questions
python -m apps.colqwen_rag search ai_papers "What are the limitations of transformer models?"
python -m apps.colqwen_rag search ai_papers "How does BERT compare to GPT?"
```
### Document Q&A
```bash
# Index business documents
python -m apps.colqwen_rag build --pdfs ~/Documents/Reports/ --index reports
# Interactive analysis
python -m apps.colqwen_rag ask reports --interactive
```
### Visual Analysis
```bash
# Generate similarity maps for specific queries
cd apps/multimodal/vision-based-pdf-multi-vector/
# Edit multi-vector-leann-similarity-map.py with your query
python multi-vector-leann-similarity-map.py
# Check ./figures/ for generated heatmaps
```
---
**🎯 This integration makes ColQwen as easy to use as other LEANN features while maintaining the full power of multimodal document understanding!**

View File

@@ -158,95 +158,6 @@ builder.build_index("./indexes/my-notes", chunks)
`embedding_options` is persisted to the index `meta.json`, so subsequent `LeannSearcher` or `LeannChat` sessions automatically reuse the same provider settings (the embedding server manager forwards them to the provider for you).
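In practice that means a follow-up session needs nothing beyond the index path; a minimal sketch (path and query are illustrative):

```python
from leann.api import LeannSearcher

# Provider settings (mode, model, base_url, ...) were written to the index
# metadata at build time, so nothing needs to be repeated here.
searcher = LeannSearcher(index_path="./indexes/my-notes")
results = searcher.search("quarterly revenue summary", top_k=5)
```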
## Optional Embedding Features
### Task-Specific Prompt Templates
Some embedding models are trained with task-specific prompts to differentiate between documents and queries. The most notable example is **Google's EmbeddingGemma**, which requires different prompts depending on the use case:
- **Indexing documents**: `"title: none | text: "`
- **Search queries**: `"task: search result | query: "`
LEANN supports automatic prompt prepending via the `--embedding-prompt-template` flag:
```bash
# Build index with EmbeddingGemma (via LM Studio or Ollama)
leann build my-docs \
--docs ./documents \
--embedding-mode openai \
--embedding-model text-embedding-embeddinggemma-300m-qat \
--embedding-api-base http://localhost:1234/v1 \
--embedding-prompt-template "title: none | text: " \
--force
# Search with query-specific prompt
leann search my-docs \
--query "What is quantum computing?" \
--embedding-prompt-template "task: search result | query: "
```
**Important Notes:**
- **Only use with compatible models**: EmbeddingGemma and similar task-specific models
- **NOT for regular models**: Adding prompts to models like `nomic-embed-text`, `text-embedding-3-small`, or `bge-base-en-v1.5` will corrupt embeddings
- **Template is saved**: Build-time templates are saved to `.meta.json` for reference
- **Flexible prompts**: You can use any prompt string, or leave it empty (`""`)
**Python API:**
```python
from leann.api import LeannBuilder
builder = LeannBuilder(
embedding_mode="openai",
embedding_model="text-embedding-embeddinggemma-300m-qat",
embedding_options={
"base_url": "http://localhost:1234/v1",
"api_key": "lm-studio",
"prompt_template": "title: none | text: ",
},
)
builder.build_index("./indexes/my-docs", chunks)
```
**References:**
- [HuggingFace Blog: EmbeddingGemma](https://huggingface.co/blog/embeddinggemma) - Technical details
### LM Studio Auto-Detection (Optional)
When using LM Studio with the OpenAI-compatible API, LEANN can optionally auto-detect model context lengths via the LM Studio SDK. This eliminates manual configuration for token limits.
**Prerequisites:**
```bash
# Install Node.js (if not already installed)
# Then install the LM Studio SDK globally
npm install -g @lmstudio/sdk
```
**How it works:**
1. LEANN detects LM Studio URLs (`:1234`, `lmstudio` in URL)
2. Queries model metadata via Node.js subprocess
3. Automatically unloads model after query (respects your JIT auto-evict settings)
4. Falls back to static registry if SDK unavailable
**No configuration needed** - it works automatically when SDK is installed:
```bash
leann build my-docs \
--docs ./documents \
--embedding-mode openai \
--embedding-model text-embedding-nomic-embed-text-v1.5 \
--embedding-api-base http://localhost:1234/v1
# Context length auto-detected if SDK available
# Falls back to registry (2048) if not
```
**Benefits:**
- ✅ Automatic token limit detection
- ✅ Respects LM Studio JIT auto-evict settings
- ✅ No manual registry maintenance
- ✅ Graceful fallback if SDK unavailable
**Note:** This is completely optional. LEANN works perfectly fine without the SDK using the built-in token limit registry.
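Conceptually, the resolution order reduces to a small fallback chain. A simplified sketch (the function and registry names here are illustrative, not the library's actual internals):

```python
from typing import Optional

TOKEN_LIMIT_REGISTRY = {"text-embedding-nomic-embed-text-v1.5": 2048}

def query_lmstudio_sdk(model_name: str, base_url: str) -> Optional[int]:
    # Placeholder: the real path shells out to `node -e ...` via @lmstudio/sdk
    # and parses the reported context length; returns None if the SDK is absent.
    return None

def resolve_token_limit(model_name: str, base_url: str, default: int = 2048) -> int:
    """Detect LM Studio URLs, try the SDK bridge, then fall back to the registry."""
    if "1234" in base_url or "lmstudio" in base_url.lower():
        limit = query_lmstudio_sdk(model_name, base_url)
        if limit:
            return limit  # SDK-detected context length wins
    return TOKEN_LIMIT_REGISTRY.get(model_name, default)
```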
## Index Selection: Matching Your Scale
### HNSW (Hierarchical Navigable Small World)
@@ -454,7 +365,7 @@ leann search my-index "your query" \
### 2) Run remote builds with SkyPilot (cloud GPU)
Offload embedding generation and index building to a GPU VM using [SkyPilot](https://docs.skypilot.co/en/latest/docs/index.html). A template is provided at `sky/leann-build.yaml`.
Offload embedding generation and index building to a GPU VM using [SkyPilot](https://skypilot.readthedocs.io/en/latest/). A template is provided at `sky/leann-build.yaml`.
```bash
# One-time: install and configure SkyPilot

View File

@@ -8,51 +8,3 @@ You can speed up the process by using a lightweight embedding model. Add this to
--embedding-model sentence-transformers/all-MiniLM-L6-v2
```
**Model sizes:** `all-MiniLM-L6-v2` (30M parameters), `facebook/contriever` (~100M parameters), `Qwen3-0.6B` (600M parameters)
## 2. When should I use prompt templates?
**Use prompt templates ONLY with task-specific embedding models** like Google's EmbeddingGemma. These models are specially trained to use different prompts for documents vs queries.
**DO NOT use with regular models** like `nomic-embed-text`, `text-embedding-3-small`, or `bge-base-en-v1.5` - adding prompts to these models will corrupt the embeddings.
**Example usage with EmbeddingGemma:**
```bash
# Build with document prompt
leann build my-docs --embedding-prompt-template "title: none | text: "
# Search with query prompt
leann search my-docs --query "your question" --embedding-prompt-template "task: search result | query: "
```
See the [Configuration Guide: Task-Specific Prompt Templates](configuration-guide.md#task-specific-prompt-templates) for detailed usage.
## 3. Why is LM Studio loading multiple copies of my model?
This was fixed in recent versions. LEANN now properly unloads models after querying metadata, respecting your LM Studio JIT auto-evict settings.
**If you still see duplicates:**
- Update to the latest LEANN version
- Restart LM Studio to clear loaded models
- Check that you have JIT auto-evict enabled in LM Studio settings
**How it works now:**
1. LEANN loads model temporarily to get context length
2. Immediately unloads after query
3. LM Studio JIT loads model on-demand for actual embeddings
4. Auto-evicts per your settings
## 4. Do I need Node.js and @lmstudio/sdk?
**No, it's completely optional.** LEANN works perfectly fine without them using a built-in token limit registry.
**Benefits if you install it:**
- Automatic context length detection for LM Studio models
- No manual registry maintenance
- Always gets accurate token limits from the model itself
**To install (optional):**
```bash
npm install -g @lmstudio/sdk
```
See [Configuration Guide: LM Studio Auto-Detection](configuration-guide.md#lm-studio-auto-detection-optional) for details.

View File

@@ -4,8 +4,8 @@ build-backend = "scikit_build_core.build"
[project]
name = "leann-backend-diskann"
version = "0.3.5"
dependencies = ["leann-core==0.3.5", "numpy", "protobuf>=3.19.0"]
version = "0.3.4"
dependencies = ["leann-core==0.3.4", "numpy", "protobuf>=3.19.0"]
[tool.scikit-build]
# Key: simplified CMake path

View File

@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"
[project]
name = "leann-backend-hnsw"
version = "0.3.5"
version = "0.3.4"
description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
dependencies = [
"leann-core==0.3.5",
"leann-core==0.3.4",
"numpy",
"pyzmq>=23.0.0",
"msgpack>=1.0.0",

View File

@@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta"
[project]
name = "leann-core"
version = "0.3.5"
version = "0.3.4"
description = "Core API and plugin system for LEANN"
readme = "README.md"
requires-python = ">=3.10"
requires-python = ">=3.9"
license = { text = "MIT" }
# All required dependencies included

View File

@@ -916,7 +916,6 @@ class LeannSearcher:
metadata_filters: Optional[dict[str, dict[str, Union[str, int, float, bool, list]]]] = None,
batch_size: int = 0,
use_grep: bool = False,
provider_options: Optional[dict[str, Any]] = None,
**kwargs,
) -> list[SearchResult]:
"""
@@ -980,24 +979,10 @@ class LeannSearcher:
start_time = time.time()
# Extract query template from stored embedding_options with fallback chain:
# 1. Check provider_options override (highest priority)
# 2. Check query_prompt_template (new format)
# 3. Check prompt_template (old format for backward compat)
# 4. None (no template)
query_template = None
if provider_options and "prompt_template" in provider_options:
query_template = provider_options["prompt_template"]
elif "query_prompt_template" in self.embedding_options:
query_template = self.embedding_options["query_prompt_template"]
elif "prompt_template" in self.embedding_options:
query_template = self.embedding_options["prompt_template"]
query_embedding = self.backend_impl.compute_query_embedding(
query,
use_server_if_available=recompute_embeddings,
zmq_port=zmq_port,
query_template=query_template,
)
logger.info(f" Generated embedding shape: {query_embedding.shape}")
embedding_time = time.time() - start_time
@@ -1251,15 +1236,15 @@ class LeannChat:
"Please provide the best answer you can based on this context and your knowledge."
)
logger.info("The context provided to the LLM is:")
logger.info(f"{'Relevance':<10} | {'Chunk id':<10} | {'Content':<60} | {'Source':<80}")
logger.info("-" * 150)
print("The context provided to the LLM is:")
print(f"{'Relevance':<10} | {'Chunk id':<10} | {'Content':<60} | {'Source':<80}")
print("-" * 150)
for r in results:
chunk_relevance = f"{r.score:.3f}"
chunk_id = r.id
chunk_content = r.text[:60]
chunk_source = r.metadata.get("source", "")[:80]
logger.info(
print(
f"{chunk_relevance:<10} | {chunk_id:<10} | {chunk_content:<60} | {chunk_source:<80}"
)
ask_time = time.time()

View File

@@ -12,13 +12,7 @@ from typing import Any, Optional
import torch
from .settings import (
resolve_anthropic_api_key,
resolve_anthropic_base_url,
resolve_ollama_host,
resolve_openai_api_key,
resolve_openai_base_url,
)
from .settings import resolve_ollama_host, resolve_openai_api_key, resolve_openai_base_url
# Configure logging
logging.basicConfig(level=logging.INFO)
@@ -851,81 +845,6 @@ class OpenAIChat(LLMInterface):
return f"Error: Could not get a response from OpenAI. Details: {e}"
class AnthropicChat(LLMInterface):
"""LLM interface for Anthropic Claude models."""
def __init__(
self,
model: str = "claude-haiku-4-5",
api_key: Optional[str] = None,
base_url: Optional[str] = None,
):
self.model = model
self.base_url = resolve_anthropic_base_url(base_url)
self.api_key = resolve_anthropic_api_key(api_key)
if not self.api_key:
raise ValueError(
"Anthropic API key is required. Set ANTHROPIC_API_KEY environment variable or pass api_key parameter."
)
logger.info(
"Initializing Anthropic Chat with model='%s' and base_url='%s'",
model,
self.base_url,
)
try:
import anthropic
# Allow custom Anthropic-compatible endpoints via base_url
self.client = anthropic.Anthropic(
api_key=self.api_key,
base_url=self.base_url,
)
except ImportError:
raise ImportError(
"The 'anthropic' library is required for Anthropic models. Please install it with 'pip install anthropic'."
)
def ask(self, prompt: str, **kwargs) -> str:
logger.info(f"Sending request to Anthropic with model {self.model}")
try:
# Anthropic API parameters
params = {
"model": self.model,
"max_tokens": kwargs.get("max_tokens", 1000),
"messages": [{"role": "user", "content": prompt}],
}
# Add optional parameters
if "temperature" in kwargs:
params["temperature"] = kwargs["temperature"]
if "top_p" in kwargs:
params["top_p"] = kwargs["top_p"]
response = self.client.messages.create(**params)
# Extract text from response
response_text = response.content[0].text
# Log token usage
print(
f"Total tokens = {response.usage.input_tokens + response.usage.output_tokens}, "
f"input tokens = {response.usage.input_tokens}, "
f"output tokens = {response.usage.output_tokens}"
)
if response.stop_reason == "max_tokens":
print("The query is exceeding the maximum allowed number of tokens")
return response_text.strip()
except Exception as e:
logger.error(f"Error communicating with Anthropic: {e}")
return f"Error: Could not get a response from Anthropic. Details: {e}"
class SimulatedChat(LLMInterface):
"""A simple simulated chat for testing and development."""
@@ -978,12 +897,6 @@ def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
)
elif llm_type == "gemini":
return GeminiChat(model=model or "gemini-2.5-flash", api_key=llm_config.get("api_key"))
elif llm_type == "anthropic":
return AnthropicChat(
model=model or "claude-3-5-sonnet-20241022",
api_key=llm_config.get("api_key"),
base_url=llm_config.get("base_url"),
)
elif llm_type == "simulated":
return SimulatedChat()
else:

View File

@@ -239,11 +239,11 @@ def create_ast_chunks(
chunks = chunk_builder.chunkify(code_content)
for chunk in chunks:
chunk_text: str | None = None
astchunk_metadata: dict[str, Any] = {}
chunk_text = None
astchunk_metadata = {}
if hasattr(chunk, "text"):
chunk_text = str(chunk.text) if chunk.text else None
chunk_text = chunk.text
elif isinstance(chunk, str):
chunk_text = chunk
elif isinstance(chunk, dict):

View File

@@ -11,15 +11,10 @@ from tqdm import tqdm
from .api import LeannBuilder, LeannChat, LeannSearcher
from .interactive_utils import create_cli_session
from .registry import register_project_directory
from .settings import (
resolve_anthropic_base_url,
resolve_ollama_host,
resolve_openai_api_key,
resolve_openai_base_url,
)
from .settings import resolve_ollama_host, resolve_openai_api_key, resolve_openai_base_url
def extract_pdf_text_with_pymupdf(file_path: str) -> str | None:
def extract_pdf_text_with_pymupdf(file_path: str) -> str:
"""Extract text from PDF using PyMuPDF for better quality."""
try:
import fitz # PyMuPDF
@@ -35,7 +30,7 @@ def extract_pdf_text_with_pymupdf(file_path: str) -> str | None:
return None
def extract_pdf_text_with_pdfplumber(file_path: str) -> str | None:
def extract_pdf_text_with_pdfplumber(file_path: str) -> str:
"""Extract text from PDF using pdfplumber for better quality."""
try:
import pdfplumber
@@ -149,18 +144,6 @@ Examples:
default=None,
help="API key for embedding service (defaults to OPENAI_API_KEY)",
)
build_parser.add_argument(
"--embedding-prompt-template",
type=str,
default=None,
help="Prompt template to prepend to all texts for embedding (e.g., 'query: ' for search)",
)
build_parser.add_argument(
"--query-prompt-template",
type=str,
default=None,
help="Prompt template for queries (different from build template for task-specific models)",
)
build_parser.add_argument(
"--force", "-f", action="store_true", help="Force rebuild existing index"
)
@@ -277,12 +260,6 @@ Examples:
action="store_true",
help="Display file paths and metadata in search results",
)
search_parser.add_argument(
"--embedding-prompt-template",
type=str,
default=None,
help="Prompt template to prepend to query for embedding (e.g., 'query: ' for search)",
)
# Ask command
ask_parser = subparsers.add_parser("ask", help="Ask questions")
@@ -296,7 +273,7 @@ Examples:
"--llm",
type=str,
default="ollama",
choices=["simulated", "ollama", "hf", "openai", "anthropic"],
choices=["simulated", "ollama", "hf", "openai"],
help="LLM provider (default: ollama)",
)
ask_parser.add_argument(
@@ -346,7 +323,7 @@ Examples:
"--api-key",
type=str,
default=None,
help="API key for cloud LLM providers (OpenAI, Anthropic)",
help="API key for OpenAI-compatible APIs (defaults to OPENAI_API_KEY)",
)
# List command
@@ -1185,11 +1162,6 @@ Examples:
print(f"Warning: Could not process {file_path}: {e}")
# Load other file types with default reader
# Exclude PDFs from code_extensions if they were already processed separately
other_file_extensions = code_extensions
if should_process_pdfs and ".pdf" in code_extensions:
other_file_extensions = [ext for ext in code_extensions if ext != ".pdf"]
try:
# Create a custom file filter function using our PathSpec
def file_filter(
@@ -1205,19 +1177,15 @@ Examples:
except (ValueError, OSError):
return True # Include files that can't be processed
# Only load other file types if there are extensions to process
if other_file_extensions:
other_docs = SimpleDirectoryReader(
docs_dir,
recursive=True,
encoding="utf-8",
required_exts=other_file_extensions,
file_extractor={}, # Use default extractors
exclude_hidden=not include_hidden,
filename_as_id=True,
).load_data(show_progress=True)
else:
other_docs = []
other_docs = SimpleDirectoryReader(
docs_dir,
recursive=True,
encoding="utf-8",
required_exts=code_extensions,
file_extractor={}, # Use default extractors
exclude_hidden=not include_hidden,
filename_as_id=True,
).load_data(show_progress=True)
# Filter documents after loading based on gitignore rules
filtered_docs = []
@@ -1430,14 +1398,6 @@ Examples:
resolved_embedding_key = resolve_openai_api_key(args.embedding_api_key)
if resolved_embedding_key:
embedding_options["api_key"] = resolved_embedding_key
if args.query_prompt_template:
# New format: separate templates
if args.embedding_prompt_template:
embedding_options["build_prompt_template"] = args.embedding_prompt_template
embedding_options["query_prompt_template"] = args.query_prompt_template
elif args.embedding_prompt_template:
# Old format: single template (backward compat)
embedding_options["prompt_template"] = args.embedding_prompt_template
builder = LeannBuilder(
backend_name=args.backend_name,
@@ -1559,11 +1519,6 @@ Examples:
print("Invalid input. Aborting search.")
return
# Build provider_options for runtime override
provider_options = {}
if args.embedding_prompt_template:
provider_options["prompt_template"] = args.embedding_prompt_template
searcher = LeannSearcher(index_path=index_path)
results = searcher.search(
query,
@@ -1573,7 +1528,6 @@ Examples:
prune_ratio=args.prune_ratio,
recompute_embeddings=args.recompute_embeddings,
pruning_strategy=args.pruning_strategy,
provider_options=provider_options if provider_options else None,
)
print(f"Search results for '{query}' (top {len(results)}):")
@@ -1621,12 +1575,6 @@ Examples:
resolved_api_key = resolve_openai_api_key(args.api_key)
if resolved_api_key:
llm_config["api_key"] = resolved_api_key
elif args.llm == "anthropic":
# For Anthropic, pass base_url and API key if provided
if args.api_base:
llm_config["base_url"] = resolve_anthropic_base_url(args.api_base)
if args.api_key:
llm_config["api_key"] = args.api_key
chat = LeannChat(index_path=index_path, llm_config=llm_config)

View File

@@ -4,10 +4,8 @@ Consolidates all embedding computation logic using SentenceTransformer
Preserves all optimization parameters to ensure performance
"""
import json
import logging
import os
import subprocess
import time
from typing import Any, Optional
@@ -42,11 +40,6 @@ EMBEDDING_MODEL_LIMITS = {
"text-embedding-ada-002": 8192,
}
# Runtime cache for dynamically discovered token limits
# Key: (model_name, base_url), Value: token_limit
# Prevents repeated SDK/API calls for the same model
_token_limit_cache: dict[tuple[str, str], int] = {}
def get_model_token_limit(
model_name: str,
@@ -56,7 +49,6 @@ def get_model_token_limit(
"""
Get token limit for a given embedding model.
Uses hybrid approach: dynamic discovery for Ollama, registry fallback for others.
Caches discovered limits to prevent repeated API/SDK calls.
Args:
model_name: Name of the embedding model
@@ -66,33 +58,12 @@ def get_model_token_limit(
Returns:
Token limit for the model in tokens
"""
# Check cache first to avoid repeated SDK/API calls
cache_key = (model_name, base_url or "")
if cache_key in _token_limit_cache:
cached_limit = _token_limit_cache[cache_key]
logger.debug(f"Using cached token limit for {model_name}: {cached_limit}")
return cached_limit
# Try Ollama dynamic discovery if base_url provided
if base_url:
# Detect Ollama servers by port or "ollama" in URL
if "11434" in base_url or "ollama" in base_url.lower():
limit = _query_ollama_context_limit(model_name, base_url)
if limit:
_token_limit_cache[cache_key] = limit
return limit
# Try LM Studio SDK discovery
if "1234" in base_url or "lmstudio" in base_url.lower() or "lm.studio" in base_url.lower():
# Convert HTTP to WebSocket URL
ws_url = base_url.replace("https://", "wss://").replace("http://", "ws://")
# Remove /v1 suffix if present
if ws_url.endswith("/v1"):
ws_url = ws_url[:-3]
limit = _query_lmstudio_context_limit(model_name, ws_url)
if limit:
_token_limit_cache[cache_key] = limit
return limit
# Fallback to known model registry with version handling (from PR #154)
@@ -101,25 +72,19 @@ def get_model_token_limit(
# Check exact match first
if model_name in EMBEDDING_MODEL_LIMITS:
limit = EMBEDDING_MODEL_LIMITS[model_name]
_token_limit_cache[cache_key] = limit
return limit
return EMBEDDING_MODEL_LIMITS[model_name]
# Check base name match
if base_model_name in EMBEDDING_MODEL_LIMITS:
limit = EMBEDDING_MODEL_LIMITS[base_model_name]
_token_limit_cache[cache_key] = limit
return limit
return EMBEDDING_MODEL_LIMITS[base_model_name]
# Check partial matches for common patterns
for known_model, registry_limit in EMBEDDING_MODEL_LIMITS.items():
for known_model, limit in EMBEDDING_MODEL_LIMITS.items():
if known_model in base_model_name or base_model_name in known_model:
_token_limit_cache[cache_key] = registry_limit
return registry_limit
return limit
# Default fallback
logger.warning(f"Unknown model '{model_name}', using default {default} token limit")
_token_limit_cache[cache_key] = default
return default
@@ -220,91 +185,6 @@ def _query_ollama_context_limit(model_name: str, base_url: str) -> Optional[int]
return None
def _query_lmstudio_context_limit(model_name: str, base_url: str) -> Optional[int]:
"""
Query LM Studio SDK for model context length via Node.js subprocess.
Args:
model_name: Name of the LM Studio model
base_url: Base URL of the LM Studio server (WebSocket format, e.g., "ws://localhost:1234")
Returns:
Context limit in tokens if found, None otherwise
"""
# Inline JavaScript using @lmstudio/sdk
# Note: Load model temporarily for metadata, then unload to respect JIT auto-evict
js_code = f"""
const {{ LMStudioClient }} = require('@lmstudio/sdk');
(async () => {{
try {{
const client = new LMStudioClient({{ baseUrl: '{base_url}' }});
const model = await client.embedding.load('{model_name}', {{ verbose: false }});
const contextLength = await model.getContextLength();
await model.unload(); // Unload immediately to respect JIT auto-evict settings
console.log(JSON.stringify({{ contextLength, identifier: '{model_name}' }}));
}} catch (error) {{
console.error(JSON.stringify({{ error: error.message }}));
process.exit(1);
}}
}})();
"""
try:
# Set NODE_PATH to include global modules for @lmstudio/sdk resolution
env = os.environ.copy()
# Try to get npm global root (works with nvm, brew node, etc.)
try:
npm_root = subprocess.run(
["npm", "root", "-g"],
capture_output=True,
text=True,
timeout=5,
)
if npm_root.returncode == 0:
global_modules = npm_root.stdout.strip()
# Append to existing NODE_PATH if present
existing_node_path = env.get("NODE_PATH", "")
env["NODE_PATH"] = (
f"{global_modules}:{existing_node_path}"
if existing_node_path
else global_modules
)
except Exception:
# If npm not available, continue with existing NODE_PATH
pass
result = subprocess.run(
["node", "-e", js_code],
capture_output=True,
text=True,
timeout=10,
env=env,
)
if result.returncode != 0:
logger.debug(f"LM Studio SDK error: {result.stderr}")
return None
data = json.loads(result.stdout)
context_length = data.get("contextLength")
if context_length and context_length > 0:
logger.info(f"LM Studio SDK detected {model_name} context length: {context_length}")
return context_length
except FileNotFoundError:
logger.debug("Node.js not found - install Node.js for LM Studio SDK features")
except subprocess.TimeoutExpired:
logger.debug("LM Studio SDK query timeout")
except json.JSONDecodeError:
logger.debug("LM Studio SDK returned invalid JSON")
except Exception as e:
logger.debug(f"LM Studio SDK query failed: {e}")
return None
# Global model cache to avoid repeated loading
_model_cache: dict[str, Any] = {}
@@ -352,7 +232,6 @@ def compute_embeddings(
model_name,
base_url=provider_options.get("base_url"),
api_key=provider_options.get("api_key"),
provider_options=provider_options,
)
elif mode == "mlx":
return compute_embeddings_mlx(texts, model_name)
@@ -362,7 +241,6 @@ def compute_embeddings(
model_name,
is_build=is_build,
host=provider_options.get("host"),
provider_options=provider_options,
)
elif mode == "gemini":
return compute_embeddings_gemini(texts, model_name, is_build=is_build)
@@ -451,8 +329,7 @@ def compute_embeddings_sentence_transformers(
# TODO: Haven't tested this yet
torch.set_num_threads(min(8, os.cpu_count() or 4))
try:
# PyTorch's ContextProp type is complex; cast for type checker
torch.backends.mkldnn.enabled = True # type: ignore[assignment]
torch.backends.mkldnn.enabled = True
except AttributeError:
pass
@@ -702,7 +579,6 @@ def compute_embeddings_openai(
model_name: str,
base_url: Optional[str] = None,
api_key: Optional[str] = None,
provider_options: Optional[dict[str, Any]] = None,
) -> np.ndarray:
# TODO: @yichuan-w add progress bar only in build mode
"""Compute embeddings using OpenAI API"""
@@ -721,40 +597,26 @@ def compute_embeddings_openai(
f"Found {invalid_count} empty/invalid text(s) in input. Upstream should filter before calling OpenAI."
)
# Extract base_url and api_key from provider_options if not provided directly
provider_options = provider_options or {}
effective_base_url = base_url or provider_options.get("base_url")
effective_api_key = api_key or provider_options.get("api_key")
resolved_base_url = resolve_openai_base_url(effective_base_url)
resolved_api_key = resolve_openai_api_key(effective_api_key)
resolved_base_url = resolve_openai_base_url(base_url)
resolved_api_key = resolve_openai_api_key(api_key)
if not resolved_api_key:
raise RuntimeError("OPENAI_API_KEY environment variable not set")
# Create OpenAI client
client = openai.OpenAI(api_key=resolved_api_key, base_url=resolved_base_url)
# Cache OpenAI client
cache_key = f"openai_client::{resolved_base_url}"
if cache_key in _model_cache:
client = _model_cache[cache_key]
else:
client = openai.OpenAI(api_key=resolved_api_key, base_url=resolved_base_url)
_model_cache[cache_key] = client
logger.info("OpenAI client cached")
logger.info(
f"Computing embeddings for {len(texts)} texts using OpenAI API, model: '{model_name}'"
)
print(f"len of texts: {len(texts)}")
# Apply prompt template if provided
# Priority: build_prompt_template (new format) > prompt_template (old format)
prompt_template = provider_options.get("build_prompt_template") or provider_options.get(
"prompt_template"
)
if prompt_template:
logger.warning(f"Applying prompt template: '{prompt_template}'")
texts = [f"{prompt_template}{text}" for text in texts]
# Query token limit and apply truncation
token_limit = get_model_token_limit(model_name, base_url=effective_base_url)
logger.info(f"Using token limit: {token_limit} for model '{model_name}'")
texts = truncate_to_token_limit(texts, token_limit)
# OpenAI has limits on batch size and input length
max_batch_size = 800 # Conservative batch size because the token limit is 300K
all_embeddings = []
@@ -785,15 +647,7 @@ def compute_embeddings_openai(
try:
response = client.embeddings.create(model=model_name, input=batch_texts)
batch_embeddings = [embedding.embedding for embedding in response.data]
# Verify we got the expected number of embeddings
if len(batch_embeddings) != len(batch_texts):
logger.warning(
f"Expected {len(batch_texts)} embeddings but got {len(batch_embeddings)}"
)
# Only take the number of embeddings that match the batch size
all_embeddings.extend(batch_embeddings[: len(batch_texts)])
all_embeddings.extend(batch_embeddings)
except Exception as e:
logger.error(f"Batch {i} failed: {e}")
raise
@@ -883,7 +737,6 @@ def compute_embeddings_ollama(
model_name: str,
is_build: bool = False,
host: Optional[str] = None,
provider_options: Optional[dict[str, Any]] = None,
) -> np.ndarray:
"""
Compute embeddings using Ollama API with true batch processing.
@@ -896,7 +749,6 @@ def compute_embeddings_ollama(
model_name: Ollama model name (e.g., "nomic-embed-text", "mxbai-embed-large")
is_build: Whether this is a build operation (shows progress bar)
host: Ollama host URL (defaults to environment or http://localhost:11434)
provider_options: Optional provider-specific options (e.g., prompt_template)
Returns:
Normalized embeddings array, shape: (len(texts), embedding_dim)
@@ -1033,17 +885,6 @@ def compute_embeddings_ollama(
logger.info(f"Using batch size: {batch_size} for true batch processing")
# Apply prompt template if provided
provider_options = provider_options or {}
# Priority: build_prompt_template (new format) > prompt_template (old format)
prompt_template = provider_options.get("build_prompt_template") or provider_options.get(
"prompt_template"
)
if prompt_template:
logger.warning(f"Applying prompt template: '{prompt_template}'")
texts = [f"{prompt_template}{text}" for text in texts]
# Get model token limit and apply truncation before batching
token_limit = get_model_token_limit(model_name, base_url=resolved_host)
logger.info(f"Model '{model_name}' token limit: {token_limit}")

View File

@@ -11,15 +11,14 @@ from pathlib import Path
from typing import Callable, Optional
# Try to import readline with fallback for Windows
HAS_READLINE = False
readline = None # type: ignore[assignment]
try:
import readline # type: ignore[no-redef]
import readline
HAS_READLINE = True
except ImportError:
# Windows doesn't have readline by default
pass
HAS_READLINE = False
readline = None
class InteractiveSession:

View File

@@ -77,7 +77,6 @@ class LeannBackendSearcherInterface(ABC):
query: str,
use_server_if_available: bool = True,
zmq_port: Optional[int] = None,
query_template: Optional[str] = None,
) -> np.ndarray:
"""Compute embedding for a query string
@@ -85,7 +84,6 @@ class LeannBackendSearcherInterface(ABC):
query: The query string to embed
zmq_port: ZMQ port for embedding server
use_server_if_available: Whether to try using embedding server first
query_template: Optional prompt template to prepend to query
Returns:
Query embedding as numpy array with shape (1, D)

View File

@@ -7,7 +7,7 @@ operators for different data types including numbers, strings, booleans, and lis
"""
import logging
from typing import Any, Optional, Union
from typing import Any, Union
logger = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ class MetadataFilterEngine:
}
def apply_filters(
self, search_results: list[dict[str, Any]], metadata_filters: Optional[MetadataFilters]
self, search_results: list[dict[str, Any]], metadata_filters: MetadataFilters
) -> list[dict[str, Any]]:
"""
Apply metadata filters to a list of search results.

View File

@@ -33,8 +33,6 @@ def autodiscover_backends():
discovered_backends = []
for dist in importlib.metadata.distributions():
dist_name = dist.metadata["name"]
if dist_name is None:
continue
if dist_name.startswith("leann-backend-"):
backend_module_name = dist_name.replace("-", "_")
discovered_backends.append(backend_module_name)

View File

@@ -56,9 +56,7 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
with open(meta_path, encoding="utf-8") as f:
return json.load(f)
def _ensure_server_running(
self, passages_source_file: str, port: Optional[int], **kwargs
) -> int:
def _ensure_server_running(self, passages_source_file: str, port: int, **kwargs) -> int:
"""
Ensures the embedding server is running if recompute is needed.
This is a helper for subclasses.
@@ -73,23 +71,14 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
or "mips"
)
# Filter out ALL prompt templates from provider_options during search
# Templates are applied in compute_query_embedding (line 109-110) BEFORE server call
# The server should never apply templates during search to avoid double-templating
search_provider_options = {
k: v
for k, v in self.embedding_options.items()
if k not in ("build_prompt_template", "query_prompt_template", "prompt_template")
}
server_started, actual_port = self.embedding_server_manager.start_server(
port=port if port is not None else 5557,
port=port,
model_name=self.embedding_model,
embedding_mode=self.embedding_mode,
passages_file=passages_source_file,
distance_metric=distance_metric,
enable_warmup=kwargs.get("enable_warmup", False),
provider_options=search_provider_options,
provider_options=self.embedding_options,
)
if not server_started:
raise RuntimeError(f"Failed to start embedding server on port {actual_port}")
@@ -100,8 +89,7 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
self,
query: str,
use_server_if_available: bool = True,
zmq_port: Optional[int] = None,
query_template: Optional[str] = None,
zmq_port: int = 5557,
) -> np.ndarray:
"""
Compute embedding for a query string.
@@ -110,16 +98,10 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
query: The query string to embed
zmq_port: ZMQ port for embedding server
use_server_if_available: Whether to try using embedding server first
query_template: Optional prompt template to prepend to query
Returns:
Query embedding as numpy array
"""
# Apply query template BEFORE any computation path
# This ensures template is applied consistently for both server and fallback paths
if query_template:
query = f"{query_template}{query}"
# Try to use embedding server if available and requested
if use_server_if_available:
try:

View File

@@ -9,7 +9,6 @@ from typing import Any
# Default fallbacks to preserve current behaviour while keeping them in one place.
_DEFAULT_OLLAMA_HOST = "http://localhost:11434"
_DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1"
_DEFAULT_ANTHROPIC_BASE_URL = "https://api.anthropic.com"
def _clean_url(value: str) -> str:
@@ -53,23 +52,6 @@ def resolve_openai_base_url(explicit: str | None = None) -> str:
return _clean_url(_DEFAULT_OPENAI_BASE_URL)
def resolve_anthropic_base_url(explicit: str | None = None) -> str:
"""Resolve the base URL for Anthropic-compatible services."""
candidates = (
explicit,
os.getenv("LEANN_ANTHROPIC_BASE_URL"),
os.getenv("ANTHROPIC_BASE_URL"),
os.getenv("LOCAL_ANTHROPIC_BASE_URL"),
)
for candidate in candidates:
if candidate:
return _clean_url(candidate)
return _clean_url(_DEFAULT_ANTHROPIC_BASE_URL)
def resolve_openai_api_key(explicit: str | None = None) -> str | None:
"""Resolve the API key for OpenAI-compatible services."""
@@ -79,15 +61,6 @@ def resolve_openai_api_key(explicit: str | None = None) -> str | None:
return os.getenv("OPENAI_API_KEY")
def resolve_anthropic_api_key(explicit: str | None = None) -> str | None:
"""Resolve the API key for Anthropic services."""
if explicit:
return explicit
return os.getenv("ANTHROPIC_API_KEY")
def encode_provider_options(options: dict[str, Any] | None) -> str | None:
"""Serialize provider options for child processes."""

View File

@@ -53,11 +53,6 @@ leann build my-project --docs $(git ls-files)
# Start Claude Code
claude
```
**Performance tip**: For maximum speed when storage space is not a concern, add the `--no-recompute` flag to your build command. This materializes all embeddings and stores them on disk, so they do not have to be recomputed at search time:
```bash
leann build my-project --docs $(git ls-files) --no-recompute
```
## 🚀 Advanced Usage Examples to build the index

View File

@@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta"
[project]
name = "leann"
version = "0.3.5"
version = "0.3.4"
description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
readme = "README.md"
requires-python = ">=3.10"
requires-python = ">=3.9"
license = { text = "MIT" }
authors = [
{ name = "LEANN Team" }
@@ -18,10 +18,10 @@ classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
# Default installation: core + hnsw + diskann

View File

@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "leann-workspace"
version = "0.1.0"
requires-python = ">=3.10"
requires-python = ">=3.9"
dependencies = [
"leann-core",
@@ -69,8 +69,7 @@ diskann = [
# Add a new optional dependency group for document processing
documents = [
"beautifulsoup4>=4.13.0", # For HTML parsing
"python-docx>=0.8.11", # For Word documents (creating/editing)
"docx2txt>=0.9", # For Word documents (text extraction)
"python-docx>=0.8.11", # For Word documents
"openpyxl>=3.1.0", # For Excel files
"pandas>=2.2.0", # For data processing
]
@@ -157,19 +156,6 @@ exclude = ["localhost", "127.0.0.1", "example.com"]
exclude_path = [".git/", ".venv/", "__pycache__/", "third_party/"]
scheme = ["https", "http"]
[tool.ty]
# Type checking with ty (Astral's fast Python type checker)
# ty is 10-100x faster than mypy. See: https://docs.astral.sh/ty/
[tool.ty.environment]
python-version = "3.11"
extra-paths = ["apps", "packages/leann-core/src"]
[tool.ty.rules]
# Disable some noisy rules that have many false positives
possibly-missing-attribute = "ignore"
unresolved-import = "ignore" # Many optional dependencies
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
@@ -178,7 +164,6 @@ python_functions = ["test_*"]
markers = [
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
"openai: marks tests that require OpenAI API key",
"integration: marks tests that require live services (Ollama, LM Studio, etc.)",
]
timeout = 300 # Reduced from 600s (10min) to 300s (5min) for CI safety
addopts = [

View File

@@ -36,14 +36,6 @@ Tests DiskANN graph partitioning functionality:
- Includes performance comparison between DiskANN (with partition) and HNSW
- **Note**: These tests are skipped in CI due to hardware requirements and computation time
### `test_prompt_template_e2e.py`
Integration tests for prompt template feature with live embedding services:
- Tests prompt template prepending with EmbeddingGemma (OpenAI-compatible API via LM Studio)
- Tests hybrid token limit discovery (Ollama dynamic detection, registry fallback, default)
- Tests LM Studio SDK bridge for automatic context length detection (requires Node.js + @lmstudio/sdk)
- **Note**: These tests require live services (LM Studio, Ollama) and are marked with `@pytest.mark.integration`
- **Important**: Prompt templates are ONLY for EmbeddingGemma and similar task-specific models, NOT regular embedding models
## Running Tests
### Install test dependencies:
@@ -74,12 +66,6 @@ pytest tests/ -m "not openai"
# Skip slow tests
pytest tests/ -m "not slow"
# Skip integration tests (that require live services)
pytest tests/ -m "not integration"
# Run only integration tests (requires LM Studio or Ollama running)
pytest tests/test_prompt_template_e2e.py -v -s
# Run DiskANN partition tests (requires local machine, not CI)
pytest tests/test_diskann_partition.py
```
@@ -115,20 +101,6 @@ The `pytest.ini` file configures:
- Custom markers for slow and OpenAI tests
- Verbose output with short tracebacks
### Integration Test Prerequisites
Integration tests (`test_prompt_template_e2e.py`) require live services:
**Required:**
- LM Studio running at `http://localhost:1234` with EmbeddingGemma model loaded
**Optional:**
- Ollama running at `http://localhost:11434` for token limit detection tests
- Node.js + @lmstudio/sdk installed (`npm install -g @lmstudio/sdk`) for SDK bridge tests
Tests gracefully skip if services are unavailable.
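The graceful-skip pattern looks roughly like this (a sketch; the test name and endpoint probe are illustrative, not the actual test code):

```python
import urllib.request

import pytest

def _lmstudio_available() -> bool:
    # Probe the local LM Studio server; any network error means "not running".
    try:
        urllib.request.urlopen("http://localhost:1234/v1/models", timeout=2)
        return True
    except OSError:
        return False

@pytest.mark.integration
@pytest.mark.skipif(not _lmstudio_available(), reason="LM Studio is not running")
def test_prompt_template_roundtrip():
    ...
```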
### Known Issues
- OpenAI tests are automatically skipped if no API key is provided
- Integration tests require live embedding services and may fail behind a proxy (run `unset ALL_PROXY all_proxy` if needed)

View File

@@ -91,7 +91,7 @@ def test_large_index():
builder.build_index(index_path)
searcher = LeannSearcher(index_path)
results = searcher.search("word10 word20", top_k=10)
assert len(results) == 10
results = searcher.search(["word10 word20"], top_k=10)
assert len(results[0]) == 10
# Cleanup
searcher.cleanup()

View File

@@ -1,533 +0,0 @@
"""
Tests for CLI argument integration of --embedding-prompt-template.
These tests verify that:
1. The --embedding-prompt-template flag is properly registered on build and search commands
2. The template value flows from CLI args to embedding_options dict
3. The template is passed through to compute_embeddings() function
4. Default behavior (no flag) is handled correctly
"""
from unittest.mock import Mock, patch
from leann.cli import LeannCLI
class TestCLIPromptTemplateArgument:
"""Tests for --embedding-prompt-template on build and search commands."""
def test_commands_accept_prompt_template_argument(self):
"""Verify that build and search parsers accept --embedding-prompt-template flag."""
cli = LeannCLI()
parser = cli.create_parser()
template_value = "search_query: "
# Test build command
build_args = parser.parse_args(
[
"build",
"test-index",
"--docs",
"/tmp/test-docs",
"--embedding-prompt-template",
template_value,
]
)
assert build_args.command == "build"
assert hasattr(build_args, "embedding_prompt_template"), (
"build command should have embedding_prompt_template attribute"
)
assert build_args.embedding_prompt_template == template_value
# Test search command
search_args = parser.parse_args(
["search", "test-index", "my query", "--embedding-prompt-template", template_value]
)
assert search_args.command == "search"
assert hasattr(search_args, "embedding_prompt_template"), (
"search command should have embedding_prompt_template attribute"
)
assert search_args.embedding_prompt_template == template_value
def test_commands_default_to_none(self):
"""Verify default value is None when flag not provided (backward compatibility)."""
cli = LeannCLI()
parser = cli.create_parser()
# Test build command default
build_args = parser.parse_args(["build", "test-index", "--docs", "/tmp/test-docs"])
assert hasattr(build_args, "embedding_prompt_template"), (
"build command should have embedding_prompt_template attribute"
)
assert build_args.embedding_prompt_template is None, (
"Build default value should be None when flag not provided"
)
# Test search command default
search_args = parser.parse_args(["search", "test-index", "my query"])
assert hasattr(search_args, "embedding_prompt_template"), (
"search command should have embedding_prompt_template attribute"
)
assert search_args.embedding_prompt_template is None, (
"Search default value should be None when flag not provided"
)
class TestBuildCommandPromptTemplateArgumentExtras:
"""Additional build-specific tests for prompt template argument."""
def test_build_command_prompt_template_with_multiword_value(self):
"""
Verify that template values with spaces are handled correctly.
Templates like "search_document: " or "Represent this sentence for searching: "
should be accepted as a single string argument.
"""
cli = LeannCLI()
parser = cli.create_parser()
template = "Represent this sentence for searching: "
args = parser.parse_args(
[
"build",
"test-index",
"--docs",
"/tmp/test-docs",
"--embedding-prompt-template",
template,
]
)
assert args.embedding_prompt_template == template
class TestPromptTemplateStoredInEmbeddingOptions:
"""Tests for template storage in embedding_options dict."""
@patch("leann.cli.LeannBuilder")
def test_prompt_template_stored_in_embedding_options_on_build(
self, mock_builder_class, tmp_path
):
"""
Verify that when --embedding-prompt-template is provided to build command,
the value is stored in embedding_options dict passed to LeannBuilder.
This test will fail because the CLI doesn't currently process this argument
and add it to embedding_options.
"""
# Setup mocks
mock_builder = Mock()
mock_builder_class.return_value = mock_builder
# Create CLI and run build command
cli = LeannCLI()
# Mock load_documents to return a document so builder is created
cli.load_documents = Mock(return_value=[{"text": "test content", "metadata": {}}]) # type: ignore[assignment]
parser = cli.create_parser()
template = "search_query: "
args = parser.parse_args(
[
"build",
"test-index",
"--docs",
str(tmp_path),
"--embedding-prompt-template",
template,
"--force", # Force rebuild to ensure LeannBuilder is called
]
)
# Run the build command
import asyncio
asyncio.run(cli.build_index(args))
# Check that LeannBuilder was called with embedding_options containing prompt_template
call_kwargs = mock_builder_class.call_args.kwargs
assert "embedding_options" in call_kwargs, "LeannBuilder should receive embedding_options"
embedding_options = call_kwargs["embedding_options"]
assert embedding_options is not None, (
"embedding_options should not be None when template provided"
)
assert "prompt_template" in embedding_options, (
"embedding_options should contain 'prompt_template' key"
)
assert embedding_options["prompt_template"] == template, (
f"Template should be '{template}', got {embedding_options.get('prompt_template')}"
)
@patch("leann.cli.LeannBuilder")
def test_prompt_template_not_in_options_when_not_provided(self, mock_builder_class, tmp_path):
"""
Verify that when --embedding-prompt-template is NOT provided,
embedding_options either doesn't have the key or it's None.
This ensures we don't pass empty/None values unnecessarily.
"""
# Setup mocks
mock_builder = Mock()
mock_builder_class.return_value = mock_builder
cli = LeannCLI()
# Mock load_documents to return a document so builder is created
cli.load_documents = Mock(return_value=[{"text": "test content", "metadata": {}}]) # type: ignore[assignment]
parser = cli.create_parser()
args = parser.parse_args(
[
"build",
"test-index",
"--docs",
str(tmp_path),
"--force", # Force rebuild to ensure LeannBuilder is called
]
)
import asyncio
asyncio.run(cli.build_index(args))
# Check that if embedding_options is passed, it doesn't have prompt_template
call_kwargs = mock_builder_class.call_args.kwargs
if call_kwargs.get("embedding_options"):
embedding_options = call_kwargs["embedding_options"]
# Either the key shouldn't exist, or it should be None
assert (
"prompt_template" not in embedding_options
or embedding_options["prompt_template"] is None
), "prompt_template should not be set when flag not provided"
# R1 Tests: Build-time separate template storage
@patch("leann.cli.LeannBuilder")
def test_build_stores_separate_templates(self, mock_builder_class, tmp_path):
"""
R1 Test 1: Verify that when both --embedding-prompt-template and
--query-prompt-template are provided to build command, both values
are stored separately in embedding_options dict as build_prompt_template
and query_prompt_template.
This test will fail because:
1. CLI doesn't accept --query-prompt-template flag yet
2. CLI doesn't store templates as separate build_prompt_template and
query_prompt_template keys
Expected behavior after implementation:
- .meta.json contains: {"embedding_options": {
"build_prompt_template": "doc: ",
"query_prompt_template": "query: "
}}
"""
# Setup mocks
mock_builder = Mock()
mock_builder_class.return_value = mock_builder
cli = LeannCLI()
# Mock load_documents to return a document so builder is created
cli.load_documents = Mock(return_value=[{"text": "test content", "metadata": {}}]) # type: ignore[assignment]
parser = cli.create_parser()
build_template = "doc: "
query_template = "query: "
args = parser.parse_args(
[
"build",
"test-index",
"--docs",
str(tmp_path),
"--embedding-prompt-template",
build_template,
"--query-prompt-template",
query_template,
"--force",
]
)
# Run the build command
import asyncio
asyncio.run(cli.build_index(args))
# Check that LeannBuilder was called with separate template keys
call_kwargs = mock_builder_class.call_args.kwargs
assert "embedding_options" in call_kwargs, "LeannBuilder should receive embedding_options"
embedding_options = call_kwargs["embedding_options"]
assert embedding_options is not None, (
"embedding_options should not be None when templates provided"
)
assert "build_prompt_template" in embedding_options, (
"embedding_options should contain 'build_prompt_template' key"
)
assert embedding_options["build_prompt_template"] == build_template, (
f"build_prompt_template should be '{build_template}'"
)
assert "query_prompt_template" in embedding_options, (
"embedding_options should contain 'query_prompt_template' key"
)
assert embedding_options["query_prompt_template"] == query_template, (
f"query_prompt_template should be '{query_template}'"
)
# Old key should NOT be present when using new separate template format
assert "prompt_template" not in embedding_options, (
"Old 'prompt_template' key should not be present with separate templates"
)
@patch("leann.cli.LeannBuilder")
def test_build_backward_compat_single_template(self, mock_builder_class, tmp_path):
"""
R1 Test 2: Verify backward compatibility - when only
--embedding-prompt-template is provided (old behavior), it should
still be stored as 'prompt_template' in embedding_options.
This ensures existing workflows continue to work unchanged.
This test currently passes because it matches existing behavior, but it
documents the requirement that this behavior must be preserved after
implementing the separate template feature.
Expected behavior:
- .meta.json contains: {"embedding_options": {"prompt_template": "prompt: "}}
- No build_prompt_template or query_prompt_template keys
"""
# Setup mocks
mock_builder = Mock()
mock_builder_class.return_value = mock_builder
cli = LeannCLI()
# Mock load_documents to return a document so builder is created
cli.load_documents = Mock(return_value=[{"text": "test content", "metadata": {}}]) # type: ignore[assignment]
parser = cli.create_parser()
template = "prompt: "
args = parser.parse_args(
[
"build",
"test-index",
"--docs",
str(tmp_path),
"--embedding-prompt-template",
template,
"--force",
]
)
# Run the build command
import asyncio
asyncio.run(cli.build_index(args))
# Check that LeannBuilder was called with old format
call_kwargs = mock_builder_class.call_args.kwargs
assert "embedding_options" in call_kwargs, "LeannBuilder should receive embedding_options"
embedding_options = call_kwargs["embedding_options"]
assert embedding_options is not None, (
"embedding_options should not be None when template provided"
)
assert "prompt_template" in embedding_options, (
"embedding_options should contain old 'prompt_template' key for backward compat"
)
assert embedding_options["prompt_template"] == template, (
f"prompt_template should be '{template}'"
)
# New keys should NOT be present in backward compat mode
assert "build_prompt_template" not in embedding_options, (
"build_prompt_template should not be present with single template flag"
)
assert "query_prompt_template" not in embedding_options, (
"query_prompt_template should not be present with single template flag"
)
@patch("leann.cli.LeannBuilder")
def test_build_no_templates(self, mock_builder_class, tmp_path):
"""
R1 Test 3: Verify that when no template flags are provided,
embedding_options has no prompt template keys.
This ensures clean defaults and no unnecessary keys in .meta.json.
This test currently passes because it matches existing behavior, but it
documents the requirement that this behavior must be preserved after
implementing the separate template feature.
Expected behavior:
- .meta.json has no prompt_template, build_prompt_template, or
query_prompt_template keys (or embedding_options is empty/None)
"""
# Setup mocks
mock_builder = Mock()
mock_builder_class.return_value = mock_builder
cli = LeannCLI()
# Mock load_documents to return a document so builder is created
cli.load_documents = Mock(return_value=[{"text": "test content", "metadata": {}}]) # type: ignore[assignment]
parser = cli.create_parser()
args = parser.parse_args(["build", "test-index", "--docs", str(tmp_path), "--force"])
# Run the build command
import asyncio
asyncio.run(cli.build_index(args))
# Check that no template keys are present
call_kwargs = mock_builder_class.call_args.kwargs
if call_kwargs.get("embedding_options"):
embedding_options = call_kwargs["embedding_options"]
# None of the template keys should be present
assert "prompt_template" not in embedding_options, (
"prompt_template should not be present when no flags provided"
)
assert "build_prompt_template" not in embedding_options, (
"build_prompt_template should not be present when no flags provided"
)
assert "query_prompt_template" not in embedding_options, (
"query_prompt_template should not be present when no flags provided"
)
class TestPromptTemplateFlowsToComputeEmbeddings:
"""Tests for template flowing through to compute_embeddings function."""
@patch("leann.api.compute_embeddings")
def test_prompt_template_flows_to_compute_embeddings_via_provider_options(
self, mock_compute_embeddings, tmp_path
):
"""
Verify that the prompt template flows from CLI args through LeannBuilder
to compute_embeddings() function via provider_options parameter.
This is an integration test that verifies the complete flow:
CLI → embedding_options → LeannBuilder → compute_embeddings(provider_options)
This test will fail because:
1. CLI doesn't capture the argument yet
2. embedding_options doesn't include prompt_template
3. LeannBuilder doesn't pass it through to compute_embeddings
"""
# Mock compute_embeddings to return dummy embeddings as numpy array
import numpy as np
mock_compute_embeddings.return_value = np.array([[0.1, 0.2, 0.3]], dtype=np.float32)
# Use real LeannBuilder (not mocked) to test the actual flow
cli = LeannCLI()
# Mock load_documents to return a simple document
cli.load_documents = Mock(return_value=[{"text": "test content", "metadata": {}}]) # type: ignore[assignment]
parser = cli.create_parser()
template = "search_document: "
args = parser.parse_args(
[
"build",
"test-index",
"--docs",
str(tmp_path),
"--embedding-prompt-template",
template,
"--backend-name",
"hnsw", # Use hnsw backend
"--force", # Force rebuild to ensure index is created
]
)
# This should fail because the flow isn't implemented yet
import asyncio
asyncio.run(cli.build_index(args))
# Verify compute_embeddings was called with provider_options containing prompt_template
assert mock_compute_embeddings.called, "compute_embeddings should have been called"
# Check the call arguments
call_kwargs = mock_compute_embeddings.call_args.kwargs
assert "provider_options" in call_kwargs, (
"compute_embeddings should receive provider_options parameter"
)
provider_options = call_kwargs["provider_options"]
assert provider_options is not None, "provider_options should not be None"
assert "prompt_template" in provider_options, (
"provider_options should contain prompt_template key"
)
assert provider_options["prompt_template"] == template, (
f"Template should be '{template}', got {provider_options.get('prompt_template')}"
)
class TestPromptTemplateArgumentHelp:
"""Tests for argument help text and documentation."""
def test_build_command_prompt_template_has_help_text(self):
"""
Verify that --embedding-prompt-template has descriptive help text.
Good help text is crucial for CLI usability.
"""
cli = LeannCLI()
parser = cli.create_parser()
# argparse gives no clean handle on the build subparser, so capture the
# output of "build --help" and check it for the relevant keywords
import io
from contextlib import redirect_stdout
f = io.StringIO()
try:
with redirect_stdout(f):
parser.parse_args(["build", "--help"])
except SystemExit:
pass # --help causes sys.exit(0)
help_text = f.getvalue()
assert "--embedding-prompt-template" in help_text, (
"Help text should mention --embedding-prompt-template"
)
# Check for keywords that should be in the help
help_lower = help_text.lower()
assert any(keyword in help_lower for keyword in ["template", "prompt", "prepend"]), (
"Help text should explain what the prompt template does"
)
def test_search_command_prompt_template_has_help_text(self):
"""
Verify that search command also has help text for --embedding-prompt-template.
"""
cli = LeannCLI()
parser = cli.create_parser()
import io
from contextlib import redirect_stdout
f = io.StringIO()
try:
with redirect_stdout(f):
parser.parse_args(["search", "--help"])
except SystemExit:
pass # --help causes sys.exit(0)
help_text = f.getvalue()
assert "--embedding-prompt-template" in help_text, (
"Search help text should mention --embedding-prompt-template"
)
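The deleted CLI tests above pin down how the template flags are expected to land in embedding_options before LeannBuilder is constructed. A minimal sketch of that mapping, assuming an argparse-style args namespace; the flag and key names come from the tests, while the helper name itself is hypothetical:

def collect_template_options(args) -> dict:
    """Map the CLI template flags onto the embedding_options keys the tests expect."""
    options: dict = {}
    build_tpl = getattr(args, "embedding_prompt_template", None)
    query_tpl = getattr(args, "query_prompt_template", None)
    if build_tpl and query_tpl:
        # New two-template format (R1): store both keys, omit the legacy key.
        options["build_prompt_template"] = build_tpl
        options["query_prompt_template"] = query_tpl
    elif build_tpl:
        # Backward-compatible single-template format.
        options["prompt_template"] = build_tpl
    # No flags provided: leave embedding_options free of template keys.
    return options

With both flags set this yields {"build_prompt_template": "doc: ", "query_prompt_template": "query: "}, matching test_build_stores_separate_templates.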

View File

@@ -1,281 +0,0 @@
"""Unit tests for prompt template prepending in OpenAI embeddings.
This test suite defines the contract for prompt template functionality that allows
users to prepend a consistent prompt to all embedding inputs. These tests verify:
1. Template prepending to all input texts before embedding computation
2. Graceful handling of None/missing provider_options
3. Empty string template behavior (no-op)
4. Logging of template application for observability
5. Template application before token truncation
All tests are written in Red Phase - they should FAIL initially because the
implementation does not exist yet.
"""
from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
from leann.embedding_compute import compute_embeddings_openai
class TestPromptTemplatePrepending:
"""Tests for prompt template prepending in compute_embeddings_openai."""
@pytest.fixture
def mock_openai_client(self):
"""Create mock OpenAI client that captures input texts."""
mock_client = MagicMock()
# Mock the embeddings.create response
mock_response = Mock()
mock_response.data = [
Mock(embedding=[0.1, 0.2, 0.3]),
Mock(embedding=[0.4, 0.5, 0.6]),
]
mock_client.embeddings.create.return_value = mock_response
return mock_client
@pytest.fixture
def mock_openai_module(self, mock_openai_client, monkeypatch):
"""Mock the openai module to return our mock client."""
# Mock the API key environment variable
monkeypatch.setenv("OPENAI_API_KEY", "fake-test-key-for-mocking")
# openai is imported inside the function, so we need to patch it there
with patch("openai.OpenAI", return_value=mock_openai_client) as mock_openai:
yield mock_openai
def test_prompt_template_prepended_to_all_texts(self, mock_openai_module, mock_openai_client):
"""Verify template is prepended to all input texts.
When provider_options contains "prompt_template", that template should
be prepended to every text in the input list before sending to OpenAI API.
This is the core functionality: the template acts as a consistent prefix
that provides context or instruction for the embedding model.
"""
texts = ["First document", "Second document"]
template = "search_document: "
provider_options = {"prompt_template": template}
# Call compute_embeddings_openai with provider_options
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
# Verify embeddings.create was called with templated texts
mock_openai_client.embeddings.create.assert_called_once()
call_args = mock_openai_client.embeddings.create.call_args
# Extract the input texts sent to API
sent_texts = call_args.kwargs["input"]
# Verify template was prepended to all texts
assert len(sent_texts) == 2, "Should send same number of texts"
assert sent_texts[0] == "search_document: First document", (
"Template should be prepended to first text"
)
assert sent_texts[1] == "search_document: Second document", (
"Template should be prepended to second text"
)
# Verify result is valid embeddings array
assert isinstance(result, np.ndarray)
assert result.shape == (2, 3), "Should return correct shape"
def test_template_not_applied_when_missing_or_empty(
self, mock_openai_module, mock_openai_client
):
"""Verify template not applied when provider_options is None, missing key, or empty string.
This consolidated test covers three scenarios where templates should NOT be applied:
1. provider_options is None (default behavior)
2. provider_options exists but missing 'prompt_template' key
3. prompt_template is explicitly set to empty string ""
In all cases, texts should be sent to the API unchanged.
"""
# Scenario 1: None provider_options
texts = ["Original text one", "Original text two"]
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=None,
)
call_args = mock_openai_client.embeddings.create.call_args
sent_texts = call_args.kwargs["input"]
assert sent_texts[0] == "Original text one", (
"Text should be unchanged with None provider_options"
)
assert sent_texts[1] == "Original text two"
assert isinstance(result, np.ndarray)
assert result.shape == (2, 3)
# Reset mock for next scenario
mock_openai_client.reset_mock()
mock_response = Mock()
mock_response.data = [
Mock(embedding=[0.1, 0.2, 0.3]),
Mock(embedding=[0.4, 0.5, 0.6]),
]
mock_openai_client.embeddings.create.return_value = mock_response
# Scenario 2: Missing 'prompt_template' key
texts = ["Text without template", "Another text"]
provider_options = {"base_url": "https://api.openai.com/v1"}
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
call_args = mock_openai_client.embeddings.create.call_args
sent_texts = call_args.kwargs["input"]
assert sent_texts[0] == "Text without template", "Text should be unchanged with missing key"
assert sent_texts[1] == "Another text"
assert isinstance(result, np.ndarray)
# Reset mock for next scenario
mock_openai_client.reset_mock()
mock_openai_client.embeddings.create.return_value = mock_response
# Scenario 3: Empty string template
texts = ["Text one", "Text two"]
provider_options = {"prompt_template": ""}
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
call_args = mock_openai_client.embeddings.create.call_args
sent_texts = call_args.kwargs["input"]
assert sent_texts[0] == "Text one", "Empty template should not modify text"
assert sent_texts[1] == "Text two"
assert isinstance(result, np.ndarray)
def test_prompt_template_with_multiple_batches(self, mock_openai_module, mock_openai_client):
"""Verify template is prepended in all batches when texts exceed batch size.
OpenAI API has batch size limits. When input texts are split into
multiple batches, the template should be prepended to texts in every batch.
This ensures consistency across all API calls.
"""
# Create many texts that will be split into multiple batches
texts = [f"Document {i}" for i in range(1000)]
template = "passage: "
provider_options = {"prompt_template": template}
# Mock multiple batch responses
mock_response = Mock()
mock_response.data = [Mock(embedding=[0.1, 0.2, 0.3]) for _ in range(1000)]
mock_openai_client.embeddings.create.return_value = mock_response
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
# Verify embeddings.create was called multiple times (batching)
assert mock_openai_client.embeddings.create.call_count >= 2, (
"Should make multiple API calls for large text list"
)
# Verify template was prepended in ALL batches
for call in mock_openai_client.embeddings.create.call_args_list:
sent_texts = call.kwargs["input"]
for text in sent_texts:
assert text.startswith(template), (
f"All texts in all batches should start with template. Got: {text}"
)
# Verify result shape
assert result.shape[0] == 1000, "Should return embeddings for all texts"
def test_prompt_template_with_special_characters(self, mock_openai_module, mock_openai_client):
"""Verify template with special characters is handled correctly.
Templates may contain special characters, Unicode, newlines, etc.
These should all be prepended correctly without encoding issues.
"""
texts = ["Document content"]
# Template with various special characters
template = "🔍 Search query [EN]: "
provider_options = {"prompt_template": template}
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
# Verify special characters in template were preserved
call_args = mock_openai_client.embeddings.create.call_args
sent_texts = call_args.kwargs["input"]
assert sent_texts[0] == "🔍 Search query [EN]: Document content", (
"Special characters in template should be preserved"
)
assert isinstance(result, np.ndarray)
def test_prompt_template_integration_with_existing_validation(
self, mock_openai_module, mock_openai_client
):
"""Verify template works with existing input validation.
compute_embeddings_openai has validation for empty texts and whitespace.
Template prepending should happen AFTER validation, so validation errors
are thrown based on original texts, not templated texts.
This ensures users get clear error messages about their input.
"""
# Empty text should still raise ValueError even with template
texts = [""]
provider_options = {"prompt_template": "prefix: "}
with pytest.raises(ValueError, match="empty/invalid"):
compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
def test_prompt_template_with_api_key_and_base_url(
self, mock_openai_module, mock_openai_client
):
"""Verify template works alongside other provider_options.
provider_options may contain multiple settings: prompt_template,
base_url, api_key. All should work together correctly.
"""
texts = ["Test document"]
provider_options = {
"prompt_template": "embed: ",
"base_url": "https://custom.api.com/v1",
"api_key": "test-key-123",
}
result = compute_embeddings_openai(
texts=texts,
model_name="text-embedding-3-small",
provider_options=provider_options,
)
# Verify template was applied
call_args = mock_openai_client.embeddings.create.call_args
sent_texts = call_args.kwargs["input"]
assert sent_texts[0] == "embed: Test document"
# Verify OpenAI client was created with correct base_url
mock_openai_module.assert_called()
client_init_kwargs = mock_openai_module.call_args.kwargs
assert client_init_kwargs["base_url"] == "https://custom.api.com/v1"
assert client_init_kwargs["api_key"] == "test-key-123"
assert isinstance(result, np.ndarray)
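The unit tests above reduce to one behaviour inside compute_embeddings_openai: prepend the template, if any, to every text before the API call. A minimal sketch of just that step, under the assumption that batching, validation, and client setup happen elsewhere:

def apply_prompt_template(texts: list[str], provider_options: dict | None) -> list[str]:
    """Prepend the configured prompt template to every input text (no-op if unset or empty)."""
    template = (provider_options or {}).get("prompt_template")
    if not template:  # None, missing key, or "" -> texts are sent unchanged
        return texts
    return [template + text for text in texts]

# e.g. apply_prompt_template(["First document"], {"prompt_template": "search_document: "})
# returns ["search_document: First document"], as asserted above.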

View File

@@ -1,315 +0,0 @@
"""Unit tests for LM Studio TypeScript SDK bridge functionality.
This test suite defines the contract for the LM Studio SDK bridge that queries
model context length via Node.js subprocess. These tests verify:
1. Successful SDK query returns context length
2. Graceful fallback when Node.js not installed (FileNotFoundError)
3. Graceful fallback when SDK not installed (npm error)
4. Timeout handling (subprocess.TimeoutExpired)
5. Invalid JSON response handling
All tests are written in Red Phase - they should FAIL initially because the
`_query_lmstudio_context_limit` function does not exist yet.
The function contract:
- Inputs: model_name (str), base_url (str, WebSocket format "ws://localhost:1234")
- Outputs: context_length (int) or None on error
- Requirements:
1. Call Node.js with inline JavaScript using @lmstudio/sdk
2. 10-second timeout (accounts for Node.js startup)
3. Graceful fallback on any error (returns None, doesn't raise)
4. Parse JSON response with contextLength field
5. Log errors at debug level (not warning/error)
"""
import subprocess
from unittest.mock import Mock
import pytest
# Try to import the function - if it doesn't exist, tests will fail as expected
try:
from leann.embedding_compute import _query_lmstudio_context_limit
except ImportError:
# Function doesn't exist yet (Red Phase) - create a placeholder that will fail
def _query_lmstudio_context_limit(*args, **kwargs):
raise NotImplementedError(
"_query_lmstudio_context_limit not implemented yet - this is the Red Phase"
)
class TestLMStudioBridge:
"""Tests for LM Studio TypeScript SDK bridge integration."""
def test_query_lmstudio_success(self, monkeypatch):
"""Verify successful SDK query returns context length.
When the Node.js subprocess successfully queries the LM Studio SDK,
it should return a JSON response with contextLength field. The function
should parse this and return the integer context length.
"""
def mock_run(*args, **kwargs):
# Verify timeout is set to 10 seconds
assert kwargs.get("timeout") == 10, "Should use 10-second timeout for Node.js startup"
# Verify capture_output and text=True are set
assert kwargs.get("capture_output") is True, "Should capture stdout/stderr"
assert kwargs.get("text") is True, "Should decode output as text"
# Return successful JSON response
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = '{"contextLength": 8192, "identifier": "custom-model"}'
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
# Test with typical LM Studio model
limit = _query_lmstudio_context_limit(
model_name="custom-model", base_url="ws://localhost:1234"
)
assert limit == 8192, "Should return context length from SDK response"
def test_query_lmstudio_nodejs_not_found(self, monkeypatch):
"""Verify graceful fallback when Node.js not installed.
When Node.js is not installed, subprocess.run will raise FileNotFoundError.
The function should catch this and return None (graceful fallback to registry).
"""
def mock_run(*args, **kwargs):
raise FileNotFoundError("node: command not found")
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="custom-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None when Node.js not installed"
def test_query_lmstudio_sdk_not_installed(self, monkeypatch):
"""Verify graceful fallback when @lmstudio/sdk not installed.
When the SDK npm package is not installed, Node.js will return non-zero
exit code with error message in stderr. The function should detect this
and return None.
"""
def mock_run(*args, **kwargs):
mock_result = Mock()
mock_result.returncode = 1
mock_result.stdout = ""
mock_result.stderr = (
"Error: Cannot find module '@lmstudio/sdk'\nRequire stack:\n- /path/to/script.js"
)
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="custom-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None when SDK not installed"
def test_query_lmstudio_timeout(self, monkeypatch):
"""Verify graceful fallback when subprocess times out.
When the Node.js process takes longer than 10 seconds (e.g., LM Studio
not responding), subprocess.TimeoutExpired should be raised. The function
should catch this and return None.
"""
def mock_run(*args, **kwargs):
raise subprocess.TimeoutExpired(cmd=["node", "lmstudio_bridge.js"], timeout=10)
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="custom-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None on timeout"
def test_query_lmstudio_invalid_json(self, monkeypatch):
"""Verify graceful fallback when response is invalid JSON.
When the subprocess returns malformed JSON (e.g., due to SDK error),
json.loads will raise ValueError/JSONDecodeError. The function should
catch this and return None.
"""
def mock_run(*args, **kwargs):
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = "This is not valid JSON"
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="custom-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None when JSON parsing fails"
def test_query_lmstudio_missing_context_length_field(self, monkeypatch):
"""Verify graceful fallback when JSON lacks contextLength field.
When the SDK returns valid JSON but without the expected contextLength
field (e.g., error response), the function should return None.
"""
def mock_run(*args, **kwargs):
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = '{"identifier": "test-model", "error": "Model not found"}'
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="nonexistent-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None when contextLength field missing"
def test_query_lmstudio_null_context_length(self, monkeypatch):
"""Verify graceful fallback when contextLength is null.
When the SDK returns contextLength: null (model couldn't be loaded),
the function should return None for registry fallback.
"""
def mock_run(*args, **kwargs):
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = '{"contextLength": null, "identifier": "test-model"}'
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="test-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None when contextLength is null"
def test_query_lmstudio_zero_context_length(self, monkeypatch):
"""Verify graceful fallback when contextLength is zero.
When the SDK returns contextLength: 0 (invalid value), the function
should return None to trigger registry fallback.
"""
def mock_run(*args, **kwargs):
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = '{"contextLength": 0, "identifier": "test-model"}'
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="test-model", base_url="ws://localhost:1234"
)
assert limit is None, "Should return None when contextLength is zero"
def test_query_lmstudio_with_custom_port(self, monkeypatch):
"""Verify SDK query works with non-default WebSocket port.
LM Studio can run on custom ports. The function should pass the
provided base_url to the Node.js subprocess.
"""
def mock_run(*args, **kwargs):
# Verify the base_url argument is passed correctly
command = args[0] if args else kwargs.get("args", [])
assert "ws://localhost:8080" in " ".join(command), (
"Should pass custom port to subprocess"
)
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = '{"contextLength": 4096, "identifier": "custom-model"}'
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="custom-model", base_url="ws://localhost:8080"
)
assert limit == 4096, "Should work with custom WebSocket port"
@pytest.mark.parametrize(
"context_length,expected",
[
(512, 512), # Small context
(2048, 2048), # Common context
(8192, 8192), # Large context
(32768, 32768), # Very large context
],
)
def test_query_lmstudio_various_context_lengths(self, monkeypatch, context_length, expected):
"""Verify SDK query handles various context length values.
Different models have different context lengths. The function should
correctly parse and return any positive integer value.
"""
def mock_run(*args, **kwargs):
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = f'{{"contextLength": {context_length}, "identifier": "test"}}'
mock_result.stderr = ""
return mock_result
monkeypatch.setattr("subprocess.run", mock_run)
limit = _query_lmstudio_context_limit(
model_name="test-model", base_url="ws://localhost:1234"
)
assert limit == expected, f"Should return {expected} for context length {context_length}"
def test_query_lmstudio_logs_at_debug_level(self, monkeypatch, caplog):
"""Verify errors are logged at DEBUG level, not WARNING/ERROR.
Following the graceful fallback pattern from Ollama implementation,
errors should be logged at debug level to avoid alarming users when
fallback to registry works fine.
"""
import logging
caplog.set_level(logging.DEBUG, logger="leann.embedding_compute")
def mock_run(*args, **kwargs):
raise FileNotFoundError("node: command not found")
monkeypatch.setattr("subprocess.run", mock_run)
_query_lmstudio_context_limit(model_name="test-model", base_url="ws://localhost:1234")
# Check that debug logging occurred (not warning/error)
debug_logs = [record for record in caplog.records if record.levelname == "DEBUG"]
assert len(debug_logs) > 0, "Should log error at DEBUG level"
# Verify no WARNING or ERROR logs
warning_or_error_logs = [
record for record in caplog.records if record.levelname in ["WARNING", "ERROR"]
]
assert len(warning_or_error_logs) == 0, (
"Should not log at WARNING/ERROR level for expected failures"
)
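The bridge tests fully specify the subprocess contract, so a conforming implementation can be sketched directly from them. The script name and argument order are assumptions; the tests only fix the subprocess semantics (10-second timeout, JSON parsing, None on any failure, debug-level logging):

import json
import logging
import subprocess

logger = logging.getLogger(__name__)

def query_lmstudio_context_limit_sketch(model_name: str, base_url: str) -> int | None:
    """Ask a Node.js bridge script (using @lmstudio/sdk) for the model's context length."""
    try:
        result = subprocess.run(
            ["node", "lmstudio_bridge.js", model_name, base_url],
            capture_output=True,
            text=True,
            timeout=10,  # allow for Node.js startup
        )
        if result.returncode != 0:
            logger.debug("LM Studio bridge exited with %s: %s", result.returncode, result.stderr)
            return None
        context_length = json.loads(result.stdout).get("contextLength")
        if not context_length:  # missing, null, or 0 -> fall back to the static registry
            return None
        return int(context_length)
    except (FileNotFoundError, subprocess.TimeoutExpired, ValueError) as exc:
        # Node.js missing, bridge timed out, or malformed JSON: fall back quietly.
        logger.debug("LM Studio bridge unavailable: %s", exc)
        return None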

View File

@@ -1,403 +0,0 @@
"""End-to-end integration tests for prompt template and token limit features.
These tests verify real-world functionality with live services:
- OpenAI-compatible APIs (OpenAI, LM Studio) with prompt template support
- Ollama with dynamic token limit detection
- Hybrid token limit discovery mechanism
Run with: pytest tests/test_prompt_template_e2e.py -v -s
Skip if services unavailable: pytest tests/test_prompt_template_e2e.py -m "not integration"
Prerequisites:
1. LM Studio running with embedding model: http://localhost:1234
2. [Optional] Ollama running: ollama serve
3. [Optional] Ollama model: ollama pull nomic-embed-text
4. [Optional] Node.js + @lmstudio/sdk for context length detection
"""
import logging
import socket
import numpy as np
import pytest
import requests
from leann.embedding_compute import (
compute_embeddings_ollama,
compute_embeddings_openai,
get_model_token_limit,
)
# Test markers for conditional execution
pytestmark = pytest.mark.integration
logger = logging.getLogger(__name__)
def check_service_available(host: str, port: int, timeout: float = 2.0) -> bool:
"""Check if a service is available on the given host:port."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((host, port))
sock.close()
return result == 0
except Exception:
return False
def check_ollama_available() -> bool:
"""Check if Ollama service is available."""
if not check_service_available("localhost", 11434):
return False
try:
response = requests.get("http://localhost:11434/api/tags", timeout=2.0)
return response.status_code == 200
except Exception:
return False
def check_lmstudio_available() -> bool:
"""Check if LM Studio service is available."""
if not check_service_available("localhost", 1234):
return False
try:
response = requests.get("http://localhost:1234/v1/models", timeout=2.0)
return response.status_code == 200
except Exception:
return False
def get_lmstudio_first_model() -> str | None:
"""Get the first available model from LM Studio."""
try:
response = requests.get("http://localhost:1234/v1/models", timeout=5.0)
data = response.json()
models = data.get("data", [])
if models:
return models[0]["id"]
except Exception:
pass
return None
class TestPromptTemplateOpenAI:
"""End-to-end tests for prompt template with OpenAI-compatible APIs (LM Studio)."""
@pytest.mark.skipif(
not check_lmstudio_available(), reason="LM Studio service not available on localhost:1234"
)
def test_lmstudio_embedding_with_prompt_template(self):
"""Test prompt templates with LM Studio using OpenAI-compatible API."""
model_name = get_lmstudio_first_model()
if not model_name:
pytest.skip("No models loaded in LM Studio")
assert model_name is not None # Type narrowing for type checker
texts = ["artificial intelligence", "machine learning"]
prompt_template = "search_query: "
# Get embeddings with prompt template via provider_options
provider_options = {"prompt_template": prompt_template}
embeddings = compute_embeddings_openai(
texts=texts,
model_name=model_name,
base_url="http://localhost:1234/v1",
api_key="lm-studio", # LM Studio doesn't require real key
provider_options=provider_options,
)
assert embeddings is not None
assert len(embeddings) == 2
assert all(isinstance(emb, np.ndarray) for emb in embeddings)
assert all(len(emb) > 0 for emb in embeddings)
logger.info(
f"✓ LM Studio embeddings with prompt template: {len(embeddings)} vectors, {len(embeddings[0])} dimensions"
)
@pytest.mark.skipif(not check_lmstudio_available(), reason="LM Studio service not available")
def test_lmstudio_prompt_template_affects_embeddings(self):
"""Verify that prompt templates actually change embedding values."""
model_name = get_lmstudio_first_model()
if not model_name:
pytest.skip("No models loaded in LM Studio")
assert model_name is not None # Type narrowing for type checker
text = "machine learning"
base_url = "http://localhost:1234/v1"
api_key = "lm-studio"
# Get embeddings without template
embeddings_no_template = compute_embeddings_openai(
texts=[text],
model_name=model_name,
base_url=base_url,
api_key=api_key,
provider_options={},
)
# Get embeddings with template
embeddings_with_template = compute_embeddings_openai(
texts=[text],
model_name=model_name,
base_url=base_url,
api_key=api_key,
provider_options={"prompt_template": "search_query: "},
)
# Embeddings should be different when template is applied
assert not np.allclose(embeddings_no_template[0], embeddings_with_template[0])
logger.info("✓ Prompt template changes embedding values as expected")
class TestPromptTemplateOllama:
"""End-to-end tests for prompt template with Ollama."""
@pytest.mark.skipif(
not check_ollama_available(), reason="Ollama service not available on localhost:11434"
)
def test_ollama_embedding_with_prompt_template(self):
"""Test prompt templates with Ollama using any available embedding model."""
# Get any available embedding model
try:
response = requests.get("http://localhost:11434/api/tags", timeout=2.0)
models = response.json().get("models", [])
embedding_models = []
for model in models:
name = model["name"]
base_name = name.split(":")[0]
if any(emb in base_name for emb in ["embed", "bge", "minilm", "e5", "nomic"]):
embedding_models.append(name)
if not embedding_models:
pytest.skip("No embedding models available in Ollama")
model_name = embedding_models[0]
texts = ["artificial intelligence", "machine learning"]
prompt_template = "search_query: "
# Get embeddings with prompt template via provider_options
provider_options = {"prompt_template": prompt_template}
embeddings = compute_embeddings_ollama(
texts=texts,
model_name=model_name,
is_build=False,
host="http://localhost:11434",
provider_options=provider_options,
)
assert embeddings is not None
assert len(embeddings) == 2
assert all(isinstance(emb, np.ndarray) for emb in embeddings)
assert all(len(emb) > 0 for emb in embeddings)
logger.info(
f"✓ Ollama embeddings with prompt template: {len(embeddings)} vectors, {len(embeddings[0])} dimensions"
)
except Exception as e:
pytest.skip(f"Could not test Ollama prompt template: {e}")
@pytest.mark.skipif(not check_ollama_available(), reason="Ollama service not available")
def test_ollama_prompt_template_affects_embeddings(self):
"""Verify that prompt templates actually change embedding values with Ollama."""
# Get any available embedding model
try:
response = requests.get("http://localhost:11434/api/tags", timeout=2.0)
models = response.json().get("models", [])
embedding_models = []
for model in models:
name = model["name"]
base_name = name.split(":")[0]
if any(emb in base_name for emb in ["embed", "bge", "minilm", "e5", "nomic"]):
embedding_models.append(name)
if not embedding_models:
pytest.skip("No embedding models available in Ollama")
model_name = embedding_models[0]
text = "machine learning"
host = "http://localhost:11434"
# Get embeddings without template
embeddings_no_template = compute_embeddings_ollama(
texts=[text], model_name=model_name, is_build=False, host=host, provider_options={}
)
# Get embeddings with template
embeddings_with_template = compute_embeddings_ollama(
texts=[text],
model_name=model_name,
is_build=False,
host=host,
provider_options={"prompt_template": "search_query: "},
)
# Embeddings should be different when template is applied
assert not np.allclose(embeddings_no_template[0], embeddings_with_template[0])
logger.info("✓ Ollama prompt template changes embedding values as expected")
except Exception as e:
pytest.skip(f"Could not test Ollama prompt template: {e}")
class TestLMStudioSDK:
"""End-to-end tests for LM Studio SDK integration."""
@pytest.mark.skipif(not check_lmstudio_available(), reason="LM Studio service not available")
def test_lmstudio_model_listing(self):
"""Test that we can list models from LM Studio."""
try:
response = requests.get("http://localhost:1234/v1/models", timeout=5.0)
assert response.status_code == 200
data = response.json()
assert "data" in data
models = data["data"]
logger.info(f"✓ LM Studio models available: {len(models)}")
if models:
logger.info(f" First model: {models[0].get('id', 'unknown')}")
except Exception as e:
pytest.skip(f"LM Studio API error: {e}")
@pytest.mark.skipif(not check_lmstudio_available(), reason="LM Studio service not available")
def test_lmstudio_sdk_context_length_detection(self):
"""Test context length detection via LM Studio SDK bridge (requires Node.js + SDK)."""
model_name = get_lmstudio_first_model()
if not model_name:
pytest.skip("No models loaded in LM Studio")
assert model_name is not None # Type narrowing for type checker
try:
from leann.embedding_compute import _query_lmstudio_context_limit
# SDK requires WebSocket URL (ws://)
context_length = _query_lmstudio_context_limit(
model_name=model_name, base_url="ws://localhost:1234"
)
if context_length is None:
logger.warning(
"⚠ LM Studio SDK bridge returned None (Node.js or SDK may not be available)"
)
pytest.skip("Node.js or @lmstudio/sdk not available - SDK bridge unavailable")
else:
assert context_length > 0
logger.info(
f"✓ LM Studio context length detected via SDK: {context_length} for {model_name}"
)
except ImportError:
pytest.skip("_query_lmstudio_context_limit not implemented yet")
except Exception as e:
logger.error(f"LM Studio SDK test error: {e}")
raise
class TestOllamaTokenLimit:
"""End-to-end tests for Ollama token limit discovery."""
@pytest.mark.skipif(not check_ollama_available(), reason="Ollama service not available")
def test_ollama_token_limit_detection(self):
"""Test dynamic token limit detection from Ollama /api/show endpoint."""
# Get any available embedding model
try:
response = requests.get("http://localhost:11434/api/tags", timeout=2.0)
models = response.json().get("models", [])
embedding_models = []
for model in models:
name = model["name"]
base_name = name.split(":")[0]
if any(emb in base_name for emb in ["embed", "bge", "minilm", "e5", "nomic"]):
embedding_models.append(name)
if not embedding_models:
pytest.skip("No embedding models available in Ollama")
test_model = embedding_models[0]
# Test token limit detection
limit = get_model_token_limit(model_name=test_model, base_url="http://localhost:11434")
assert limit > 0
logger.info(f"✓ Ollama token limit detected: {limit} for {test_model}")
except Exception as e:
pytest.skip(f"Could not test Ollama token detection: {e}")
class TestHybridTokenLimit:
"""End-to-end tests for hybrid token limit discovery mechanism."""
def test_hybrid_discovery_registry_fallback(self):
"""Test fallback to static registry for known OpenAI models."""
# Use a known OpenAI model (should be in registry)
limit = get_model_token_limit(
model_name="text-embedding-3-small",
base_url="http://fake-server:9999", # Fake URL to force registry lookup
)
# text-embedding-3-small should have 8192 in registry
assert limit == 8192
logger.info(f"✓ Hybrid discovery (registry fallback): {limit} tokens")
def test_hybrid_discovery_default_fallback(self):
"""Test fallback to safe default for completely unknown models."""
limit = get_model_token_limit(
model_name="completely-unknown-model-xyz-12345",
base_url="http://fake-server:9999",
default=512,
)
# Should get the specified default
assert limit == 512
logger.info(f"✓ Hybrid discovery (default fallback): {limit} tokens")
@pytest.mark.skipif(not check_ollama_available(), reason="Ollama service not available")
def test_hybrid_discovery_ollama_dynamic_first(self):
"""Test that Ollama models use dynamic discovery first."""
# Get any available embedding model
try:
response = requests.get("http://localhost:11434/api/tags", timeout=2.0)
models = response.json().get("models", [])
embedding_models = []
for model in models:
name = model["name"]
base_name = name.split(":")[0]
if any(emb in base_name for emb in ["embed", "bge", "minilm", "e5", "nomic"]):
embedding_models.append(name)
if not embedding_models:
pytest.skip("No embedding models available in Ollama")
test_model = embedding_models[0]
# Should query Ollama /api/show dynamically
limit = get_model_token_limit(model_name=test_model, base_url="http://localhost:11434")
assert limit > 0
logger.info(f"✓ Hybrid discovery (Ollama dynamic): {limit} tokens for {test_model}")
except Exception as e:
pytest.skip(f"Could not test hybrid Ollama discovery: {e}")
if __name__ == "__main__":
print("\n" + "=" * 70)
print("INTEGRATION TEST SUITE - Real Service Testing")
print("=" * 70)
print("\nThese tests require live services:")
print(" • LM Studio: http://localhost:1234 (with embedding model loaded)")
print(" • [Optional] Ollama: http://localhost:11434")
print(" • [Optional] Node.js + @lmstudio/sdk for SDK bridge tests")
print("\nRun with: pytest tests/test_prompt_template_e2e.py -v -s")
print("=" * 70 + "\n")

View File

@@ -1,863 +0,0 @@
"""
Integration tests for prompt template metadata persistence and reuse.
These tests verify the complete lifecycle of prompt template persistence:
1. Template is saved to .meta.json during index build
2. Template is automatically loaded during search operations
3. Template can be overridden with explicit flag during search
4. Template is reused during chat/ask operations
These are integration tests that:
- Use real file system with temporary directories
- Run actual build and search operations
- Inspect .meta.json file contents directly
- Mock embedding servers to avoid external dependencies
- Use small test codebases for fast execution
Expected to FAIL in Red Phase because metadata persistence verification is not yet implemented.
"""
import json
import tempfile
from pathlib import Path
from unittest.mock import Mock, patch
import numpy as np
import pytest
from leann.api import LeannBuilder, LeannSearcher
class TestPromptTemplateMetadataPersistence:
"""Tests for prompt template storage in .meta.json during build."""
@pytest.fixture
def temp_index_dir(self):
"""Create temporary directory for test indexes."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture
def mock_embeddings(self):
"""Mock compute_embeddings to return dummy embeddings."""
with patch("leann.api.compute_embeddings") as mock_compute:
# Return dummy embeddings as numpy array
mock_compute.return_value = np.array([[0.1, 0.2, 0.3]], dtype=np.float32)
yield mock_compute
def test_prompt_template_saved_to_metadata(self, temp_index_dir, mock_embeddings):
"""
Verify that when build is run with embedding_options containing prompt_template,
the template value is saved to .meta.json file.
This is the core persistence requirement - templates must be saved to allow
reuse in subsequent search operations without re-specifying the flag.
Expected failure: .meta.json exists but doesn't contain embedding_options
with prompt_template, or the value is not persisted correctly.
"""
# Setup test data
index_path = temp_index_dir / "test_index.leann"
template = "search_document: "
# Build index with prompt template in embedding_options
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
embedding_options={"prompt_template": template},
)
# Add a simple document
builder.add_text("This is a test document for indexing")
# Build the index
builder.build_index(str(index_path))
# Verify .meta.json was created and contains the template
meta_path = temp_index_dir / "test_index.leann.meta.json"
assert meta_path.exists(), ".meta.json file should be created during build"
# Read and parse metadata
with open(meta_path, encoding="utf-8") as f:
meta_data = json.load(f)
# Verify embedding_options exists in metadata
assert "embedding_options" in meta_data, (
"embedding_options should be saved to .meta.json when provided"
)
# Verify prompt_template is in embedding_options
embedding_options = meta_data["embedding_options"]
assert "prompt_template" in embedding_options, (
"prompt_template should be saved within embedding_options"
)
# Verify the template value matches what we provided
assert embedding_options["prompt_template"] == template, (
f"Template should be '{template}', got '{embedding_options.get('prompt_template')}'"
)
def test_prompt_template_absent_when_not_provided(self, temp_index_dir, mock_embeddings):
"""
Verify that when no prompt template is provided during build,
.meta.json either doesn't have embedding_options or prompt_template key.
This ensures clean metadata without unnecessary keys when features aren't used.
Expected behavior: Build succeeds, .meta.json doesn't contain prompt_template.
"""
index_path = temp_index_dir / "test_no_template.leann"
# Build index WITHOUT prompt template
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
# No embedding_options provided
)
builder.add_text("Document without template")
builder.build_index(str(index_path))
# Verify metadata
meta_path = temp_index_dir / "test_no_template.leann.meta.json"
assert meta_path.exists()
with open(meta_path, encoding="utf-8") as f:
meta_data = json.load(f)
# If embedding_options exists, it should not contain prompt_template
if "embedding_options" in meta_data:
embedding_options = meta_data["embedding_options"]
assert "prompt_template" not in embedding_options, (
"prompt_template should not be in metadata when not provided"
)
class TestPromptTemplateAutoLoadOnSearch:
"""Tests for automatic loading of prompt template during search operations.
NOTE: Over-mocked test removed (test_prompt_template_auto_loaded_on_search).
This functionality is now comprehensively tested by TestQueryPromptTemplateAutoLoad
which uses simpler mocking and doesn't hang.
"""
@pytest.fixture
def temp_index_dir(self):
"""Create temporary directory for test indexes."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture
def mock_embeddings(self):
"""Mock compute_embeddings to capture calls and return dummy embeddings."""
with patch("leann.api.compute_embeddings") as mock_compute:
mock_compute.return_value = np.array([[0.1, 0.2, 0.3]], dtype=np.float32)
yield mock_compute
def test_search_without_template_in_metadata(self, temp_index_dir, mock_embeddings):
"""
Verify that searching an index built WITHOUT a prompt template
works correctly (backward compatibility).
The searcher should handle missing prompt_template gracefully.
Expected behavior: Search succeeds, no template is used.
"""
# Build index without template
index_path = temp_index_dir / "no_template.leann"
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
)
builder.add_text("Document without template")
builder.build_index(str(index_path))
# Reset mocks
mock_embeddings.reset_mock()
# Create searcher and search
searcher = LeannSearcher(index_path=str(index_path))
# Verify no template in embedding_options
assert "prompt_template" not in searcher.embedding_options, (
"Searcher should not have prompt_template when not in metadata"
)
class TestQueryPromptTemplateAutoLoad:
"""Tests for automatic loading of separate query_prompt_template during search (R2).
These tests verify the new two-template system where:
- build_prompt_template: Applied during index building
- query_prompt_template: Applied during search operations
Expected to FAIL in Red Phase (R2) because query template extraction
and application is not yet implemented in LeannSearcher.search().
"""
@pytest.fixture
def temp_index_dir(self):
"""Create temporary directory for test indexes."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture
def mock_compute_embeddings(self):
"""Mock compute_embeddings to capture calls and return dummy embeddings."""
with patch("leann.embedding_compute.compute_embeddings") as mock_compute:
mock_compute.return_value = np.array([[0.1, 0.2, 0.3]], dtype=np.float32)
yield mock_compute
def test_search_auto_loads_query_template(self, temp_index_dir, mock_compute_embeddings):
"""
Verify that search() automatically loads and applies query_prompt_template from .meta.json.
Given: Index built with separate build_prompt_template and query_prompt_template
When: LeannSearcher.search("my query") is called
Then: Query embedding is computed with "query: my query" (query template applied)
This is the core R2 requirement - query templates must be auto-loaded and applied
during search without user intervention.
Expected failure: compute_embeddings called with raw "my query" instead of
"query: my query" because query template extraction is not implemented.
"""
# Setup: Build index with separate templates in new format
index_path = temp_index_dir / "query_template.leann"
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
embedding_options={
"build_prompt_template": "doc: ",
"query_prompt_template": "query: ",
},
)
builder.add_text("Test document")
builder.build_index(str(index_path))
# Reset mock to ignore build calls
mock_compute_embeddings.reset_mock()
# Act: Search with query
searcher = LeannSearcher(index_path=str(index_path))
# Mock the backend search to avoid actual search
with patch.object(searcher.backend_impl, "search") as mock_backend_search:
mock_backend_search.return_value = {
"labels": [["test_id_0"]], # IDs (nested list for batch support)
"distances": [[0.9]], # Distances (nested list for batch support)
}
searcher.search("my query", top_k=1, recompute_embeddings=False)
# Assert: compute_embeddings was called with query template applied
assert mock_compute_embeddings.called, "compute_embeddings should be called during search"
# Get the actual text passed to compute_embeddings
call_args = mock_compute_embeddings.call_args
texts_arg = call_args[0][0] # First positional arg (list of texts)
assert len(texts_arg) == 1, "Should compute embedding for one query"
assert texts_arg[0] == "query: my query", (
f"Query template should be applied: expected 'query: my query', got '{texts_arg[0]}'"
)
def test_search_backward_compat_single_template(self, temp_index_dir, mock_compute_embeddings):
"""
Verify backward compatibility with old single prompt_template format.
Given: Index with old format (single prompt_template, no query_prompt_template)
When: LeannSearcher.search("my query") is called
Then: Query embedding computed with "doc: my query" (old template applied)
This ensures indexes built with the old single-template system continue
to work correctly with the new search implementation.
Expected failure: Old template not recognized/applied because backward
compatibility logic is not implemented.
"""
# Setup: Build index with old single-template format
index_path = temp_index_dir / "old_template.leann"
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
embedding_options={"prompt_template": "doc: "}, # Old format
)
builder.add_text("Test document")
builder.build_index(str(index_path))
# Reset mock
mock_compute_embeddings.reset_mock()
# Act: Search
searcher = LeannSearcher(index_path=str(index_path))
with patch.object(searcher.backend_impl, "search") as mock_backend_search:
mock_backend_search.return_value = {"labels": [["test_id_0"]], "distances": [[0.9]]}
searcher.search("my query", top_k=1, recompute_embeddings=False)
# Assert: Old template was applied
call_args = mock_compute_embeddings.call_args
texts_arg = call_args[0][0]
assert texts_arg[0] == "doc: my query", (
f"Old prompt_template should be applied for backward compatibility: "
f"expected 'doc: my query', got '{texts_arg[0]}'"
)
def test_search_backward_compat_no_template(self, temp_index_dir, mock_compute_embeddings):
"""
Verify backward compatibility when no template is present in .meta.json.
Given: Index with no template in .meta.json (very old indexes)
When: LeannSearcher.search("my query") is called
Then: Query embedding computed with "my query" (no template, raw query)
This ensures the most basic backward compatibility - indexes without
any template support continue to work as before.
Expected failure: May fail if a default template is incorrectly applied,
or if the missing template causes an error.
"""
# Setup: Build index without any template
index_path = temp_index_dir / "no_template.leann"
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
# No embedding_options at all
)
builder.add_text("Test document")
builder.build_index(str(index_path))
# Reset mock
mock_compute_embeddings.reset_mock()
# Act: Search
searcher = LeannSearcher(index_path=str(index_path))
with patch.object(searcher.backend_impl, "search") as mock_backend_search:
mock_backend_search.return_value = {"labels": [["test_id_0"]], "distances": [[0.9]]}
searcher.search("my query", top_k=1, recompute_embeddings=False)
# Assert: No template applied (raw query)
call_args = mock_compute_embeddings.call_args
texts_arg = call_args[0][0]
assert texts_arg[0] == "my query", (
f"No template should be applied when missing from metadata: "
f"expected 'my query', got '{texts_arg[0]}'"
)
def test_search_override_via_provider_options(self, temp_index_dir, mock_compute_embeddings):
"""
Verify that explicit provider_options can override stored query template.
Given: Index with query_prompt_template: "query: "
When: search() called with provider_options={"prompt_template": "override: "}
Then: Query embedding computed with "override: test" (override takes precedence)
This enables users to experiment with different query templates without
rebuilding the index, or to handle special query types differently.
Expected failure: provider_options parameter is accepted via **kwargs but
not used. Query embedding computed with raw "test" instead of "override: test"
because override logic is not implemented.
"""
# Setup: Build index with query template
index_path = temp_index_dir / "override_template.leann"
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
embedding_options={
"build_prompt_template": "doc: ",
"query_prompt_template": "query: ",
},
)
builder.add_text("Test document")
builder.build_index(str(index_path))
# Reset mock
mock_compute_embeddings.reset_mock()
# Act: Search with override
searcher = LeannSearcher(index_path=str(index_path))
with patch.object(searcher.backend_impl, "search") as mock_backend_search:
mock_backend_search.return_value = {"labels": [["test_id_0"]], "distances": [[0.9]]}
# This should accept provider_options parameter
searcher.search(
"test",
top_k=1,
recompute_embeddings=False,
provider_options={"prompt_template": "override: "},
)
# Assert: Override template was applied
call_args = mock_compute_embeddings.call_args
texts_arg = call_args[0][0]
assert texts_arg[0] == "override: test", (
f"Override template should take precedence: "
f"expected 'override: test', got '{texts_arg[0]}'"
)
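    # ---------------------------------------------------------------------------
    # Illustrative sketch only (not the library's implementation): the four tests
    # above pin down a precedence order for query templates. A resolver consistent
    # with that behaviour could look like the hypothetical helper below, where
    # embedding_options comes from .meta.json and provider_options from the
    # search() call; all names here are assumptions made for illustration.
    def _resolve_query_template_sketch(embedding_options, provider_options=None):
        provider_options = provider_options or {}
        # 1. Explicit per-call override wins (test_search_override_via_provider_options).
        if provider_options.get("prompt_template"):
            return provider_options["prompt_template"]
        # 2. New dual-template format (query_prompt_template from the build options above).
        if embedding_options.get("query_prompt_template"):
            return embedding_options["query_prompt_template"]
        # 3. Old single-template format (test_search_backward_compat_single_template).
        if embedding_options.get("prompt_template"):
            return embedding_options["prompt_template"]
        # 4. Very old indexes without any template (test_search_backward_compat_no_template).
        return ""
    # Example: _resolve_query_template_sketch({"query_prompt_template": "query: "}) == "query: "
    # ---------------------------------------------------------------------------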
class TestPromptTemplateReuseInChat:
"""Tests for prompt template reuse in chat/ask operations."""
@pytest.fixture
def temp_index_dir(self):
"""Create temporary directory for test indexes."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture
def mock_embeddings(self):
"""Mock compute_embeddings to return dummy embeddings."""
with patch("leann.api.compute_embeddings") as mock_compute:
mock_compute.return_value = np.array([[0.1, 0.2, 0.3]], dtype=np.float32)
yield mock_compute
@pytest.fixture
def mock_embedding_server_manager(self):
"""Mock EmbeddingServerManager for chat tests."""
with patch("leann.searcher_base.EmbeddingServerManager") as mock_manager_class:
mock_manager = Mock()
mock_manager.start_server.return_value = (True, 5557)
mock_manager_class.return_value = mock_manager
yield mock_manager
@pytest.fixture
def index_with_template(self, temp_index_dir, mock_embeddings):
"""Build an index with a prompt template."""
index_path = temp_index_dir / "chat_template_index.leann"
template = "document_query: "
builder = LeannBuilder(
backend_name="hnsw",
embedding_model="text-embedding-3-small",
embedding_mode="openai",
embedding_options={"prompt_template": template},
)
builder.add_text("Test document for chat")
builder.build_index(str(index_path))
return str(index_path), template
class TestPromptTemplateIntegrationWithEmbeddingModes:
"""Tests for prompt template compatibility with different embedding modes."""
@pytest.fixture
def temp_index_dir(self):
"""Create temporary directory for test indexes."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.mark.parametrize(
"mode,model,template,filename_prefix",
[
(
"openai",
"text-embedding-3-small",
"Represent this for searching: ",
"openai_template",
),
("ollama", "nomic-embed-text", "search_query: ", "ollama_template"),
("sentence-transformers", "facebook/contriever", "query: ", "st_template"),
],
)
def test_prompt_template_metadata_with_embedding_modes(
self, temp_index_dir, mode, model, template, filename_prefix
):
"""Verify prompt template is saved correctly across different embedding modes.
Tests that prompt templates are persisted to .meta.json for:
- OpenAI mode (primary use case)
- Ollama mode (also supports templates)
- Sentence-transformers mode (saved for forward compatibility)
Expected behavior: Template is saved to .meta.json regardless of mode.
"""
with patch("leann.api.compute_embeddings") as mock_compute:
mock_compute.return_value = np.array([[0.1, 0.2, 0.3]], dtype=np.float32)
index_path = temp_index_dir / f"{filename_prefix}.leann"
builder = LeannBuilder(
backend_name="hnsw",
embedding_model=model,
embedding_mode=mode,
embedding_options={"prompt_template": template},
)
builder.add_text(f"{mode.capitalize()} test document")
builder.build_index(str(index_path))
# Verify metadata
meta_path = temp_index_dir / f"{filename_prefix}.leann.meta.json"
with open(meta_path, encoding="utf-8") as f:
meta_data = json.load(f)
assert meta_data["embedding_mode"] == mode
# Template should be saved for all modes (even if not used by some)
if "embedding_options" in meta_data:
assert meta_data["embedding_options"]["prompt_template"] == template
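            # For reference, a hedged illustration (not an exact dump): given the ollama
            # row of the parametrize table above, the slice of .meta.json this test relies
            # on would look roughly like the dict below; surrounding keys vary by backend.
            _example_meta_slice = {
                "embedding_model": "nomic-embed-text",
                "embedding_mode": "ollama",
                "embedding_options": {"prompt_template": "search_query: "},
            }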
class TestQueryTemplateApplicationInComputeEmbedding:
"""Tests for query template application in compute_query_embedding() (Bug Fix).
These tests verify that query templates are applied consistently in BOTH
code paths (server and fallback) when computing query embeddings.
This addresses the bug where query templates were only applied in the
fallback path, not when using the embedding server (the default path).
Bug Context:
- Issue: Query templates were stored in metadata but only applied during
fallback (direct) computation, not when using the embedding server
- Fix: Move template application to BEFORE any computation path in
compute_query_embedding() (searcher_base.py:107-110)
- Impact: Critical for models like EmbeddingGemma that require task-specific
templates for optimal performance
These tests ensure the fix works correctly and prevent regression.
"""
@pytest.fixture
def temp_index_with_template(self):
"""Create a temporary index with query template in metadata"""
with tempfile.TemporaryDirectory() as tmpdir:
index_dir = Path(tmpdir)
index_file = index_dir / "test.leann"
meta_file = index_dir / "test.leann.meta.json"
# Create minimal metadata with query template
metadata = {
"version": "1.0",
"backend_name": "hnsw",
"embedding_model": "text-embedding-embeddinggemma-300m-qat",
"dimensions": 768,
"embedding_mode": "openai",
"backend_kwargs": {
"graph_degree": 32,
"complexity": 64,
"distance_metric": "cosine",
},
"embedding_options": {
"base_url": "http://localhost:1234/v1",
"api_key": "test-key",
"build_prompt_template": "title: none | text: ",
"query_prompt_template": "task: search result | query: ",
},
}
meta_file.write_text(json.dumps(metadata, indent=2))
# Create minimal HNSW index file (empty is okay for this test)
index_file.write_bytes(b"")
yield str(index_file)
def test_query_template_applied_in_fallback_path(self, temp_index_with_template):
"""Test that query template is applied when using fallback (direct) path"""
from leann.searcher_base import BaseSearcher
# Create a concrete implementation for testing
class TestSearcher(BaseSearcher):
def search(
self,
query,
top_k,
complexity=64,
beam_width=1,
prune_ratio=0.0,
recompute_embeddings=False,
pruning_strategy="global",
zmq_port=None,
**kwargs,
):
return {"labels": [], "distances": []}
searcher = object.__new__(TestSearcher)
searcher.index_path = Path(temp_index_with_template)
searcher.index_dir = searcher.index_path.parent
# Load metadata
meta_file = searcher.index_dir / f"{searcher.index_path.name}.meta.json"
with open(meta_file) as f:
searcher.meta = json.load(f)
searcher.embedding_model = searcher.meta["embedding_model"]
searcher.embedding_mode = searcher.meta.get("embedding_mode", "sentence-transformers")
searcher.embedding_options = searcher.meta.get("embedding_options", {})
# Mock compute_embeddings to capture the query text
captured_queries = []
def mock_compute_embeddings(texts, model, mode, provider_options=None):
captured_queries.extend(texts)
return np.random.rand(len(texts), 768).astype(np.float32)
with patch(
"leann.embedding_compute.compute_embeddings", side_effect=mock_compute_embeddings
):
# Call compute_query_embedding with template (fallback path)
result = searcher.compute_query_embedding(
query="vector database",
use_server_if_available=False, # Force fallback path
query_template="task: search result | query: ",
)
# Verify template was applied
assert len(captured_queries) == 1
assert captured_queries[0] == "task: search result | query: vector database"
assert result.shape == (1, 768)
def test_query_template_applied_in_server_path(self, temp_index_with_template):
"""Test that query template is applied when using server path"""
from leann.searcher_base import BaseSearcher
# Create a concrete implementation for testing
class TestSearcher(BaseSearcher):
def search(
self,
query,
top_k,
complexity=64,
beam_width=1,
prune_ratio=0.0,
recompute_embeddings=False,
pruning_strategy="global",
zmq_port=None,
**kwargs,
):
return {"labels": [], "distances": []}
searcher = object.__new__(TestSearcher)
searcher.index_path = Path(temp_index_with_template)
searcher.index_dir = searcher.index_path.parent
# Load metadata
meta_file = searcher.index_dir / f"{searcher.index_path.name}.meta.json"
with open(meta_file) as f:
searcher.meta = json.load(f)
searcher.embedding_model = searcher.meta["embedding_model"]
searcher.embedding_mode = searcher.meta.get("embedding_mode", "sentence-transformers")
searcher.embedding_options = searcher.meta.get("embedding_options", {})
# Mock the server methods to capture the query text
captured_queries = []
def mock_ensure_server_running(passages_file, port):
return port
def mock_compute_embedding_via_server(chunks, port):
captured_queries.extend(chunks)
return np.random.rand(len(chunks), 768).astype(np.float32)
searcher._ensure_server_running = mock_ensure_server_running
searcher._compute_embedding_via_server = mock_compute_embedding_via_server
# Call compute_query_embedding with template (server path)
result = searcher.compute_query_embedding(
query="vector database",
use_server_if_available=True, # Use server path
query_template="task: search result | query: ",
)
# Verify template was applied BEFORE calling server
assert len(captured_queries) == 1
assert captured_queries[0] == "task: search result | query: vector database"
assert result.shape == (1, 768)
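    # ---------------------------------------------------------------------------
    # Sketch of the control flow the two tests above lock in (an illustration of
    # the fix, not the actual searcher_base.py source): the template is applied to
    # the query string once, BEFORE choosing between the server and fallback
    # paths, so both paths embed exactly the same text. Helper names mirror the
    # mocks used above; everything else is an assumption.
    def _compute_query_embedding_sketch(searcher, query, query_template=None, use_server_if_available=True):
        if query_template:  # empty string or None leaves the query untouched
            query = f"{query_template}{query}"
        if use_server_if_available:
            # passages_file / port arguments elided; see the mocked signatures above
            port = searcher._ensure_server_running(None, 5557)
            return searcher._compute_embedding_via_server([query], port)
        from leann.embedding_compute import compute_embeddings
        return compute_embeddings(
            [query],
            searcher.embedding_model,
            searcher.embedding_mode,
            provider_options=searcher.embedding_options,
        )
    # ---------------------------------------------------------------------------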
def test_query_template_without_template_parameter(self, temp_index_with_template):
"""Test that query is unchanged when no template is provided"""
from leann.searcher_base import BaseSearcher
class TestSearcher(BaseSearcher):
def search(
self,
query,
top_k,
complexity=64,
beam_width=1,
prune_ratio=0.0,
recompute_embeddings=False,
pruning_strategy="global",
zmq_port=None,
**kwargs,
):
return {"labels": [], "distances": []}
searcher = object.__new__(TestSearcher)
searcher.index_path = Path(temp_index_with_template)
searcher.index_dir = searcher.index_path.parent
meta_file = searcher.index_dir / f"{searcher.index_path.name}.meta.json"
with open(meta_file) as f:
searcher.meta = json.load(f)
searcher.embedding_model = searcher.meta["embedding_model"]
searcher.embedding_mode = searcher.meta.get("embedding_mode", "sentence-transformers")
searcher.embedding_options = searcher.meta.get("embedding_options", {})
captured_queries = []
def mock_compute_embeddings(texts, model, mode, provider_options=None):
captured_queries.extend(texts)
return np.random.rand(len(texts), 768).astype(np.float32)
with patch(
"leann.embedding_compute.compute_embeddings", side_effect=mock_compute_embeddings
):
searcher.compute_query_embedding(
query="vector database",
use_server_if_available=False,
query_template=None, # No template
)
# Verify query is unchanged
assert len(captured_queries) == 1
assert captured_queries[0] == "vector database"
def test_query_template_consistency_between_paths(self, temp_index_with_template):
"""Test that both paths apply template identically"""
from leann.searcher_base import BaseSearcher
class TestSearcher(BaseSearcher):
def search(
self,
query,
top_k,
complexity=64,
beam_width=1,
prune_ratio=0.0,
recompute_embeddings=False,
pruning_strategy="global",
zmq_port=None,
**kwargs,
):
return {"labels": [], "distances": []}
searcher = object.__new__(TestSearcher)
searcher.index_path = Path(temp_index_with_template)
searcher.index_dir = searcher.index_path.parent
meta_file = searcher.index_dir / f"{searcher.index_path.name}.meta.json"
with open(meta_file) as f:
searcher.meta = json.load(f)
searcher.embedding_model = searcher.meta["embedding_model"]
searcher.embedding_mode = searcher.meta.get("embedding_mode", "sentence-transformers")
searcher.embedding_options = searcher.meta.get("embedding_options", {})
query_template = "task: search result | query: "
original_query = "vector database"
# Capture queries from fallback path
fallback_queries = []
def mock_compute_embeddings(texts, model, mode, provider_options=None):
fallback_queries.extend(texts)
return np.random.rand(len(texts), 768).astype(np.float32)
with patch(
"leann.embedding_compute.compute_embeddings", side_effect=mock_compute_embeddings
):
searcher.compute_query_embedding(
query=original_query,
use_server_if_available=False,
query_template=query_template,
)
# Capture queries from server path
server_queries = []
def mock_ensure_server_running(passages_file, port):
return port
def mock_compute_embedding_via_server(chunks, port):
server_queries.extend(chunks)
return np.random.rand(len(chunks), 768).astype(np.float32)
searcher._ensure_server_running = mock_ensure_server_running
searcher._compute_embedding_via_server = mock_compute_embedding_via_server
searcher.compute_query_embedding(
query=original_query,
use_server_if_available=True,
query_template=query_template,
)
# Verify both paths produced identical templated queries
assert len(fallback_queries) == 1
assert len(server_queries) == 1
assert fallback_queries[0] == server_queries[0]
assert fallback_queries[0] == f"{query_template}{original_query}"
def test_query_template_with_empty_string(self, temp_index_with_template):
"""Test behavior with empty template string"""
from leann.searcher_base import BaseSearcher
class TestSearcher(BaseSearcher):
def search(
self,
query,
top_k,
complexity=64,
beam_width=1,
prune_ratio=0.0,
recompute_embeddings=False,
pruning_strategy="global",
zmq_port=None,
**kwargs,
):
return {"labels": [], "distances": []}
searcher = object.__new__(TestSearcher)
searcher.index_path = Path(temp_index_with_template)
searcher.index_dir = searcher.index_path.parent
meta_file = searcher.index_dir / f"{searcher.index_path.name}.meta.json"
with open(meta_file) as f:
searcher.meta = json.load(f)
searcher.embedding_model = searcher.meta["embedding_model"]
searcher.embedding_mode = searcher.meta.get("embedding_mode", "sentence-transformers")
searcher.embedding_options = searcher.meta.get("embedding_options", {})
captured_queries = []
def mock_compute_embeddings(texts, model, mode, provider_options=None):
captured_queries.extend(texts)
return np.random.rand(len(texts), 768).astype(np.float32)
with patch(
"leann.embedding_compute.compute_embeddings", side_effect=mock_compute_embeddings
):
searcher.compute_query_embedding(
query="vector database",
use_server_if_available=False,
query_template="", # Empty string
)
# Empty string is falsy, so no template should be applied
assert captured_queries[0] == "vector database"


@@ -97,17 +97,17 @@ def test_backend_options():
with tempfile.TemporaryDirectory() as temp_dir:
# Use smaller model in CI to avoid memory issues
is_ci = os.environ.get("CI") == "true"
embedding_model = (
"sentence-transformers/all-MiniLM-L6-v2" if is_ci else "facebook/contriever"
)
dimensions = 384 if is_ci else None
if os.environ.get("CI") == "true":
model_args = {
"embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
"dimensions": 384,
}
else:
model_args = {}
# Test HNSW backend (as shown in README)
hnsw_path = str(Path(temp_dir) / "test_hnsw.leann")
builder_hnsw = LeannBuilder(
backend_name="hnsw", embedding_model=embedding_model, dimensions=dimensions
)
builder_hnsw = LeannBuilder(backend_name="hnsw", **model_args)
builder_hnsw.add_text("Test document for HNSW backend")
builder_hnsw.build_index(hnsw_path)
assert Path(hnsw_path).parent.exists()
@@ -115,9 +115,7 @@ def test_backend_options():
# Test DiskANN backend (mentioned as available option)
diskann_path = str(Path(temp_dir) / "test_diskann.leann")
builder_diskann = LeannBuilder(
backend_name="diskann", embedding_model=embedding_model, dimensions=dimensions
)
builder_diskann = LeannBuilder(backend_name="diskann", **model_args)
builder_diskann.add_text("Test document for DiskANN backend")
builder_diskann.build_index(diskann_path)
assert Path(diskann_path).parent.exists()


@@ -266,378 +266,3 @@ class TestTokenTruncation:
assert result_tokens <= target_tokens, (
f"Should be ≤{target_tokens} tokens, got {result_tokens}"
)
class TestLMStudioHybridDiscovery:
"""Tests for LM Studio integration in get_model_token_limit() hybrid discovery.
These tests verify that get_model_token_limit() properly integrates with
the LM Studio SDK bridge for dynamic token limit discovery. The integration
should:
1. Detect LM Studio URLs (port 1234 or 'lmstudio'/'lm.studio' in URL)
2. Convert HTTP URLs to WebSocket format for SDK queries
3. Query LM Studio SDK and use discovered limit
4. Fall back to registry when SDK returns None
5. Execute AFTER Ollama detection but BEFORE registry fallback
All tests are written in the Red phase: they should FAIL initially because the
LM Studio detection and integration logic does not exist yet in get_model_token_limit().
"""
def test_get_model_token_limit_lmstudio_success(self, monkeypatch):
"""Verify LM Studio SDK query succeeds and returns detected limit.
When an LM Studio base_url is detected and the SDK query succeeds,
get_model_token_limit() should return the dynamically discovered
context length without falling back to the registry.
"""
# Mock _query_lmstudio_context_limit to return successful SDK query
def mock_query_lmstudio(model_name, base_url):
# Verify WebSocket URL was passed (not HTTP)
assert base_url.startswith("ws://"), (
f"Should convert HTTP to WebSocket format, got: {base_url}"
)
return 8192 # Successful SDK query
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
# Test with HTTP URL that should be converted to WebSocket
limit = get_model_token_limit(
model_name="custom-model", base_url="http://localhost:1234/v1"
)
assert limit == 8192, "Should return limit from LM Studio SDK query"
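    # Illustrative helper (hypothetical name, not part of the codebase): the URL
    # detection rules exercised by the tests in this class reduce to a check for
    # the default LM Studio port or an 'lmstudio' / 'lm.studio' keyword,
    # case-insensitively.
    def _looks_like_lmstudio_url(base_url):
        url = (base_url or "").lower()
        return ":1234" in url or "lmstudio" in url or "lm.studio" in url
    # _looks_like_lmstudio_url("http://127.0.0.1:1234/v1")   -> True
    # _looks_like_lmstudio_url("http://localhost:11434/api") -> False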
def test_get_model_token_limit_lmstudio_fallback_to_registry(self, monkeypatch):
"""Verify fallback to registry when LM Studio SDK returns None.
When LM Studio SDK query fails (returns None), get_model_token_limit()
should fall back to the EMBEDDING_MODEL_LIMITS registry.
"""
# Mock _query_lmstudio_context_limit to return None (SDK failure)
def mock_query_lmstudio(model_name, base_url):
return None # SDK query failed
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
# Test with known model that exists in registry
limit = get_model_token_limit(
model_name="nomic-embed-text", base_url="http://localhost:1234/v1"
)
# Should fall back to registry value
assert limit == 2048, "Should fall back to registry when SDK returns None"
def test_get_model_token_limit_lmstudio_port_detection(self, monkeypatch):
"""Verify detection of LM Studio via port 1234.
get_model_token_limit() should recognize port 1234 as an LM Studio
server and attempt an SDK query, regardless of hostname.
"""
query_called = False
def mock_query_lmstudio(model_name, base_url):
nonlocal query_called
query_called = True
return 4096
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
# Test with port 1234 (default LM Studio port)
limit = get_model_token_limit(model_name="test-model", base_url="http://127.0.0.1:1234/v1")
assert query_called, "Should detect port 1234 and call LM Studio SDK query"
assert limit == 4096, "Should return SDK query result"
@pytest.mark.parametrize(
"test_url,expected_limit,keyword",
[
("http://lmstudio.local:8080/v1", 16384, "lmstudio"),
("http://api.lm.studio:5000/v1", 32768, "lm.studio"),
],
)
def test_get_model_token_limit_lmstudio_url_keyword_detection(
self, monkeypatch, test_url, expected_limit, keyword
):
"""Verify detection of LM Studio via keywords in URL.
get_model_token_limit() should recognize 'lmstudio' or 'lm.studio'
in the URL as indicating an LM Studio server.
"""
query_called = False
def mock_query_lmstudio(model_name, base_url):
nonlocal query_called
query_called = True
return expected_limit
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
limit = get_model_token_limit(model_name="test-model", base_url=test_url)
assert query_called, f"Should detect '{keyword}' keyword and call SDK query"
assert limit == expected_limit, f"Should return SDK query result for {keyword}"
@pytest.mark.parametrize(
"input_url,expected_protocol,expected_host",
[
("http://localhost:1234/v1", "ws://", "localhost:1234"),
("https://lmstudio.example.com:1234/v1", "wss://", "lmstudio.example.com:1234"),
],
)
def test_get_model_token_limit_protocol_conversion(
self, monkeypatch, input_url, expected_protocol, expected_host
):
"""Verify HTTP/HTTPS URL is converted to WebSocket format for SDK query.
LM Studio SDK requires WebSocket URLs. get_model_token_limit() should:
1. Convert 'http://' to 'ws://'
2. Convert 'https://' to 'wss://'
3. Remove '/v1' or other path suffixes (SDK expects base URL)
4. Preserve host and port
"""
conversions_tested = []
def mock_query_lmstudio(model_name, base_url):
conversions_tested.append(base_url)
return 8192
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
get_model_token_limit(model_name="test-model", base_url=input_url)
# Verify conversion happened
assert len(conversions_tested) == 1, "Should have called SDK query once"
assert conversions_tested[0].startswith(expected_protocol), (
f"Should convert to {expected_protocol}"
)
assert expected_host in conversions_tested[0], (
f"Should preserve host and port: {expected_host}"
)
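    # Hypothetical conversion helper illustrating the rules asserted above
    # (http -> ws, https -> wss, drop the /v1 path, keep host and port); the real
    # implementation inside get_model_token_limit() may differ.
    from urllib.parse import urlparse

    def _http_to_ws_url_sketch(base_url):
        parsed = urlparse(base_url)
        scheme = "wss" if parsed.scheme == "https" else "ws"
        return f"{scheme}://{parsed.netloc}"
    # _http_to_ws_url_sketch("http://localhost:1234/v1")             -> "ws://localhost:1234"
    # _http_to_ws_url_sketch("https://lmstudio.example.com:1234/v1") -> "wss://lmstudio.example.com:1234"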
def test_get_model_token_limit_lmstudio_executes_after_ollama(self, monkeypatch):
"""Verify LM Studio detection happens AFTER Ollama detection.
The hybrid discovery order should be:
1. Ollama dynamic discovery (port 11434 or 'ollama' in URL)
2. LM Studio dynamic discovery (port 1234 or 'lmstudio' in URL)
3. Registry fallback
If both Ollama and LM Studio patterns match, Ollama should take precedence.
This test verifies that LM Studio is checked but doesn't interfere with Ollama.
"""
ollama_called = False
lmstudio_called = False
def mock_query_ollama(model_name, base_url):
nonlocal ollama_called
ollama_called = True
return 2048 # Ollama query succeeds
def mock_query_lmstudio(model_name, base_url):
nonlocal lmstudio_called
lmstudio_called = True
return None # Should not be reached if Ollama succeeds
monkeypatch.setattr(
"leann.embedding_compute._query_ollama_context_limit",
mock_query_ollama,
)
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
# Test with Ollama URL
limit = get_model_token_limit(
model_name="test-model", base_url="http://localhost:11434/api"
)
assert ollama_called, "Should attempt Ollama query first"
assert not lmstudio_called, "Should not attempt LM Studio query when Ollama succeeds"
assert limit == 2048, "Should return Ollama result"
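    # Sketch of the discovery precedence the test above pins down (Ollama, then
    # LM Studio, then the static registry). The query_* callables stand in for the
    # private helpers patched in these tests and are passed in only to keep this
    # illustration self-contained; the real function interleaves URL detection
    # with these calls.
    def _hybrid_token_limit_order_sketch(model_name, base_url, query_ollama, query_lmstudio, registry, default=512):
        url = (base_url or "").lower()
        if ":11434" in url or "ollama" in url:
            limit = query_ollama(model_name, base_url)
            if limit is not None:
                return limit
        if ":1234" in url or "lmstudio" in url or "lm.studio" in url:
            limit = query_lmstudio(model_name, base_url)
            if limit is not None:
                return limit
        return registry.get(model_name, default)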
def test_get_model_token_limit_lmstudio_not_detected_for_non_lmstudio_urls(self, monkeypatch):
"""Verify LM Studio SDK query is NOT called for non-LM Studio URLs.
Only URLs with port 1234 or 'lmstudio'/'lm.studio' keywords should
trigger LM Studio SDK queries. Other URLs should skip to registry fallback.
"""
lmstudio_called = False
def mock_query_lmstudio(model_name, base_url):
nonlocal lmstudio_called
lmstudio_called = True
return 8192
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
# Test with non-LM Studio URLs
test_cases = [
"http://localhost:8080/v1", # Different port
"http://openai.example.com/v1", # Different service
"http://localhost:3000/v1", # Another port
]
for base_url in test_cases:
lmstudio_called = False # Reset for each test
get_model_token_limit(model_name="nomic-embed-text", base_url=base_url)
assert not lmstudio_called, f"Should NOT call LM Studio SDK for URL: {base_url}"
def test_get_model_token_limit_lmstudio_case_insensitive_detection(self, monkeypatch):
"""Verify LM Studio detection is case-insensitive for keywords.
Keywords 'lmstudio' and 'lm.studio' should be detected regardless
of case (LMStudio, LMSTUDIO, LmStudio, etc.).
"""
query_called = False
def mock_query_lmstudio(model_name, base_url):
nonlocal query_called
query_called = True
return 8192
monkeypatch.setattr(
"leann.embedding_compute._query_lmstudio_context_limit",
mock_query_lmstudio,
)
# Test various case variations
test_cases = [
"http://LMStudio.local:8080/v1",
"http://LMSTUDIO.example.com/v1",
"http://LmStudio.local/v1",
"http://api.LM.STUDIO:5000/v1",
]
for base_url in test_cases:
query_called = False # Reset for each test
limit = get_model_token_limit(model_name="test-model", base_url=base_url)
assert query_called, f"Should detect LM Studio in URL: {base_url}"
assert limit == 8192, f"Should return SDK result for URL: {base_url}"
class TestTokenLimitCaching:
"""Tests for token limit caching to prevent repeated SDK/API calls.
Caching prevents duplicate SDK/API calls within the same Python process,
which is important because:
1. LM Studio SDK load() can load duplicate model instances
2. Ollama /api/show queries add latency
3. Registry lookups are pure overhead
The cache is process-scoped and resets between leann build invocations.
"""
def setup_method(self):
"""Clear cache before each test."""
from leann.embedding_compute import _token_limit_cache
_token_limit_cache.clear()
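    # For orientation, the caching contract exercised by the tests below amounts to
    # a plain process-scoped dict keyed by (model_name, base_url or ""), consulted
    # before any SDK/API/registry work. A minimal self-contained sketch of that
    # pattern (only _token_limit_cache and the key shape come from the tests; the
    # helper name and lookup stub are illustrative):
    _token_limit_cache_sketch = {}

    def _get_token_limit_cached_sketch(model_name, base_url="", default=512):
        key = (model_name, base_url or "")
        if key in _token_limit_cache_sketch:
            return _token_limit_cache_sketch[key]
        limit = default  # stand-in for the Ollama / LM Studio / registry lookups
        _token_limit_cache_sketch[key] = limit
        return limit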
def test_registry_lookup_is_cached(self):
"""Verify that registry lookups are cached."""
from leann.embedding_compute import _token_limit_cache
# First call
limit1 = get_model_token_limit("text-embedding-3-small")
assert limit1 == 8192
# Verify it's in cache
cache_key = ("text-embedding-3-small", "")
assert cache_key in _token_limit_cache
assert _token_limit_cache[cache_key] == 8192
# Second call should use cache
limit2 = get_model_token_limit("text-embedding-3-small")
assert limit2 == 8192
def test_default_fallback_is_cached(self):
"""Verify that default fallbacks are cached."""
from leann.embedding_compute import _token_limit_cache
# First call with unknown model
limit1 = get_model_token_limit("unknown-model-xyz", default=512)
assert limit1 == 512
# Verify it's in cache
cache_key = ("unknown-model-xyz", "")
assert cache_key in _token_limit_cache
assert _token_limit_cache[cache_key] == 512
# Second call should use cache
limit2 = get_model_token_limit("unknown-model-xyz", default=512)
assert limit2 == 512
def test_different_urls_create_separate_cache_entries(self):
"""Verify that different base_urls create separate cache entries."""
from leann.embedding_compute import _token_limit_cache
# Same model, different URLs
limit1 = get_model_token_limit("nomic-embed-text", base_url="http://localhost:11434")
limit2 = get_model_token_limit("nomic-embed-text", base_url="http://localhost:1234/v1")
# Both should find the model in registry (2048)
assert limit1 == 2048
assert limit2 == 2048
# But they should be separate cache entries
cache_key1 = ("nomic-embed-text", "http://localhost:11434")
cache_key2 = ("nomic-embed-text", "http://localhost:1234/v1")
assert cache_key1 in _token_limit_cache
assert cache_key2 in _token_limit_cache
assert len(_token_limit_cache) == 2
def test_cache_prevents_repeated_lookups(self):
"""Verify that cache prevents repeated registry/API lookups."""
from leann.embedding_compute import _token_limit_cache
model_name = "text-embedding-ada-002"
# First call - should add to cache
assert len(_token_limit_cache) == 0
limit1 = get_model_token_limit(model_name)
cache_size_after_first = len(_token_limit_cache)
assert cache_size_after_first == 1
# Multiple subsequent calls - cache size should not change
for _ in range(5):
limit = get_model_token_limit(model_name)
assert limit == limit1
assert len(_token_limit_cache) == cache_size_after_first
def test_versioned_model_names_cached_correctly(self):
"""Verify that versioned model names (e.g., model:tag) are cached."""
from leann.embedding_compute import _token_limit_cache
# Model with version tag
limit = get_model_token_limit("nomic-embed-text:latest", base_url="http://localhost:11434")
assert limit == 2048
# Should be cached with full name including version
cache_key = ("nomic-embed-text:latest", "http://localhost:11434")
assert cache_key in _token_limit_cache
assert _token_limit_cache[cache_key] == 2048

uv.lock (generated, 1163 changes): file diff suppressed because it is too large.