Compare commits
29 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e3defbca84 | |
| | e407f63977 | |
| | 7add391b2c | |
| | efd6373b32 | |
| | d502fa24b0 | |
| | 258a9a5c7f | |
| | 5d41ac6115 | |
| | 2a0fdb49b8 | |
| | 9d1b7231b6 | |
| | ed3095b478 | |
| | 88eca75917 | |
| | 42de27e16a | |
| | c083bda5b7 | |
| | e86da38726 | |
| | 99076e38bc | |
| | 9698c1a02c | |
| | 851f0f04c3 | |
| | ae16d9d888 | |
| | 6e1af2eb0c | |
| | 7695dd0d50 | |
| | c2065473ad | |
| | 5f3870564d | |
| | c214b2e33e | |
| | 2420c5fd35 | |
| | f48f526f0a | |
| | 5dd74982ba | |
| | e07aaf52a7 | |
| | 30e5f12616 | |
| | 594427bf87 | |
.github/workflows/build-and-publish.yml (new file, vendored, 256 lines)

@@ -0,0 +1,256 @@
```yaml
name: Build and Publish to PyPI

on:
  release:
    types: [published]
  push:
    tags:
      - 'v*'
  workflow_dispatch:
    inputs:
      publish:
        description: 'Publish to PyPI'
        required: true
        default: 'false'
        type: choice
        options:
          - 'false'
          - 'test'
          - 'prod'

jobs:
  # Build pure Python package: leann-core
  build-core:
    name: Build leann-core
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Install build dependencies
        run: |
          uv pip install --system build twine

      - name: Build package
        run: |
          cd packages/leann-core
          uv build

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: leann-core-dist
          path: packages/leann-core/dist/

  # Build binary package: leann-backend-hnsw (default backend)
  build-hnsw:
    name: Build leann-backend-hnsw
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        python-version: ['3.9', '3.10', '3.11', '3.12']
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Install system dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y libomp-dev libboost-all-dev libzmq3-dev \
            pkg-config libopenblas-dev patchelf

      - name: Install system dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          brew install libomp boost zeromq

      - name: Install build dependencies
        run: |
          uv pip install --system scikit-build-core numpy swig
          uv pip install --system auditwheel delocate

      - name: Build wheel
        run: |
          cd packages/leann-backend-hnsw
          uv build --wheel

      - name: Repair wheel (Linux)
        if: runner.os == 'Linux'
        run: |
          cd packages/leann-backend-hnsw
          auditwheel repair dist/*.whl -w dist_repaired
          rm -rf dist
          mv dist_repaired dist

      - name: Repair wheel (macOS)
        if: runner.os == 'macOS'
        run: |
          cd packages/leann-backend-hnsw
          delocate-wheel -w dist_repaired -v dist/*.whl
          rm -rf dist
          mv dist_repaired dist

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: hnsw-${{ matrix.os }}-py${{ matrix.python-version }}
          path: packages/leann-backend-hnsw/dist/

  # Build binary package: leann-backend-diskann (multi-platform)
  build-diskann:
    name: Build leann-backend-diskann
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        python-version: ['3.9', '3.10', '3.11', '3.12']
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Install system dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y libomp-dev libboost-all-dev libaio-dev libzmq3-dev \
            protobuf-compiler libprotobuf-dev libabsl-dev patchelf

          # Install Intel MKL using Intel's installer
          wget https://registrationcenter-download.intel.com/akdlm/IRC_NAS/79153e0f-74d7-45af-b8c2-258941adf58a/intel-onemkl-2025.0.0.940.sh
          sudo sh intel-onemkl-2025.0.0.940.sh -a --components intel.oneapi.lin.mkl.devel --action install --eula accept -s
          source /opt/intel/oneapi/setvars.sh
          echo "MKLROOT=/opt/intel/oneapi/mkl/latest" >> $GITHUB_ENV
          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/mkl/latest/lib/intel64:$LD_LIBRARY_PATH" >> $GITHUB_ENV

      - name: Install system dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          brew install libomp boost zeromq protobuf
          # MKL is not available on Homebrew, but DiskANN can work without it

      - name: Install build dependencies
        run: |
          uv pip install --system scikit-build-core numpy Cython pybind11
          if [[ "$RUNNER_OS" == "Linux" ]]; then
            uv pip install --system auditwheel
          else
            uv pip install --system delocate
          fi

      - name: Build wheel
        run: |
          cd packages/leann-backend-diskann
          uv build --wheel

      - name: Repair wheel (Linux)
        if: runner.os == 'Linux'
        run: |
          cd packages/leann-backend-diskann
          auditwheel repair dist/*.whl -w dist_repaired
          rm -rf dist
          mv dist_repaired dist

      - name: Repair wheel (macOS)
        if: runner.os == 'macOS'
        run: |
          cd packages/leann-backend-diskann
          delocate-wheel -w dist_repaired -v dist/*.whl
          rm -rf dist
          mv dist_repaired dist

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: diskann-${{ matrix.os }}-py${{ matrix.python-version }}
          path: packages/leann-backend-diskann/dist/

  # Build meta-package: leann (build last)
  build-meta:
    name: Build leann meta-package
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Install build dependencies
        run: |
          uv pip install --system build

      - name: Build package
        run: |
          cd packages/leann
          uv build

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: leann-meta-dist
          path: packages/leann/dist/

  # Publish to PyPI
  publish:
    name: Publish to PyPI
    needs: [build-core, build-hnsw, build-diskann, build-meta]
    runs-on: ubuntu-latest
    if: github.event_name == 'release' || github.event.inputs.publish != 'false'

    steps:
      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: dist

      - name: Flatten directory structure
        run: |
          mkdir -p all_wheels
          find dist -name "*.whl" -exec cp {} all_wheels/ \;
          find dist -name "*.tar.gz" -exec cp {} all_wheels/ \;

      - name: Publish to Test PyPI
        if: github.event.inputs.publish == 'test' || github.event_name == 'workflow_dispatch'
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          password: ${{ secrets.TEST_PYPI_API_TOKEN }}
          repository-url: https://test.pypi.org/legacy/
          packages-dir: all_wheels/

      - name: Publish to PyPI
        if: github.event_name == 'release' || github.event.inputs.publish == 'prod'
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}
          packages-dir: all_wheels/
```
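For ad-hoc runs, the `workflow_dispatch` trigger above exposes a single `publish` input. A minimal sketch of driving it from the GitHub CLI (assumes `gh` is installed and authenticated for this repository; the workflow file name matches the path above):

```bash
# Build everything without publishing (publish defaults to 'false')
gh workflow run build-and-publish.yml -f publish=false

# Build and upload the artifacts to Test PyPI
gh workflow run build-and-publish.yml -f publish=test

# Follow the run that was just started
gh run watch
```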
README.md (91 changed lines)
````diff
@@ -12,11 +12,11 @@
 The smallest vector index in the world. RAG Everything with LEANN!
 </h2>
 
-LEANN is a revolutionary vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **[97% less storage]** than traditional solutions **without accuracy loss**.
+LEANN is a revolutionary vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.
 
-LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
+LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration Fig →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
 
-**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can search your **[file system](#process-any-documents-pdf-txt-md)**, **[emails](#search-your-entire-life)**, **[browser history](#time-machine-for-the-web)**, **[chat history](#wechat-detective)**, or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
+**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)**, or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
 
 
@@ -37,7 +37,7 @@ LEANN achieves this through *graph-based selective recomputation* with *high-deg
 
 ✨ **No Accuracy Loss:** Maintain the same search quality as heavyweight solutions while using 97% less storage.
 
-## Quick Start in 1 minute
+## Installation
 
 ```bash
 git clone git@github.com:yichuan-w/LEANN.git leann
@@ -47,36 +47,30 @@ git submodule update --init --recursive
 
 **macOS:**
 ```bash
-brew install llvm libomp boost protobuf zeromq
+brew install llvm libomp boost protobuf zeromq pkgconf
-export CC=$(brew --prefix llvm)/bin/clang
-export CXX=$(brew --prefix llvm)/bin/clang++
-
 # Install with HNSW backend (default, recommended for most users)
-uv sync
-
-# Or add DiskANN backend if you want to test more options
-uv sync --extra diskann
+# Install uv first if you don't have it:
+# curl -LsSf https://astral.sh/uv/install.sh | sh
+# See: https://docs.astral.sh/uv/getting-started/installation/#installation-methods
+CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv sync
 ```
 
-**Linux (Ubuntu/Debian):**
+**Linux:**
 ```bash
 sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
 
 # Install with HNSW backend (default, recommended for most users)
 uv sync
 
-# Or add DiskANN backend if you want to test more options
-uv sync --extra diskann
 ```
 
 
 
 **Ollama Setup (Recommended for full privacy):**
 
 > *You can skip this installation if you only want to use OpenAI API for generation.*
 
 
-*macOS:*
+**macOS:**
 
 First, [download Ollama for macOS](https://ollama.com/download/mac).
 
@@ -85,7 +79,7 @@ First, [download Ollama for macOS](https://ollama.com/download/mac).
 ollama pull llama3.2:1b
 ```
 
-*Linux:*
+**Linux:**
 ```bash
 # Install Ollama
 curl -fsSL https://ollama.ai/install.sh | sh
@@ -97,9 +91,10 @@ ollama serve &
 ollama pull llama3.2:1b
 ```
 
-## Dead Simple API
+## Quick Start in 30s
 
-Just 3 lines of code. Our declarative API makes RAG as easy as writing a config file:
+Our declarative API makes RAG as easy as writing a config file.
+[Try in this ipynb file →](demo.ipynb)
 
 ```python
 from leann.api import LeannBuilder, LeannSearcher, LeannChat
@@ -130,24 +125,22 @@ response = chat.ask(
 )
 ```
 
-**That's it.** No cloud setup, no API keys, no "fine-tuning". Just your data, your questions, your laptop.
+## RAG on Everything!
 
-[Try the interactive demo →](demo.ipynb)
+LEANN supports RAG on various data sources including documents (.pdf, .txt, .md), Apple Mail, Google Search History, WeChat, and more.
 
-## Wild Things You Can Do
+### 📄 Personal Data Manager: Process Any Documents (.pdf, .txt, .md)!
 
-LEANN supports RAGing a lot of data sources, like .pdf, .txt, .md, and also supports RAGing your WeChat, Google Search History, and more.
+Ask questions directly about your personal PDFs, documents, and any directory containing your files!
 
-### Process Any Documents (.pdf, .txt, .md)
+The example below asks a question about summarizing two papers (uses default data in `examples/data`):
 
-Above we showed the Python API, while this CLI script demonstrates the same concepts while directly processing PDFs and documents, and even any directory that stores your personal files!
 
-The following scripts use Ollama `qwen3:8b` by default, so you need `ollama pull qwen3:8b` first. For other models: `--llm openai --model gpt-4o` (requires `OPENAI_API_KEY` environment variable) or `--llm hf --model Qwen/Qwen3-4B`.
 
 ```bash
 # Drop your PDFs, .txt, .md files into examples/data/
 uv run ./examples/main_cli_example.py
+```
 
+```
 # Or use python directly
 source .venv/bin/activate
 python ./examples/main_cli_example.py
@@ -155,14 +148,13 @@ python ./examples/main_cli_example.py
 
 
 
-**Works with any text format** - research papers, personal notes, presentations. Built with LlamaIndex for document parsing.
+### 📧 Your Personal Email Secretary: RAG on Apple Mail!
 
-### Search Your Entire Life
+**Note:** You need to grant full disk access to your terminal/VS Code in System Preferences → Privacy & Security → Full Disk Access.
 ```bash
-python examples/mail_reader_leann.py
-# "What's the number of class recommend to take per semester for incoming EECS students?"
+python examples/mail_reader_leann.py --query "What's the food I ordered by doordash or Uber eat mostly?"
 ```
-**90K emails → 14MB.** Finally, search your email like you search Google.
+**780K email chunks → 78MB storage** Finally, search your email like you search Google.
 
 <details>
 <summary><strong>📋 Click to expand: Command Examples</strong></summary>
@@ -195,12 +187,11 @@ Once the index is built, you can ask questions like:
 - "Show me emails about travel expenses"
 </details>
 
-### Time Machine for the Web
+### 🔍 Time Machine for the Web: RAG Your Entire Google Browser History!
 ```bash
-python examples/google_history_reader_leann.py
-# "Tell me my browser history about machine learning system stuff?"
+python examples/google_history_reader_leann.py --query "Tell me my browser history about machine learning?"
 ```
-**38K browser entries → 6MB.** Your browser history becomes your personal search engine.
+**38K browser entries → 6MB storage.** Your browser history becomes your personal search engine.
 
 <details>
 <summary><strong>📋 Click to expand: Command Examples</strong></summary>
@@ -249,13 +240,13 @@ Once the index is built, you can ask questions like:
 
 </details>
 
-### WeChat Detective
+### 💬 WeChat Detective: Unlock Your Golden Memories!
 
 ```bash
-python examples/wechat_history_reader_leann.py
-# "Show me all group chats about weekend plans"
+python examples/wechat_history_reader_leann.py --query "Show me all group chats about weekend plans"
 ```
-**400K messages → 64MB.** Search years of chat history in any language.
+**400K messages → 64MB storage** Search years of chat history in any language.
 
 
 <details>
 <summary><strong>🔧 Click to expand: Installation Requirements</strong></summary>
@@ -266,7 +257,13 @@ First, you need to install the WeChat exporter:
 sudo packages/wechat-exporter/wechattweak-cli install
 ```
 
-**Troubleshooting**: If you encounter installation issues, check the [WeChatTweak-CLI issues page](https://github.com/sunnyyoung/WeChatTweak-CLI/issues/41).
+**Troubleshooting:**
+- **Installation issues**: Check the [WeChatTweak-CLI issues page](https://github.com/sunnyyoung/WeChatTweak-CLI/issues/41)
+- **Export errors**: If you encounter the error below, try restarting WeChat
+```
+Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed.
+Failed to find or export WeChat data. Exiting.
+```
 </details>
 
 <details>
@@ -403,11 +400,11 @@ Same dataset, same hardware, same embedding model. LEANN just works better.
 
 ### Storage Usage Comparison
 
-| System | DPR (2.1M chunks) | RPJ-wiki (60M chunks) | Chat history (400K messages) | Apple emails (90K messages chunks) |Google Search History (38K entries)
+| System | DPR (2.1M chunks) | RPJ-wiki (60M chunks) | Chat history (400K messages) | Apple emails (780K messages chunks) |Google Search History (38K entries)
 |-----------------------|------------------|------------------------|-----------------------------|------------------------------|------------------------------|
-| Traditional Vector DB(FAISS) | 3.8 GB | 201 GB | 1.8G | 305.8 MB |130.4 MB |
+| Traditional Vector DB(FAISS) | 3.8 GB | 201 GB | 1.8G | 2.4G |130.4 MB |
-| **LEANN** | **324 MB** | **6 GB** | **64 MB** | **14.8 MB** |**6.4MB** |
+| **LEANN** | **324 MB** | **6 GB** | **64 MB** | **79 MB** |**6.4MB** |
-| **Reduction** | **91% smaller** | **97% smaller** | **97% smaller** | **95% smaller** |**95% smaller** |
+| **Reduction** | **91% smaller** | **97% smaller** | **97% smaller** | **97% smaller** |**95% smaller** |
 
 <!-- ### Memory Usage Comparison
 
````
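The paragraph removed in the hunk above still describes how the example scripts select a generator. A small sketch using those same flags (flag and model names are taken from that paragraph, not verified beyond it; adjust paths to your checkout):

```bash
# Default generator: a local Ollama model (pull it once beforehand)
ollama pull qwen3:8b
uv run ./examples/main_cli_example.py

# Or route generation through the OpenAI API (needs OPENAI_API_KEY in the environment)
uv run ./examples/main_cli_example.py --llm openai --model gpt-4o

# Or run a local Hugging Face model instead
uv run ./examples/main_cli_example.py --llm hf --model Qwen/Qwen3-4B
```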
demo.ipynb (275 changed lines)

@@ -1,37 +1,296 @@

The notebook's single quick-start cell (which imported `LeannBuilder, LeannSearcher, LeannChat` together, used numbered `# 1. / # 2. / # 3.` comments, and configured `llm_config = {"type": "ollama", "model": "llama3.2:1b"}`) is split into titled steps, and executed outputs are now committed. Cleaned view of the new notebook:

Markdown cell:

# Quick Start in 30s

Code cell:

```python
# install this if you areusing colab
! pip install leann
```

Markdown cell:

## Build the index

Code cell (execution_count 1):

```python
from leann.api import LeannBuilder

builder = LeannBuilder(backend_name="hnsw")
builder.add_text("C# is a powerful programming language")
builder.add_text("Python is a powerful programming language and it is very popular")
builder.add_text("Machine learning transforms industries")
builder.add_text("Neural networks process complex data")
builder.add_text("Leann is a great storage saving engine for RAG on your MacBook")
builder.build_index("knowledge.leann")
```

Committed output:

```text
INFO: Registering backend 'hnsw'
/Users/yichuan/Desktop/code/LEANN/leann/.venv/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
  from .autonotebook import tqdm as notebook_tqdm
INFO:sentence_transformers.SentenceTransformer:Load pretrained SentenceTransformer: facebook/contriever
WARNING:sentence_transformers.SentenceTransformer:No sentence-transformers model found with name facebook/contriever. Creating a new one with mean pooling.
Writing passages: 100%|██████████| 5/5 [00:00<00:00, 31254.13chunk/s]
Batches: 100%|██████████| 1/1 [00:00<00:00, 12.19it/s]
WARNING:leann_backend_hnsw.hnsw_backend:Converting data to float32, shape: (5, 768)
INFO:leann_backend_hnsw.hnsw_backend:INFO: Converting HNSW index to CSR-pruned format...
M: 64 for level: 0
Starting conversion: knowledge.index -> knowledge.csr.tmp
[0.00s] Reading Index HNSW header...
[0.00s] Header read: d=768, ntotal=5
[0.00s] Reading HNSW struct vectors...
  Reading vector (dtype=<class 'numpy.float64'>, fmt='d')... Count=6, Bytes=48
[0.00s] Read assign_probas (6)
  Reading vector (dtype=<class 'numpy.int32'>, fmt='i')... Count=7, Bytes=28
[0.11s] Read cum_nneighbor_per_level (7)
  Reading vector (dtype=<class 'numpy.int32'>, fmt='i')... Count=5, Bytes=20
[0.23s] Read levels (5)
[0.34s] Probing for compact storage flag...
[0.34s] Found compact flag: False
[0.34s] Compact flag is False, reading original format...
[0.34s] Probing for potential extra byte before non-compact offsets...
[0.34s] Found and consumed an unexpected 0x00 byte.
  Reading vector (dtype=<class 'numpy.uint64'>, fmt='Q')... Count=6, Bytes=48
[0.34s] Read offsets (6)
[0.44s] Attempting to read neighbors vector...
  Reading vector (dtype=<class 'numpy.int32'>, fmt='i')... Count=320, Bytes=1280
[0.44s] Read neighbors (320)
[0.54s] Read scalar params (ep=4, max_lvl=0)
[0.54s] Checking for storage data...
[0.54s] Found storage fourcc: 49467849.
[0.54s] Converting to CSR format...
[0.54s] Conversion loop finished.
[0.54s] Running validation checks...
  Checking total valid neighbor count...
  OK: Total valid neighbors = 20
  Checking final pointer indices...
  OK: Final pointers match data size.
[0.54s] Deleting original neighbors and offsets arrays...
  CSR Stats: |data|=20, |level_ptr|=10
[0.63s] Writing CSR HNSW graph data in FAISS-compatible order...
  Pruning embeddings: Writing NULL storage marker.
[0.73s] Conversion complete.
INFO:leann_backend_hnsw.hnsw_backend:✅ CSR conversion successful.
INFO:leann_backend_hnsw.hnsw_backend:INFO: Replaced original index with CSR-pruned version at 'knowledge.index'
```

Markdown cell:

## Search with real-time embeddings

Code cell (execution_count 2):

```python
from leann.api import LeannSearcher

searcher = LeannSearcher("knowledge.leann")
results = searcher.search("programming languages", top_k=2)
results
```

Committed output:

```text
INFO:leann.api:🔍 LeannSearcher.search() called:
INFO:leann.api:   Query: 'programming languages'
INFO:leann.api:   Top_k: 2
INFO:leann.api:   Additional kwargs: {}
INFO:leann.embedding_server_manager:Port 5557 has incompatible server, trying next port...
INFO:leann.embedding_server_manager:Port 5558 has incompatible server, trying next port...
INFO:leann.embedding_server_manager:Port 5559 has incompatible server, trying next port...
INFO:leann.embedding_server_manager:Found compatible server on port 5560
INFO:leann.embedding_server_manager:Using existing compatible server on port 5560
INFO:leann.api:   Launching server time: 0.05758476257324219 seconds
INFO:leann.embedding_server_manager:Found compatible server on port 5560
INFO:leann.embedding_server_manager:Using existing compatible server on port 5560
INFO:leann.api:   Generated embedding shape: (1, 768)
INFO:leann.api:   Embedding time: 0.05983591079711914 seconds
INFO:leann.api:   Search time: 0.039762258529663086 seconds
INFO:leann.api:   Backend returned: labels=2 results
INFO:leann.api:   Processing 2 passage IDs:
INFO:leann.api:     1. passage_id='0' -> SUCCESS: C# is a powerful programming language...
INFO:leann.api:     2. passage_id='1' -> SUCCESS: Python is a powerful programming language and it is very popular...
INFO:leann.api:   Final enriched results: 2 passages
[read_HNSW - CSR NL v4] Reading metadata & CSR indices (manual offset)...
[read_HNSW NL v4] Read levels vector, size: 5
[read_HNSW NL v4] Reading Compact Storage format indices...
[read_HNSW NL v4] Read compact_level_ptr, size: 10
[read_HNSW NL v4] Read compact_node_offsets, size: 6
[read_HNSW NL v4] Read entry_point: 4, max_level: 0
[read_HNSW NL v4] Read storage fourcc: 0x6c6c756e
[read_HNSW NL v4 FIX] Detected FileIOReader. Neighbors size field offset: 326
[read_HNSW NL v4] Reading neighbors data into memory.
[read_HNSW NL v4] Read neighbors data, size: 20
[read_HNSW NL v4] Finished reading metadata and CSR indices.
INFO: Skipping external storage loading, since is_recompute is true.
ZmqDistanceComputer initialized: d=768, metric=0
[SearchResult(id='0', score=np.float32(0.9646692), text='C# is a powerful programming language', metadata={}),
 SearchResult(id='1', score=np.float32(0.91955304), text='Python is a powerful programming language and it is very popular', metadata={})]
```

Markdown cell:

## Chat with LEANN using retrieved results

Code cell (execution_count 1):

```python
from leann.api import LeannChat

llm_config = {
    "type": "hf",
    "model": "Qwen/Qwen3-0.6B"
}

chat = LeannChat(index_path="knowledge.leann", llm_config=llm_config)
response = chat.ask(
    "Compare the two retrieved programming languages and say which one is more popular today.",
    top_k=2,
)
response
```

Committed output:

```text
INFO:leann.chat:Attempting to create LLM of type='hf' with model='Qwen/Qwen3-0.6B'
INFO:leann.chat:Initializing HFChat with model='Qwen/Qwen3-0.6B'
INFO: Registering backend 'hnsw'
[read_HNSW - CSR NL v4] Reading metadata & CSR indices (manual offset)...
[read_HNSW NL v4] Read levels vector, size: 5
[read_HNSW NL v4] Reading Compact Storage format indices...
[read_HNSW NL v4] Read compact_level_ptr, size: 10
[read_HNSW NL v4] Read compact_node_offsets, size: 6
[read_HNSW NL v4] Read entry_point: 4, max_level: 0
[read_HNSW NL v4] Read storage fourcc: 0x6c6c756e
[read_HNSW NL v4 FIX] Detected FileIOReader. Neighbors size field offset: 326
[read_HNSW NL v4] Reading neighbors data into memory.
[read_HNSW NL v4] Read neighbors data, size: 20
[read_HNSW NL v4] Finished reading metadata and CSR indices.
INFO: Skipping external storage loading, since is_recompute is true.
/Users/yichuan/Desktop/code/LEANN/leann/.venv/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
  from .autonotebook import tqdm as notebook_tqdm
INFO:leann.chat:MPS is available. Using Apple Silicon GPU.
INFO:leann.api:🔍 LeannSearcher.search() called:
INFO:leann.api:   Query: 'Compare the two retrieved programming languages and say which one is more popular today.'
INFO:leann.api:   Top_k: 2
INFO:leann.api:   Additional kwargs: {}
INFO:leann.embedding_server_manager:Port 5557 has incompatible server, trying next port...
INFO:leann.embedding_server_manager:Port 5558 has incompatible server, trying next port...
INFO:leann.embedding_server_manager:Port 5559 has incompatible server, trying next port...
INFO:leann.embedding_server_manager:Found compatible server on port 5560
INFO:leann.embedding_server_manager:Using existing compatible server on port 5560
INFO:leann.api:   Launching server time: 0.11421084403991699 seconds
INFO:leann.embedding_server_manager:Found compatible server on port 5560
INFO:leann.embedding_server_manager:Using existing compatible server on port 5560
INFO:leann.api:   Generated embedding shape: (1, 768)
INFO:leann.api:   Embedding time: 0.1147918701171875 seconds
INFO:leann.api:   Search time: 0.05468583106994629 seconds
INFO:leann.api:   Backend returned: labels=2 results
INFO:leann.api:   Processing 2 passage IDs:
INFO:leann.api:     1. passage_id='1' -> SUCCESS: Python is a powerful programming language and it is very popular...
INFO:leann.api:     2. passage_id='0' -> SUCCESS: C# is a powerful programming language...
INFO:leann.api:   Final enriched results: 2 passages
INFO:leann.chat:Generating with HuggingFace model, config: {'max_new_tokens': 512, 'temperature': 0.7, 'top_p': 0.9, 'do_sample': True, 'pad_token_id': 151645, 'eos_token_id': 151645}
ZmqDistanceComputer initialized: d=768, metric=0
'<think>\n\n</think>\n\nBased on the context provided, both Python and C# are mentioned as powerful programming languages, but no specific information is given about their popularity today. However, generally, Python is more popular for data science, web development, and other tasks, while C# is widely used in enterprise applications and game development. Since the context does not explicitly state which is more popular, but Python is often considered more popular in many cases, the best answer would be:\n\n**Python is more popular today.**'
```
docs/RELEASE.md (new file, 54 lines)

@@ -0,0 +1,54 @@

````markdown
# Release Guide

## One-line Release 🚀

```bash
./scripts/release.sh 0.1.1
```

That's it! This script will:
1. Update all package versions
2. Commit and push changes
3. Create GitHub release
4. CI automatically builds and publishes to PyPI

## Manual Testing Before Release

For testing specific packages locally (especially DiskANN on macOS):

```bash
# Build specific package locally
./scripts/build_and_test.sh diskann  # or hnsw, core, meta, all

# Test installation in a clean environment
python -m venv test_env
source test_env/bin/activate
pip install packages/*/dist/*.whl

# Upload to Test PyPI (optional)
./scripts/upload_to_pypi.sh test

# Upload to Production PyPI (use with caution)
./scripts/upload_to_pypi.sh prod
```

### Why Manual Build for DiskANN?

DiskANN's complex dependencies (protobuf, abseil, etc.) sometimes require local testing before release. The build script will:
- Compile the C++ extension
- Use `delocate` (macOS) or `auditwheel` (Linux) to bundle system libraries
- Create a self-contained wheel with no external dependencies

## First-time setup

1. Install GitHub CLI:
```bash
brew install gh
gh auth login
```

2. Set PyPI token in GitHub:
```bash
gh secret set PYPI_API_TOKEN
# Paste your PyPI token when prompted
```
````
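RELEASE.md above notes that the build script uses `delocate` (macOS) or `auditwheel` (Linux) to bundle system libraries into the wheel. A quick, optional sanity check of the repaired wheels before uploading (assumes those tools are installed from the build step; the wheel path is illustrative):

```bash
# Linux: show the platform tag and the external libraries vendored into the wheel
auditwheel show packages/leann-backend-diskann/dist/*.whl

# macOS: list the dylibs each wheel links against
delocate-listdeps --all packages/leann-backend-diskann/dist/*.whl
```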
```diff
@@ -22,7 +22,7 @@ def get_mail_path():
     return os.path.join(home_dir, "Library", "Mail")
 
 # Default mail path for macOS
-# DEFAULT_MAIL_PATH = "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data"
+DEFAULT_MAIL_PATH = "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data"
 
 def create_leann_index_from_multiple_sources(messages_dirs: List[Path], index_path: str = "mail_index.leann", max_count: int = -1, include_html: bool = False, embedding_model: str = "facebook/contriever"):
     """
@@ -77,7 +77,7 @@ def create_leann_index_from_multiple_sources(messages_dirs: List[Path], index_pa
     print(f"\nTotal loaded {len(all_documents)} email documents from {len(messages_dirs)} directories and starting to split them into chunks")
 
     # Create text splitter with 256 chunk size
-    text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
+    text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
 
     # Convert Documents to text strings and chunk them
     all_texts = []
@@ -158,7 +158,7 @@ def create_leann_index(mail_path: str, index_path: str = "mail_index.leann", max
     print(f"Loaded {len(documents)} email documents")
 
     # Create text splitter with 256 chunk size
-    text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
+    text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
 
     # Convert Documents to text strings and chunk them
     all_texts = []
@@ -218,9 +218,9 @@ async def query_leann_index(index_path: str, query: str):
     start_time = time.time()
     chat_response = chat.ask(
         query,
-        top_k=10,
+        top_k=20,
         recompute_beighbor_embeddings=True,
-        complexity=12,
+        complexity=32,
         beam_width=1,
 
     )
@@ -233,7 +233,7 @@ async def main():
     parser = argparse.ArgumentParser(description='LEANN Mail Reader - Create and query email index')
     # Remove --mail-path argument and auto-detect all Messages directories
     # Remove DEFAULT_MAIL_PATH
-    parser.add_argument('--index-dir', type=str, default="./mail_index_leann_debug",
+    parser.add_argument('--index-dir', type=str, default="./mail_index_index_file",
                         help='Directory to store the LEANN index (default: ./mail_index_leann_raw_text_all_dicts)')
     parser.add_argument('--max-emails', type=int, default=1000,
                         help='Maximum number of emails to process (-1 means all)')
@@ -253,6 +253,9 @@ async def main():
         mail_path = get_mail_path()
         print(f"Searching for email data in: {mail_path}")
         messages_dirs = find_all_messages_directories(mail_path)
+        # messages_dirs = find_all_messages_directories(DEFAULT_MAIL_PATH)
+        # messages_dirs = [DEFAULT_MAIL_PATH]
+        # messages_dirs = messages_dirs[:1]
 
         print('len(messages_dirs): ', len(messages_dirs))
 
```
```diff
@@ -78,7 +78,7 @@ def create_leann_index_from_multiple_wechat_exports(
     )
 
     # Create text splitter with 256 chunk size
-    text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
+    text_splitter = SentenceSplitter(chunk_size=192, chunk_overlap=64)
 
     # Convert Documents to text strings and chunk them
     all_texts = []
```
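The reader scripts above tune `SentenceSplitter` chunking differently per data source (256/25 for multi-source mail, 256/128 for single-source mail, 192/64 for WeChat). A small sketch of how the two knobs interact, using the same LlamaIndex splitter the scripts import (the sample text and values are only an illustration):

```python
from llama_index.core.node_parser import SentenceSplitter

# chunk_size caps tokens per chunk; chunk_overlap repeats that many tokens
# between consecutive chunks so context is not cut mid-thought.
splitter = SentenceSplitter(chunk_size=192, chunk_overlap=64)
chunks = splitter.split_text(
    "WeChat messages are short, so smaller chunks with moderate overlap "
    "keep related messages together in one retrievable unit."
)
print(len(chunks), chunks[0][:60])
```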
Submodule packages/leann-backend-diskann/third_party/DiskANN updated: af2a26481e...25339b0341
```diff
@@ -8,7 +8,12 @@ build-backend = "scikit_build_core.build"
 name = "leann-backend-hnsw"
 version = "0.1.0"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
-dependencies = ["leann-core==0.1.0", "numpy"]
+dependencies = [
+    "leann-core==0.1.0",
+    "numpy",
+    "pyzmq>=23.0.0",
+    "msgpack>=1.0.0",
+]
 
 [tool.scikit-build]
 wheel.packages = ["leann_backend_hnsw"]
```
```diff
@@ -5,14 +5,22 @@ build-backend = "setuptools.build_meta"
 [project]
 name = "leann-core"
 version = "0.1.0"
-description = "Core API and plugin system for Leann."
+description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"
 license = { text = "MIT" }
 
+# All required dependencies included
 dependencies = [
     "numpy>=1.20.0",
-    "tqdm>=4.60.0"
+    "tqdm>=4.60.0",
+    "psutil>=5.8.0",
+    "pyzmq>=23.0.0",
+    "msgpack>=1.0.0",
+    "torch>=2.0.0",
+    "sentence-transformers>=2.2.0",
+    "llama-index-core>=0.12.0",
+    "python-dotenv>=1.0.0",
 ]
 
 [project.scripts]
```
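With the heavier runtime dependencies (torch, sentence-transformers, llama-index-core) now declared on leann-core, a local development install pulls them in automatically. A hedged sketch (package path as in this repository; plain `pip install -e` works the same way):

```bash
# Editable install of the core package into the current environment
uv pip install -e packages/leann-core
```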
```diff
@@ -142,7 +142,7 @@ class LeannBuilder:
     def __init__(
         self,
         backend_name: str,
-        embedding_model: str = "facebook/contriever-msmarco",
+        embedding_model: str = "facebook/contriever",
         dimensions: Optional[int] = None,
         embedding_mode: str = "sentence-transformers",
         **backend_kwargs,
```
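The default embedding model for `LeannBuilder` changes from `facebook/contriever-msmarco` to `facebook/contriever` in this hunk. A minimal sketch of pinning the model explicitly so index builds are unaffected by default changes (argument names as in the signature above; the texts are placeholders):

```python
from leann.api import LeannBuilder

# Pin the embedding model rather than relying on the default,
# so rebuilding an index reproduces the same embedding space.
builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="facebook/contriever-msmarco",
)
builder.add_text("Index and query with the same embedding model.")
builder.build_index("pinned.leann")
```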
```diff
@@ -9,6 +9,7 @@ from typing import Dict, Any, Optional, List
 import logging
 import os
 import difflib
+import torch
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
```
```diff
@@ -28,6 +29,68 @@ def check_ollama_models() -> List[str]:
     return []
 
 
+def check_ollama_model_exists_remotely(model_name: str) -> tuple[bool, list[str]]:
+    """Check if a model exists in Ollama's remote library and return available tags
+
+    Returns:
+        (model_exists, available_tags): bool and list of matching tags
+    """
+    try:
+        import requests
+        import re
+
+        # Split model name and tag
+        if ':' in model_name:
+            base_model, requested_tag = model_name.split(':', 1)
+        else:
+            base_model, requested_tag = model_name, None
+
+        # First check if base model exists in library
+        library_response = requests.get("https://ollama.com/library", timeout=8)
+        if library_response.status_code != 200:
+            return True, []  # Assume exists if can't check
+
+        # Extract model names from library page
+        models_in_library = re.findall(r'href="/library/([^"]+)"', library_response.text)
+
+        if base_model not in models_in_library:
+            return False, []  # Base model doesn't exist
+
+        # If base model exists, get available tags
+        tags_response = requests.get(f"https://ollama.com/library/{base_model}/tags", timeout=8)
+        if tags_response.status_code != 200:
+            return True, []  # Base model exists but can't get tags
+
+        # Extract tags for this model - be more specific to avoid HTML artifacts
+        tag_pattern = rf'{re.escape(base_model)}:[a-zA-Z0-9\.\-_]+'
+        raw_tags = re.findall(tag_pattern, tags_response.text)
+
+        # Clean up tags - remove HTML artifacts and duplicates
+        available_tags = []
+        seen = set()
+        for tag in raw_tags:
+            # Skip if it looks like HTML (contains < or >)
+            if '<' in tag or '>' in tag:
+                continue
+            if tag not in seen:
+                seen.add(tag)
+                available_tags.append(tag)
+
+        # Check if exact model exists
+        if requested_tag is None:
+            # User just requested base model, suggest tags
+            return True, available_tags[:10]  # Return up to 10 tags
+        else:
+            exact_match = model_name in available_tags
+            return exact_match, available_tags[:10]
+
+    except Exception:
+        pass
+
+    # If scraping fails, assume model might exist (don't block user)
+    return True, []
+
+
 def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[str]:
     """Use intelligent fuzzy search for Ollama models"""
     if not available_models:
```
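A quick sketch of exercising this helper on its own (illustrative only; the call shape matches the function added above, and the model name is just an example):

```python
# Hypothetical standalone check outside the chat flow.
exists, tags = check_ollama_model_exists_remotely("qwen3:8b")
if not exists:
    print("Not in the Ollama library; nothing to pull.")
elif tags:
    print("Published variants you could `ollama pull`:", tags)
```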
@@ -243,24 +306,66 @@ def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
|||||||
if llm_type == "ollama":
|
if llm_type == "ollama":
|
||||||
available_models = check_ollama_models()
|
available_models = check_ollama_models()
|
||||||
if available_models and model_name not in available_models:
|
if available_models and model_name not in available_models:
|
||||||
# Use intelligent fuzzy search based on locally installed models
|
|
||||||
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
|
||||||
|
|
||||||
error_msg = f"Model '{model_name}' not found in your local Ollama installation."
|
error_msg = f"Model '{model_name}' not found in your local Ollama installation."
|
||||||
if suggestions:
|
|
||||||
error_msg += "\n\nDid you mean one of these installed models?\n"
|
|
||||||
for i, suggestion in enumerate(suggestions, 1):
|
|
||||||
error_msg += f" {i}. {suggestion}\n"
|
|
||||||
else:
|
|
||||||
error_msg += "\n\nYour installed models:\n"
|
|
||||||
for i, model in enumerate(available_models[:8], 1):
|
|
||||||
error_msg += f" {i}. {model}\n"
|
|
||||||
if len(available_models) > 8:
|
|
||||||
error_msg += f" ... and {len(available_models) - 8} more\n"
|
|
||||||
|
|
||||||
error_msg += "\nTo list all models: ollama list"
|
# Check if the model exists remotely and get available tags
|
||||||
error_msg += "\nTo download a new model: ollama pull <model_name>"
|
model_exists_remotely, available_tags = check_ollama_model_exists_remotely(model_name)
|
||||||
error_msg += "\nBrowse models: https://ollama.com/library"
|
|
||||||
|
if model_exists_remotely and model_name in available_tags:
|
||||||
|
# Exact model exists remotely - suggest pulling it
|
||||||
|
error_msg += f"\n\nTo install the requested model:\n"
|
||||||
|
error_msg += f" ollama pull {model_name}\n"
|
||||||
|
|
||||||
|
# Show local alternatives
|
||||||
|
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
||||||
|
if suggestions:
|
||||||
|
error_msg += "\nOr use one of these similar installed models:\n"
|
||||||
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
|
|
||||||
|
elif model_exists_remotely and available_tags:
|
||||||
|
# Base model exists but requested tag doesn't - suggest correct tags
|
||||||
|
base_model = model_name.split(':')[0]
|
||||||
|
requested_tag = model_name.split(':', 1)[1] if ':' in model_name else None
|
||||||
|
|
||||||
|
error_msg += f"\n\nModel '{base_model}' exists, but tag '{requested_tag}' is not available."
|
||||||
|
error_msg += f"\n\nAvailable {base_model} models you can install:\n"
|
||||||
|
for i, tag in enumerate(available_tags[:8], 1):
|
||||||
|
error_msg += f" {i}. ollama pull {tag}\n"
|
||||||
|
if len(available_tags) > 8:
|
||||||
|
error_msg += f" ... and {len(available_tags) - 8} more variants\n"
|
||||||
|
|
||||||
|
# Also show local alternatives
|
||||||
|
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
||||||
|
if suggestions:
|
||||||
|
error_msg += "\nOr use one of these similar installed models:\n"
|
||||||
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Model doesn't exist remotely - show fuzzy suggestions
|
||||||
|
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
||||||
|
error_msg += f"\n\nModel '{model_name}' was not found in Ollama's library."
|
||||||
|
|
||||||
|
if suggestions:
|
||||||
|
error_msg += "\n\nDid you mean one of these installed models?\n"
|
||||||
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
|
else:
|
||||||
|
error_msg += "\n\nYour installed models:\n"
|
||||||
|
for i, model in enumerate(available_models[:8], 1):
|
||||||
|
error_msg += f" {i}. {model}\n"
|
||||||
|
if len(available_models) > 8:
|
||||||
|
error_msg += f" ... and {len(available_models) - 8} more\n"
|
||||||
|
|
||||||
|
error_msg += "\n\nCommands:"
|
||||||
|
error_msg += "\n ollama list # List installed models"
|
||||||
|
if model_exists_remotely and available_tags:
|
||||||
|
if model_name in available_tags:
|
||||||
|
error_msg += f"\n ollama pull {model_name} # Install requested model"
|
||||||
|
else:
|
||||||
|
error_msg += f"\n ollama pull {available_tags[0]} # Install recommended variant"
|
||||||
|
error_msg += "\n https://ollama.com/library # Browse available models"
|
||||||
return error_msg
|
return error_msg
|
||||||
|
|
||||||
elif llm_type == "hf":
|
elif llm_type == "hf":
|
||||||
@@ -397,7 +502,7 @@ class OllamaChat(LLMInterface):
class HFChat(LLMInterface):
-    """LLM interface for local Hugging Face Transformers models."""
+    """LLM interface for local Hugging Face Transformers models with proper chat templates."""

    def __init__(self, model_name: str = "deepseek-ai/deepseek-llm-7b-chat"):
        logger.info(f"Initializing HFChat with model='{model_name}'")
@@ -408,7 +513,7 @@ class HFChat(LLMInterface):
            raise ValueError(model_error)

        try:
-            from transformers.pipelines import pipeline
+            from transformers import AutoTokenizer, AutoModelForCausalLM
            import torch
        except ImportError:
            raise ImportError(
@@ -417,54 +522,100 @@ class HFChat(LLMInterface):
        # Auto-detect device
        if torch.cuda.is_available():
-            device = "cuda"
+            self.device = "cuda"
            logger.info("CUDA is available. Using GPU.")
        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
-            device = "mps"
+            self.device = "mps"
            logger.info("MPS is available. Using Apple Silicon GPU.")
        else:
-            device = "cpu"
+            self.device = "cpu"
            logger.info("No GPU detected. Using CPU.")

-        self.pipeline = pipeline("text-generation", model=model_name, device=device)
+        # Load tokenizer and model
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16 if self.device != "cpu" else torch.float32,
+            device_map="auto" if self.device != "cpu" else None,
+            trust_remote_code=True
+        )
+
+        # Move model to device if not using device_map
+        if self.device != "cpu" and "device_map" not in str(self.model):
+            self.model = self.model.to(self.device)
+
+        # Set pad token if not present
+        if self.tokenizer.pad_token is None:
+            self.tokenizer.pad_token = self.tokenizer.eos_token

    def ask(self, prompt: str, **kwargs) -> str:
-        # Map OpenAI-style arguments to Hugging Face equivalents
-        if "max_tokens" in kwargs:
-            # Prefer user-provided max_new_tokens if both are present
-            kwargs.setdefault("max_new_tokens", kwargs["max_tokens"])
-            # Remove the unsupported key to avoid errors in Transformers
-            kwargs.pop("max_tokens")
+        # Check if this is a Qwen model and add /no_think by default
+        is_qwen_model = "qwen" in self.model.config._name_or_path.lower()
+
+        # For Qwen models, automatically add /no_think to the prompt
+        if is_qwen_model and "/no_think" not in prompt and "/think" not in prompt:
+            prompt = prompt + " /no_think"
+
+        # Prepare chat template
+        messages = [{"role": "user", "content": prompt}]
+
+        # Apply chat template if available
+        if hasattr(self.tokenizer, "apply_chat_template"):
+            try:
+                formatted_prompt = self.tokenizer.apply_chat_template(
+                    messages,
+                    tokenize=False,
+                    add_generation_prompt=True
+                )
+            except Exception as e:
+                logger.warning(f"Chat template failed, using raw prompt: {e}")
+                formatted_prompt = prompt
+        else:
+            # Fallback for models without chat template
+            formatted_prompt = prompt

-        # Handle temperature=0 edge-case for greedy decoding
-        if "temperature" in kwargs and kwargs["temperature"] == 0.0:
-            # Remove unsupported zero temperature and use deterministic generation
-            kwargs.pop("temperature")
-            kwargs.setdefault("do_sample", False)
+        # Tokenize input
+        inputs = self.tokenizer(
+            formatted_prompt,
+            return_tensors="pt",
+            padding=True,
+            truncation=True,
+            max_length=2048
+        )
+
+        # Move inputs to device
+        if self.device != "cpu":
+            inputs = {k: v.to(self.device) for k, v in inputs.items()}

-        # Sensible defaults for text generation
-        params = {"max_length": 500, "num_return_sequences": 1, **kwargs}
-        logger.info(f"Generating text with Hugging Face model with params: {params}")
-        results = self.pipeline(prompt, **params)
+        # Set generation parameters
+        generation_config = {
+            "max_new_tokens": kwargs.get("max_tokens", kwargs.get("max_new_tokens", 512)),
+            "temperature": kwargs.get("temperature", 0.7),
+            "top_p": kwargs.get("top_p", 0.9),
+            "do_sample": kwargs.get("temperature", 0.7) > 0,
+            "pad_token_id": self.tokenizer.eos_token_id,
+            "eos_token_id": self.tokenizer.eos_token_id,
+        }
+
+        # Handle temperature=0 for greedy decoding
+        if generation_config["temperature"] == 0.0:
+            generation_config["do_sample"] = False
+            generation_config.pop("temperature")

-        # Handle different response formats from transformers
-        if isinstance(results, list) and len(results) > 0:
-            generated_text = (
-                results[0].get("generated_text", "")
-                if isinstance(results[0], dict)
-                else str(results[0])
-            )
-        else:
-            generated_text = str(results)
-
-        # Extract only the newly generated portion by removing the original prompt
-        if isinstance(generated_text, str) and generated_text.startswith(prompt):
-            response = generated_text[len(prompt) :].strip()
-        else:
-            # Fallback: return the full response if prompt removal fails
-            response = str(generated_text)
-
-        return response
+        logger.info(f"Generating with HuggingFace model, config: {generation_config}")
+
+        # Generate
+        with torch.no_grad():
+            outputs = self.model.generate(
+                **inputs,
+                **generation_config
+            )
+
+        # Decode response
+        generated_tokens = outputs[0][inputs["input_ids"].shape[1]:]
+        response = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)
+
+        return response.strip()


class OpenAIChat(LLMInterface):
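To make the argument mapping in the rewritten `ask()` concrete, here is a short usage sketch. The model name and token budget are illustrative examples, not values taken from this PR; `HFChat` itself is the class defined in the hunk above.

```python
# Illustrative call into the rewritten HFChat; any chat-tuned Hugging Face model
# with a chat template should work the same way.
chat = HFChat(model_name="Qwen/Qwen2.5-0.5B-Instruct")

# OpenAI-style arguments: max_tokens feeds max_new_tokens, and temperature=0.0
# switches generation to greedy decoding (do_sample=False) per the code above.
answer = chat.ask("Summarize LEANN in one sentence.", max_tokens=64, temperature=0.0)
print(answer)
```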
@@ -101,7 +101,7 @@ def compute_embeddings_sentence_transformers(
    if device == "mps":
        batch_size = 128  # MPS optimal batch size from benchmark
        if model_name == "Qwen/Qwen3-Embedding-0.6B":
-            batch_size = 64
+            batch_size = 32
    elif device == "cuda":
        batch_size = 256  # CUDA optimal batch size
    # Keep original batch_size for CPU
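For reference, the batch size selected above is ultimately what gets passed to `SentenceTransformer.encode`. The sketch below shows that flow end to end; the function body is an assumption about the surrounding `compute_embeddings_sentence_transformers` code, which is not shown in this hunk.

```python
from sentence_transformers import SentenceTransformer

def encode_with_device_batch_size(texts, model_name: str, device: str, batch_size: int = 16):
    # Mirrors the heuristic in the hunk above: larger batches on GPU backends,
    # and a smaller batch for the Qwen3 embedding model on MPS to reduce memory pressure.
    if device == "mps":
        batch_size = 128
        if model_name == "Qwen/Qwen3-Embedding-0.6B":
            batch_size = 32
    elif device == "cuda":
        batch_size = 256

    model = SentenceTransformer(model_name, device=device)
    return model.encode(texts, batch_size=batch_size, show_progress_bar=False, convert_to_numpy=True)
```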
40
packages/leann/README.md
Normal file
@@ -0,0 +1,40 @@
# LEANN - The smallest vector index in the world

LEANN is a revolutionary vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.

## Installation

```bash
# Default installation (HNSW backend, recommended)
uv pip install leann

# With DiskANN backend (for large-scale deployments)
uv pip install leann[diskann]
```

## Quick Start

```python
from leann import LeannBuilder, LeannSearcher, LeannChat

# Build an index
builder = LeannBuilder(backend_name="hnsw")
builder.add_text("LEANN saves 97% storage compared to traditional vector databases.")
builder.build_index("my_index.leann")

# Search
searcher = LeannSearcher("my_index.leann")
results = searcher.search("storage savings", top_k=3)

# Chat with your data
chat = LeannChat("my_index.leann", llm_config={"type": "ollama", "model": "llama3.2:1b"})
response = chat.ask("How much storage does LEANN save?")
```

## Documentation

For full documentation, visit [https://leann.readthedocs.io](https://leann.readthedocs.io)

## License

MIT License
12
packages/leann/__init__.py
Normal file
@@ -0,0 +1,12 @@
"""
LEANN - Low-storage Embedding Approximation for Neural Networks

A revolutionary vector database that democratizes personal AI.
"""

__version__ = "0.1.0"

# Re-export main API from leann-core
from leann_core import LeannBuilder, LeannSearcher, LeannChat

__all__ = ["LeannBuilder", "LeannSearcher", "LeannChat"]
42
packages/leann/pyproject.toml
Normal file
@@ -0,0 +1,42 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "leann"
version = "0.1.0"
description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
readme = "README.md"
requires-python = ">=3.9"
license = { text = "MIT" }
authors = [
    { name = "LEANN Team" }
]
keywords = ["vector-database", "rag", "embeddings", "search", "ai"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]

# Default installation: core + hnsw
dependencies = [
    "leann-core>=0.1.0",
    "leann-backend-hnsw>=0.1.0",
]

[project.optional-dependencies]
diskann = [
    "leann-backend-diskann>=0.1.0",
]

[project.urls]
Homepage = "https://github.com/yourusername/leann"
Documentation = "https://leann.readthedocs.io"
Repository = "https://github.com/yourusername/leann"
Issues = "https://github.com/yourusername/leann/issues"
@@ -33,8 +33,8 @@ dependencies = [
    "msgpack>=1.1.1",
    "llama-index-vector-stores-faiss>=0.4.0",
    "llama-index-embeddings-huggingface>=0.5.5",
-    "mlx>=0.26.3",
-    "mlx-lm>=0.26.0",
+    "mlx>=0.26.3; sys_platform == 'darwin'",
+    "mlx-lm>=0.26.0; sys_platform == 'darwin'",
    "psutil>=5.8.0",
]
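The `sys_platform == 'darwin'` markers confine the MLX dependencies to macOS, where MLX (Apple's array framework for Apple silicon) is actually available; on Linux and Windows the requirement is skipped by pip/uv. A quick way to see how a resolver evaluates such a marker, using the `packaging` library (the snippet is illustrative and not part of this repository):

```python
from packaging.markers import Marker

# Evaluates against the running interpreter's environment:
# True on macOS, False on Linux/Windows, so the mlx requirements are
# simply ignored on non-macOS installs.
marker = Marker("sys_platform == 'darwin'")
print(marker.evaluate())
```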