Compare commits

207 Commits (abbreviated SHA1s; author and date columns omitted):

c8f173c0e5 7ea34bd7d0 13bb561aad 348423eca9 bc621677f6 9d5cdd93b4 0174ba5571
03af82d695 738f1dbab8 37d990d51c a6f07a54f1 46905e0687 838ade231e da6540decd
39e18a7c11 6bde28584b f62632c41f 27708243ca 9a1e4652ca 14e84d9e2d 2dcfca19ff
bee2167ee3 ef980d70b3 db3c63c441 00eeadb9dd 42c8370709 fafdf8fcbe 21f7d8e031
46565b9249 3dad76126a 18e28bda32 609fa62fd5 eab13434ef b2390ccc14 e8fca2c84a
790ae14f69 ac363072e6 93465af46c 792ece67dc 239e35e2e6 2fac0c6fbf 9801aa581b
5e97916608 8b9c2be8c9 3ff5aac8e0 67fef60466 b6ab6f1993 9f2e82a838 0b2b799d5a
0f790fbbd9 387ae21eba 3cc329c3e7 5567302316 075d4bd167 e4bcc76f88 710e83b1fd
c96d653072 8b22d2b5d3 4cb544ee38 f94ce63d51 4271ff9d84 0d448c4a41 af5599e33c
efdf6d917a dd71ac8d71 8bee1d4100 33521d6d00 8899734952 54df6310c5 19bcc07814
8356e3c668 08eac5c821 4671ed9b36 055c086398 d505dcc5e3 261006c36a b2eba23e21
e9ee687472 6f5d5e4a77 5c8921673a e9d2d420bd ebabfad066 e6f612b5e8 51c41acd82
455f93fb7c 48207c3b69 4de1caa40f 60eaa8165c c1a5d0c624 af1790395a 383c6d8d7e
bc0d839693 8596562de5 5d09586853 a7cba078dd b3e9ee96fa 8537a6b17e 7c8d7dc5c2
8e23d663e6 8a3994bf80 8375f601ba c87c0fe662 73927b68ef cc1a62e5aa 802020cb41
cdb92f7cf4 dc69bdec00 98073e9868 cf2ef48967 0692bbf7a2 52584a171f efd6b5324b
2baaa4549b 35310ddd52 fc9c5cb39d 8f2a1e87ea 50caf65f28 1b48794ca8 4aef1d814e
75ddcd6158 2a4df11f5c 5eb893c62b d91ce2e94d 5c2ff8a641 d4f474c9b7 170f7644e9
cd8b970eff 52153bbb69 e1ae087207 48c5e12ac1 f8b5c97190 d038c81b8b 29cbbbd0d6
179f30bc36 c4a0a68581 5c836ad08e 673fd9b7cd 84b24b233d 499cdd7822 800d4cf111
b6d43f5fd9 3603cd5034 6df7893173 e64b599276 2dd59c4ba1 166986d5e6 a6aec68f32
ed27a127d5 d8b4ea7564 f0a2ef96b4 7d73c2c803 e8d2ecab03 32a374d094 d45c013806
9000a7083d 8307555d54 20f2aece08 43eb4f9a1d 5461b71d8c 374db0ebb8 cea1f6f87c
6c0e39372b 2bec67d2b6 133e715832 95cf2f16e2 47a4c153eb faf5ae3533 a44dccecac
9cf9358b9c de252fef31 9076bc27b8 50686c0819 1614203786 3d4c75a56c 2684ee71dc
1d321953ba b3cb251369 0a17d2c9d8 e3defbca84 e407f63977 7add391b2c efd6373b32
d502fa24b0 258a9a5c7f 5d41ac6115 2a0fdb49b8 9d1b7231b6 ed3095b478 88eca75917
42de27e16a c083bda5b7 e86da38726 99076e38bc 9698c1a02c 851f0f04c3 ae16d9d888
6e1af2eb0c 7695dd0d50 c2065473ad 5f3870564d c214b2e33e 2420c5fd35 f48f526f0a
5dd74982ba e07aaf52a7 30e5f12616 594427bf87
**.gitattributes** (vendored, 1 change)

@@ -1 +0,0 @@
-paper_plot/data/big_graph_degree_data.npz filter=lfs diff=lfs merge=lfs -text

**.github/workflows/build-and-publish.yml** (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
name: CI

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  workflow_dispatch:

jobs:
  build:
    uses: ./.github/workflows/build-reusable.yml

**.github/workflows/build-reusable.yml** (vendored, new file, 358 lines)

@@ -0,0 +1,358 @@
name: Reusable Build

on:
  workflow_call:
    inputs:
      ref:
        description: 'Git ref to build'
        required: false
        type: string
        default: ''

jobs:
  lint:
    name: Lint and Format Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Install ruff
        run: |
          uv tool install ruff

      - name: Run ruff check
        run: |
          ruff check .

      - name: Run ruff format check
        run: |
          ruff format --check .

  build:
    needs: lint
    name: Build ${{ matrix.os }} Python ${{ matrix.python }}
    strategy:
      matrix:
        include:
          - os: ubuntu-22.04
            python: '3.9'
          - os: ubuntu-22.04
            python: '3.10'
          - os: ubuntu-22.04
            python: '3.11'
          - os: ubuntu-22.04
            python: '3.12'
          - os: ubuntu-22.04
            python: '3.13'
          - os: macos-14
            python: '3.9'
          - os: macos-14
            python: '3.10'
          - os: macos-14
            python: '3.11'
          - os: macos-14
            python: '3.12'
          - os: macos-14
            python: '3.13'
          - os: macos-15
            python: '3.9'
          - os: macos-15
            python: '3.10'
          - os: macos-15
            python: '3.11'
          - os: macos-15
            python: '3.12'
          - os: macos-15
            python: '3.13'
          - os: macos-13
            python: '3.9'
          - os: macos-13
            python: '3.10'
          - os: macos-13
            python: '3.11'
          - os: macos-13
            python: '3.12'
          # Note: macos-13 + Python 3.13 excluded due to PyTorch compatibility
          # (PyTorch 2.5+ supports Python 3.13 but not Intel Mac x86_64)
    runs-on: ${{ matrix.os }}

    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ inputs.ref }}
          submodules: recursive

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python }}

      - name: Install uv
        uses: astral-sh/setup-uv@v6

      - name: Install system dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y libomp-dev libboost-all-dev protobuf-compiler libzmq3-dev \
            pkg-config libabsl-dev libaio-dev libprotobuf-dev \
            patchelf

          # Install Intel MKL for DiskANN
          wget -q https://registrationcenter-download.intel.com/akdlm/IRC_NAS/79153e0f-74d7-45af-b8c2-258941adf58a/intel-onemkl-2025.0.0.940.sh
          sudo sh intel-onemkl-2025.0.0.940.sh -a --components intel.oneapi.lin.mkl.devel --action install --eula accept -s
          source /opt/intel/oneapi/setvars.sh
          echo "MKLROOT=/opt/intel/oneapi/mkl/latest" >> $GITHUB_ENV
          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin" >> $GITHUB_ENV
          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/mkl/latest/lib/intel64" >> $GITHUB_ENV

      - name: Install system dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          # Don't install LLVM, use system clang for better compatibility
          brew install libomp boost protobuf zeromq

      - name: Install build dependencies
        run: |
          uv pip install --system scikit-build-core numpy swig Cython pybind11
          if [[ "$RUNNER_OS" == "Linux" ]]; then
            uv pip install --system auditwheel
          else
            uv pip install --system delocate
          fi

      - name: Set macOS environment variables
        if: runner.os == 'macOS'
        run: |
          # Use brew --prefix to automatically detect Homebrew installation path
          HOMEBREW_PREFIX=$(brew --prefix)
          echo "HOMEBREW_PREFIX=${HOMEBREW_PREFIX}" >> $GITHUB_ENV
          echo "OpenMP_ROOT=${HOMEBREW_PREFIX}/opt/libomp" >> $GITHUB_ENV

          # Set CMAKE_PREFIX_PATH to let CMake find all packages automatically
          echo "CMAKE_PREFIX_PATH=${HOMEBREW_PREFIX}" >> $GITHUB_ENV

          # Set compiler flags for OpenMP (required for both backends)
          echo "LDFLAGS=-L${HOMEBREW_PREFIX}/opt/libomp/lib" >> $GITHUB_ENV
          echo "CPPFLAGS=-I${HOMEBREW_PREFIX}/opt/libomp/include" >> $GITHUB_ENV

      - name: Build packages
        run: |
          # Build core (platform independent)
          cd packages/leann-core
          uv build
          cd ../..

          # Build HNSW backend
          cd packages/leann-backend-hnsw
          if [[ "${{ matrix.os }}" == macos-* ]]; then
            # Use system clang for better compatibility
            export CC=clang
            export CXX=clang++
            # Homebrew libraries on each macOS version require matching minimum version
            if [[ "${{ matrix.os }}" == "macos-13" ]]; then
              export MACOSX_DEPLOYMENT_TARGET=13.0
            elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
              export MACOSX_DEPLOYMENT_TARGET=14.0
            elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
              export MACOSX_DEPLOYMENT_TARGET=15.0
            fi
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
          else
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
          fi
          cd ../..

          # Build DiskANN backend
          cd packages/leann-backend-diskann
          if [[ "${{ matrix.os }}" == macos-* ]]; then
            # Use system clang for better compatibility
            export CC=clang
            export CXX=clang++
            # DiskANN requires macOS 13.3+ for sgesdd_ LAPACK function
            # But Homebrew libraries on each macOS version require matching minimum version
            if [[ "${{ matrix.os }}" == "macos-13" ]]; then
              export MACOSX_DEPLOYMENT_TARGET=13.3
            elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
              export MACOSX_DEPLOYMENT_TARGET=14.0
            elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
              export MACOSX_DEPLOYMENT_TARGET=15.0
            fi
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
          else
            uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
          fi
          cd ../..

          # Build meta package (platform independent)
          cd packages/leann
          uv build
          cd ../..

      - name: Repair wheels (Linux)
        if: runner.os == 'Linux'
        run: |
          # Repair HNSW wheel
          cd packages/leann-backend-hnsw
          if [ -d dist ]; then
            auditwheel repair dist/*.whl -w dist_repaired
            rm -rf dist
            mv dist_repaired dist
          fi
          cd ../..

          # Repair DiskANN wheel
          cd packages/leann-backend-diskann
          if [ -d dist ]; then
            auditwheel repair dist/*.whl -w dist_repaired
            rm -rf dist
            mv dist_repaired dist
          fi
          cd ../..

      - name: Repair wheels (macOS)
        if: runner.os == 'macOS'
        run: |
          # Determine deployment target based on runner OS
          # Must match the Homebrew libraries for each macOS version
          if [[ "${{ matrix.os }}" == "macos-13" ]]; then
            HNSW_TARGET="13.0"
            DISKANN_TARGET="13.3"
          elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
            HNSW_TARGET="14.0"
            DISKANN_TARGET="14.0"
          elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
            HNSW_TARGET="15.0"
            DISKANN_TARGET="15.0"
          fi

          # Repair HNSW wheel
          cd packages/leann-backend-hnsw
          if [ -d dist ]; then
            export MACOSX_DEPLOYMENT_TARGET=$HNSW_TARGET
            delocate-wheel -w dist_repaired -v --require-target-macos-version $HNSW_TARGET dist/*.whl
            rm -rf dist
            mv dist_repaired dist
          fi
          cd ../..

          # Repair DiskANN wheel
          cd packages/leann-backend-diskann
          if [ -d dist ]; then
            export MACOSX_DEPLOYMENT_TARGET=$DISKANN_TARGET
            delocate-wheel -w dist_repaired -v --require-target-macos-version $DISKANN_TARGET dist/*.whl
            rm -rf dist
            mv dist_repaired dist
          fi
          cd ../..

      - name: List built packages
        run: |
          echo "📦 Built packages:"
          find packages/*/dist -name "*.whl" -o -name "*.tar.gz" | sort

      - name: Install built packages for testing
        run: |
          # Create a virtual environment with the correct Python version
          uv venv --python ${{ matrix.python }}
          source .venv/bin/activate || source .venv/Scripts/activate

          # Install packages using --find-links to prioritize local builds
          uv pip install --find-links packages/leann-core/dist --find-links packages/leann-backend-hnsw/dist --find-links packages/leann-backend-diskann/dist packages/leann-core/dist/*.whl || uv pip install --find-links packages/leann-core/dist packages/leann-core/dist/*.tar.gz
          uv pip install --find-links packages/leann-core/dist packages/leann-backend-hnsw/dist/*.whl
          uv pip install --find-links packages/leann-core/dist packages/leann-backend-diskann/dist/*.whl
          uv pip install packages/leann/dist/*.whl || uv pip install packages/leann/dist/*.tar.gz

          # Install test dependencies using extras
          uv pip install -e ".[test]"

      - name: Run tests with pytest
        env:
          CI: true
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          HF_HUB_DISABLE_SYMLINKS: 1
          TOKENIZERS_PARALLELISM: false
          PYTORCH_ENABLE_MPS_FALLBACK: 0
          OMP_NUM_THREADS: 1
          MKL_NUM_THREADS: 1
        run: |
          source .venv/bin/activate || source .venv/Scripts/activate
          pytest tests/ -v --tb=short

      - name: Run sanity checks (optional)
        run: |
          # Activate virtual environment
          source .venv/bin/activate || source .venv/Scripts/activate

          # Run distance function tests if available
          if [ -f test/sanity_checks/test_distance_functions.py ]; then
            echo "Running distance function sanity checks..."
            python test/sanity_checks/test_distance_functions.py || echo "⚠️ Distance function test failed, continuing..."
          fi

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: packages-${{ matrix.os }}-py${{ matrix.python }}
          path: packages/*/dist/

  arch-smoke:
    name: Arch Linux smoke test (install & import)
    needs: build
    runs-on: ubuntu-latest
    container:
      image: archlinux:latest

    steps:
      - name: Prepare system
        run: |
          pacman -Syu --noconfirm
          pacman -S --noconfirm python python-pip gcc git zlib openssl

      - name: Download ALL wheel artifacts from this run
        uses: actions/download-artifact@v5
        with:
          # Don't specify name, download all artifacts
          path: ./wheels

      - name: Install uv
        uses: astral-sh/setup-uv@v6

      - name: Create virtual environment and install wheels
        run: |
          uv venv
          source .venv/bin/activate || source .venv/Scripts/activate
          uv pip install --find-links wheels leann-core
          uv pip install --find-links wheels leann-backend-hnsw
          uv pip install --find-links wheels leann-backend-diskann
          uv pip install --find-links wheels leann

      - name: Import & tiny runtime check
        env:
          OMP_NUM_THREADS: 1
          MKL_NUM_THREADS: 1
        run: |
          source .venv/bin/activate || source .venv/Scripts/activate
          python - <<'PY'
          import leann
          import leann_backend_hnsw as h
          import leann_backend_diskann as d
          from leann import LeannBuilder, LeannSearcher
          b = LeannBuilder(backend_name="hnsw")
          b.add_text("hello arch")
          b.build_index("arch_demo.leann")
          s = LeannSearcher("arch_demo.leann")
          print("search:", s.search("hello", top_k=1))
          PY

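The `arch-smoke` job ends with a tiny inline build-and-search check. The same round trip can be written as a pytest-style test; this is a hedged sketch (the file name, the temporary index path, and the non-empty-result assertion are assumptions; only the `LeannBuilder`/`LeannSearcher` calls are taken from the workflow above):

```python
# test_smoke.py -- illustrative sketch, not a file added by this PR.
# Mirrors the inline check in the arch-smoke job: build a tiny HNSW index, then search it.
from leann import LeannBuilder, LeannSearcher


def test_build_and_search(tmp_path):
    index_path = str(tmp_path / "smoke.leann")

    builder = LeannBuilder(backend_name="hnsw")
    builder.add_text("hello arch")
    builder.build_index(index_path)

    searcher = LeannSearcher(index_path)
    results = searcher.search("hello", top_k=1)
    assert results  # assumed: search returns a non-empty sequence of hits
```
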
**.github/workflows/link-check.yml** (vendored, new file, 19 lines)

@@ -0,0 +1,19 @@
name: Link Check

on:
  push:
    branches: [ main, master ]
  pull_request:
  schedule:
    - cron: "0 3 * * 1"

jobs:
  link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: lycheeverse/lychee-action@v2
        with:
          args: --no-progress --insecure --user-agent 'curl/7.68.0' README.md docs/ apps/ examples/ benchmarks/
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

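lychee does the real work in this job; purely as an illustration of what a link check involves (extract Markdown links, probe each URL), here is a standard-library sketch. It is not how lychee or this workflow is implemented:

```python
# Illustrative only: a crude Markdown link checker in the spirit of the lychee step above.
import re
import urllib.request

MD_LINK = re.compile(r"\[[^\]]*\]\((https?://[^)\s]+)\)")


def broken_links(path: str, timeout: float = 10.0) -> list[str]:
    """Return URLs from a Markdown file that fail a HEAD request."""
    text = open(path, encoding="utf-8").read()
    bad = []
    for url in MD_LINK.findall(text):
        # Same user agent string the workflow passes to lychee.
        request = urllib.request.Request(url, method="HEAD", headers={"User-Agent": "curl/7.68.0"})
        try:
            urllib.request.urlopen(request, timeout=timeout)
        except Exception:
            bad.append(url)
    return bad


if __name__ == "__main__":
    print(broken_links("README.md"))
```
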
**.github/workflows/release-manual.yml** (vendored, new file, 129 lines)

@@ -0,0 +1,129 @@
name: Release

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to release (e.g., 0.1.2)'
        required: true
        type: string

jobs:
  update-version:
    name: Update Version
    runs-on: ubuntu-latest
    permissions:
      contents: write
    outputs:
      commit-sha: ${{ steps.push.outputs.commit-sha }}

    steps:
      - uses: actions/checkout@v4

      - name: Validate version
        run: |
          # Remove 'v' prefix if present for validation
          VERSION_CLEAN="${{ inputs.version }}"
          VERSION_CLEAN="${VERSION_CLEAN#v}"
          if ! [[ "$VERSION_CLEAN" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "❌ Invalid version format. Expected format: X.Y.Z or vX.Y.Z"
            exit 1
          fi
          echo "✅ Version format valid: ${{ inputs.version }}"

      - name: Update versions and push
        id: push
        run: |
          # Check current version
          CURRENT_VERSION=$(grep "^version" packages/leann-core/pyproject.toml | cut -d'"' -f2)
          echo "Current version: $CURRENT_VERSION"
          echo "Target version: ${{ inputs.version }}"

          if [ "$CURRENT_VERSION" = "${{ inputs.version }}" ]; then
            echo "⚠️ Version is already ${{ inputs.version }}, skipping update"
            COMMIT_SHA=$(git rev-parse HEAD)
          else
            ./scripts/bump_version.sh ${{ inputs.version }}
            git config user.name "GitHub Actions"
            git config user.email "actions@github.com"
            git add packages/*/pyproject.toml
            git commit -m "chore: release v${{ inputs.version }}"
            git push origin main
            COMMIT_SHA=$(git rev-parse HEAD)
            echo "✅ Pushed version update: $COMMIT_SHA"
          fi

          echo "commit-sha=$COMMIT_SHA" >> $GITHUB_OUTPUT

  build-packages:
    name: Build packages
    needs: update-version
    uses: ./.github/workflows/build-reusable.yml
    with:
      ref: 'main'

  publish:
    name: Publish and Release
    needs: [update-version, build-packages]
    if: always() && needs.update-version.result == 'success' && needs.build-packages.result == 'success'
    runs-on: ubuntu-latest
    permissions:
      contents: write

    steps:
      - uses: actions/checkout@v4
        with:
          ref: 'main'

      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: dist-artifacts

      - name: Collect packages
        run: |
          mkdir -p dist
          find dist-artifacts -name "*.whl" -exec cp {} dist/ \;
          find dist-artifacts -name "*.tar.gz" -exec cp {} dist/ \;

          echo "📦 Packages to publish:"
          ls -la dist/

      - name: Publish to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          if [ -z "$TWINE_PASSWORD" ]; then
            echo "❌ PYPI_API_TOKEN not configured!"
            exit 1
          fi

          pip install twine
          twine upload dist/* --skip-existing --verbose

          echo "✅ Published to PyPI!"

      - name: Create release
        run: |
          # Check if tag already exists
          if git rev-parse "v${{ inputs.version }}" >/dev/null 2>&1; then
            echo "⚠️ Tag v${{ inputs.version }} already exists, skipping tag creation"
          else
            git tag "v${{ inputs.version }}"
            git push origin "v${{ inputs.version }}"
            echo "✅ Created and pushed tag v${{ inputs.version }}"
          fi

          # Check if release already exists
          if gh release view "v${{ inputs.version }}" >/dev/null 2>&1; then
            echo "⚠️ Release v${{ inputs.version }} already exists, skipping release creation"
          else
            gh release create "v${{ inputs.version }}" \
              --title "Release v${{ inputs.version }}" \
              --notes "🚀 Released to PyPI: https://pypi.org/project/leann/${{ inputs.version }}/" \
              --latest
            echo "✅ Created GitHub release v${{ inputs.version }}"
          fi
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

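The `Validate version` step accepts `X.Y.Z`, with or without a leading `v`. The same check in a few lines of Python (a reference sketch only; the workflow itself uses the bash test shown above):

```python
import re

# Same shape as the workflow's regex: MAJOR.MINOR.PATCH after stripping an optional "v".
VERSION_RE = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+$")


def is_valid_release_version(version: str) -> bool:
    """Accept '0.1.2' or 'v0.1.2'; reject anything else."""
    return bool(VERSION_RE.match(version.removeprefix("v")))


assert is_valid_release_version("v0.1.2")
assert is_valid_release_version("0.1.2")
assert not is_valid_release_version("0.1")
```
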
**.gitignore** (vendored, 20 changes)

@@ -12,13 +12,13 @@ outputs/
 *.idx
 *.map
 .history/
-scripts/
 lm_eval.egg-info/
 demo/experiment_results/**/*.json
 *.jsonl
 *.eml
 *.emlx
 *.json
+!.vscode/*.json
 *.sh
 *.txt
 !CMakeLists.txt

@@ -35,11 +35,15 @@ build/
 nprobe_logs/
 micro/results
 micro/contriever-INT8
-examples/data/*
-!examples/data/2501.14312v1 (1).pdf
-!examples/data/2506.08276v1.pdf
-!examples/data/PrideandPrejudice.txt
-!examples/data/README.md
+data/*
+!data/2501.14312v1 (1).pdf
+!data/2506.08276v1.pdf
+!data/PrideandPrejudice.txt
+!data/huawei_pangu.md
+!data/ground_truth/
+!data/indices/
+!data/queries/
+!data/.gitattributes
 *.qdstrm
 benchmark_results/
 results/

@@ -87,3 +91,7 @@ packages/leann-backend-diskann/third_party/DiskANN/_deps/
 *.passages.json

 batchtest.py
+tests/__pytest_cache__/
+tests/__pycache__/
+
+benchmarks/data/

**.pre-commit-config.yaml** (new file, 16 lines)

@@ -0,0 +1,16 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
      - id: check-merge-conflict
      - id: debug-statements

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.12.7  # Fixed version to match pyproject.toml
    hooks:
      - id: ruff
      - id: ruff-format

**.vscode/extensions.json** (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
{
  "recommendations": [
    "charliermarsh.ruff",
  ]
}

**.vscode/settings.json** (vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
{
  "python.defaultInterpreterPath": ".venv/bin/python",
  "python.terminal.activateEnvironment": true,
  "[python]": {
    "editor.defaultFormatter": "charliermarsh.ruff",
    "editor.formatOnSave": true,
    "editor.codeActionsOnSave": {
      "source.organizeImports": "explicit",
      "source.fixAll": "explicit"
    },
    "editor.insertSpaces": true,
    "editor.tabSize": 4
  },
  "ruff.enable": true,
  "files.watcherExclude": {
    "**/.venv/**": true,
    "**/__pycache__/**": true,
    "**/*.egg-info/**": true,
    "**/build/**": true,
    "**/dist/**": true
  }
}

**README.md** (698 changes)

@@ -3,20 +3,25 @@
 </p>

 <p align="center">
-<img src="https://img.shields.io/badge/Python-3.9%2B-blue.svg" alt="Python 3.9+">
+<img src="https://img.shields.io/badge/Python-3.9%20%7C%203.10%20%7C%203.11%20%7C%203.12%20%7C%203.13-blue.svg" alt="Python Versions">
+<img src="https://github.com/yichuan-w/LEANN/actions/workflows/build-and-publish.yml/badge.svg" alt="CI Status">
+<img src="https://img.shields.io/badge/Platform-Ubuntu%20%26%20Arch%20%26%20WSL%20%7C%20macOS%20(ARM64%2FIntel)-lightgrey" alt="Platform">
 <img src="https://img.shields.io/badge/License-MIT-green.svg" alt="MIT License">
-<img src="https://img.shields.io/badge/Platform-Linux%20%7C%20macOS-lightgrey" alt="Platform">
+<img src="https://img.shields.io/badge/MCP-Native%20Integration-blue" alt="MCP Integration">
 </p>

 <h2 align="center" tabindex="-1" class="heading-element" dir="auto">
 The smallest vector index in the world. RAG Everything with LEANN!
 </h2>

-LEANN is a revolutionary vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **[97% less storage]** than traditional solutions **without accuracy loss**.
+LEANN is an innovative vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.

-LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
+LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration Fig →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)

-**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can search your **[file system](#process-any-documents-pdf-txt-md)**, **[emails](#search-your-entire-life)**, **[browser history](#time-machine-for-the-web)**, **[chat history](#wechat-detective)**, or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
+**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can semantic search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)**, **[codebase](#-claude-code-integration-transform-your-development-workflow)**\* , or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
+
+\* Claude Code only supports basic `grep`-style keyword search. **LEANN** is a drop-in **semantic search MCP service fully compatible with Claude Code**, unlocking intelligent retrieval without changing your workflow. 🔥 Check out [the easy setup →](packages/leann-mcp/README.md)

@@ -26,57 +31,173 @@ LEANN achieves this through *graph-based selective recomputation* with *high-deg
 <img src="assets/effects.png" alt="LEANN vs Traditional Vector DB Storage Comparison" width="70%">
 </p>

-> **The numbers speak for themselves:** Index 60 million Wikipedia chunks in just 6GB instead of 201GB. From emails to browser history, everything fits on your laptop. [See detailed benchmarks for different applications below ↓](#storage-usage-comparison)
+> **The numbers speak for themselves:** Index 60 million text chunks in just 6GB instead of 201GB. From emails to browser history, everything fits on your laptop. [See detailed benchmarks for different applications below ↓](#-storage-comparison)

 🔒 **Privacy:** Your data never leaves your laptop. No OpenAI, no cloud, no "terms of service".

 🪶 **Lightweight:** Graph-based recomputation eliminates heavy embedding storage, while smart graph pruning and CSR format minimize graph storage overhead. Always less storage, less memory usage!

+📦 **Portable:** Transfer your entire knowledge base between devices (even with others) with minimal cost - your personal AI memory travels with you.
+
 📈 **Scalability:** Handle messy personal data that would crash traditional vector DBs, easily managing your growing personalized data and agent generated memory!

 ✨ **No Accuracy Loss:** Maintain the same search quality as heavyweight solutions while using 97% less storage.

-## Quick Start in 1 minute
+## Installation
+
+### 📦 Prerequisites: Install uv
+
+[Install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) first if you don't have it. Typically, you can install it with:

 ```bash
-git clone git@github.com:yichuan-w/LEANN.git leann
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+
+### 🚀 Quick Install
+
+Clone the repository to access all examples and try amazing applications,
+
+```bash
+git clone https://github.com/yichuan-w/LEANN.git leann
+cd leann
+```
+
+and install LEANN from [PyPI](https://pypi.org/project/leann/) to run them immediately:
+
+```bash
+uv venv
+source .venv/bin/activate
+uv pip install leann
+```
+<!--
+> Low-resource? See "Low-resource setups" in the [Configuration Guide](docs/configuration-guide.md#low-resource-setups). -->
+
+<details>
+<summary>
+<strong>🔧 Build from Source (Recommended for development)</strong>
+</summary>
+
+```bash
+git clone https://github.com/yichuan-w/LEANN.git leann
 cd leann
 git submodule update --init --recursive
 ```

 **macOS:**

+Note: DiskANN requires MacOS 13.3 or later.
+
 ```bash
-brew install llvm libomp boost protobuf zeromq
-export CC=$(brew --prefix llvm)/bin/clang
-export CXX=$(brew --prefix llvm)/bin/clang++
+brew install libomp boost protobuf zeromq pkgconf

-# Install with HNSW backend (default, recommended for most users)
-uv sync
-
-# Or add DiskANN backend if you want to test more options
 uv sync --extra diskann
 ```

 **Linux (Ubuntu/Debian):**

+Note: On Ubuntu 20.04, you may need to build a newer Abseil and pin Protobuf (e.g., v3.20.x) for building DiskANN. See [Issue #30](https://github.com/yichuan-w/LEANN/issues/30) for a step-by-step note.
+
+You can manually install [Intel oneAPI MKL](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html) instead of `libmkl-full-dev` for DiskANN. You can also use `libopenblas-dev` for building HNSW only, by removing `--extra diskann` in the command below.
+
 ```bash
-sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
+sudo apt-get update && sudo apt-get install -y \
+  libomp-dev libboost-all-dev protobuf-compiler libzmq3-dev \
+  pkg-config libabsl-dev libaio-dev libprotobuf-dev \
+  libmkl-full-dev

-# Install with HNSW backend (default, recommended for most users)
-uv sync
-
-# Or add DiskANN backend if you want to test more options
 uv sync --extra diskann
 ```

+**Linux (Arch Linux):**
+
+```bash
+sudo pacman -Syu && sudo pacman -S --needed base-devel cmake pkgconf git gcc \
+  boost boost-libs protobuf abseil-cpp libaio zeromq
+
+# For MKL in DiskANN
+sudo pacman -S --needed base-devel git
+git clone https://aur.archlinux.org/paru-bin.git
+cd paru-bin && makepkg -si
+paru -S intel-oneapi-mkl intel-oneapi-compiler
+source /opt/intel/oneapi/setvars.sh
+
+uv sync --extra diskann
+```
+
+**Linux (RHEL / CentOS Stream / Oracle / Rocky / AlmaLinux):**
+
+See [Issue #50](https://github.com/yichuan-w/LEANN/issues/50) for more details.
+
+```bash
+sudo dnf groupinstall -y "Development Tools"
+sudo dnf install -y libomp-devel boost-devel protobuf-compiler protobuf-devel \
+  abseil-cpp-devel libaio-devel zeromq-devel pkgconf-pkg-config
+
+# For MKL in DiskANN
+sudo dnf install -y intel-oneapi-mkl intel-oneapi-mkl-devel \
+  intel-oneapi-openmp || sudo dnf install -y intel-oneapi-compiler
+source /opt/intel/oneapi/setvars.sh
+
+uv sync --extra diskann
+```
+
+</details>
+
-**Ollama Setup (Recommended for full privacy):**
+## Quick Start

-> *You can skip this installation if you only want to use OpenAI API for generation.*
+Our declarative API makes RAG as easy as writing a config file.
+
+Check out [demo.ipynb](demo.ipynb) or [](https://colab.research.google.com/github/yichuan-w/LEANN/blob/main/demo.ipynb)

-*macOS:*
+```python
+from leann import LeannBuilder, LeannSearcher, LeannChat
+from pathlib import Path
+INDEX_PATH = str(Path("./").resolve() / "demo.leann")
+
+# Build an index
+builder = LeannBuilder(backend_name="hnsw")
+builder.add_text("LEANN saves 97% storage compared to traditional vector databases.")
+builder.add_text("Tung Tung Tung Sahur called—they need their banana‑crocodile hybrid back")
+builder.build_index(INDEX_PATH)
+
+# Search
+searcher = LeannSearcher(INDEX_PATH)
+results = searcher.search("fantastical AI-generated creatures", top_k=1)
+
+# Chat with your data
+chat = LeannChat(INDEX_PATH, llm_config={"type": "hf", "model": "Qwen/Qwen3-0.6B"})
+response = chat.ask("How much storage does LEANN save?", top_k=1)
+```
+
+## RAG on Everything!
+
+LEANN supports RAG on various data sources including documents (`.pdf`, `.txt`, `.md`), Apple Mail, Google Search History, WeChat, and more.
+
+**AST-Aware Code Chunking** - LEANN also features intelligent code chunking that preserves semantic boundaries (functions, classes, methods) for Python, Java, C#, and TypeScript files, providing improved code understanding compared to traditional text-based approaches.
+📖 Read the [AST Chunking Guide →](docs/ast_chunking_guide.md) to learn more.
+
+### Generation Model Setup
+
+LEANN supports multiple LLM providers for text generation (OpenAI API, HuggingFace, Ollama).
+
+<details>
+<summary><strong>🔑 OpenAI API Setup (Default)</strong></summary>
+
+Set your OpenAI API key as an environment variable:
+
+```bash
+export OPENAI_API_KEY="your-api-key-here"
+```
+
+</details>
+
+<details>
+<summary><strong>🔧 Ollama Setup (Recommended for full privacy)</strong></summary>
+
+**macOS:**
+
 First, [download Ollama for macOS](https://ollama.com/download/mac).

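The new Quick Start and the Ollama setup above cover the full loop; the following is a hedged end-to-end sketch that reuses only calls shown in this README (the index name and sample texts are placeholders, and `llama3.2:1b` is the model the Ollama instructions pull):

```python
from pathlib import Path

from leann import LeannBuilder, LeannChat, LeannSearcher

INDEX_PATH = str(Path("./").resolve() / "quickstart.leann")

# Build: embeddings are recomputed on demand, so none are stored in the index.
builder = LeannBuilder(backend_name="hnsw")
builder.add_text("LEANN saves 97% storage compared to traditional vector databases.")
builder.add_text("LEANN can also answer questions fully locally through Ollama.")
builder.build_index(INDEX_PATH)

# Search with real-time embeddings.
searcher = LeannSearcher(INDEX_PATH)
print(searcher.search("How much storage does LEANN save?", top_k=1))

# Chat: the README snippet shows an HF config; an Ollama config is used here instead.
chat = LeannChat(INDEX_PATH, llm_config={"type": "ollama", "model": "llama3.2:1b"})
print(chat.ask("How much storage does LEANN save?", top_k=1))
```
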
@@ -85,7 +206,8 @@ First, [download Ollama for macOS](https://ollama.com/download/mac).
 ollama pull llama3.2:1b
 ```

-*Linux:*
+**Linux:**
+
 ```bash
 # Install Ollama
 curl -fsSL https://ollama.ai/install.sh | sh

@@ -97,91 +219,126 @@ ollama serve &
 ollama pull llama3.2:1b
 ```

-## Dead Simple API
-
-Just 3 lines of code. Our declarative API makes RAG as easy as writing a config file:
-
-```python
-from leann.api import LeannBuilder, LeannSearcher, LeannChat
-
-# 1. Build the index (no embeddings stored!)
-builder = LeannBuilder(backend_name="hnsw")
-builder.add_text("C# is a powerful programming language")
-builder.add_text("Python is a powerful programming language and it is very popular")
-builder.add_text("Machine learning transforms industries")
-builder.add_text("Neural networks process complex data")
-builder.add_text("Leann is a great storage saving engine for RAG on your MacBook")
-builder.build_index("knowledge.leann")
-
-# 2. Search with real-time embeddings
-searcher = LeannSearcher("knowledge.leann")
-results = searcher.search("programming languages", top_k=2)
-
-# 3. Chat with LEANN using retrieved results
-llm_config = {
-    "type": "ollama",
-    "model": "llama3.2:1b"
-}
-
-chat = LeannChat(index_path="knowledge.leann", llm_config=llm_config)
-response = chat.ask(
-    "Compare the two retrieved programming languages and say which one is more popular today.",
-    top_k=2,
-)
-```
-
-**That's it.** No cloud setup, no API keys, no "fine-tuning". Just your data, your questions, your laptop.
-
-[Try the interactive demo →](demo.ipynb)
-
-## Wild Things You Can Do
-
-LEANN supports RAGing a lot of data sources, like .pdf, .txt, .md, and also supports RAGing your WeChat, Google Search History, and more.
-
-### Process Any Documents (.pdf, .txt, .md)
-
-Above we showed the Python API, while this CLI script demonstrates the same concepts while directly processing PDFs and documents, and even any directory that stores your personal files!
-
-The following scripts use Ollama `qwen3:8b` by default, so you need `ollama pull qwen3:8b` first. For other models: `--llm openai --model gpt-4o` (requires `OPENAI_API_KEY` environment variable) or `--llm hf --model Qwen/Qwen3-4B`.
-
-```bash
-# Drop your PDFs, .txt, .md files into examples/data/
-uv run ./examples/main_cli_example.py
-
-# Or use python directly
-source .venv/bin/activate
-python ./examples/main_cli_example.py
-```
-
-**Works with any text format** - research papers, personal notes, presentations. Built with LlamaIndex for document parsing.
-
-### Search Your Entire Life
-```bash
-python examples/mail_reader_leann.py
-# "What's the number of class recommend to take per semester for incoming EECS students?"
-```
-**90K emails → 14MB.** Finally, search your email like you search Google.
-
+</details>
+
+### ⭐ Flexible Configuration
+
+LEANN provides flexible parameters for embedding models, search strategies, and data processing to fit your specific needs.
+
+📚 **Need configuration best practices?** Check our [Configuration Guide](docs/configuration-guide.md) for detailed optimization tips, model selection advice, and solutions to common issues like slow embeddings or poor search quality.
+
 <details>
-<summary><strong>📋 Click to expand: Command Examples</strong></summary>
+<summary><strong>📋 Click to expand: Common Parameters (Available in All Examples)</strong></summary>

+All RAG examples share these common parameters. **Interactive mode** is available in all examples - simply run without `--query` to start a continuous Q&A session where you can ask multiple questions. Type 'quit' to exit.
+
 ```bash
-# Use default mail path (works for most macOS setups)
-python examples/mail_reader_leann.py
-
-# Run with custom index directory
-python examples/mail_reader_leann.py --index-dir "./my_mail_index"
-
-# Process all emails (may take time but indexes everything)
-python examples/mail_reader_leann.py --max-emails -1
-
-# Limit number of emails processed (useful for testing)
-python examples/mail_reader_leann.py --max-emails 1000
-
-# Run a single query
-python examples/mail_reader_leann.py --query "What did my boss say about deadlines?"
+# Core Parameters (General preprocessing for all examples)
+--index-dir DIR # Directory to store the index (default: current directory)
+--query "YOUR QUESTION" # Single query mode. Omit for interactive chat (type 'quit' to exit), and now you can play with your index interactively
+--max-items N # Limit data preprocessing (default: -1, process all data)
+--force-rebuild # Force rebuild index even if it exists
+
+# Embedding Parameters
+--embedding-model MODEL # e.g., facebook/contriever, text-embedding-3-small, mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text
+--embedding-mode MODE # sentence-transformers, openai, mlx, or ollama
+
+# LLM Parameters (Text generation models)
+--llm TYPE # LLM backend: openai, ollama, or hf (default: openai)
+--llm-model MODEL # Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct
+--thinking-budget LEVEL # Thinking budget for reasoning models: low/medium/high (supported by o3, o3-mini, GPT-Oss:20b, and other reasoning models)
+
+# Search Parameters
+--top-k N # Number of results to retrieve (default: 20)
+--search-complexity N # Search complexity for graph traversal (default: 32)
+
+# Chunking Parameters
+--chunk-size N # Size of text chunks (default varies by source: 256 for most, 192 for WeChat)
+--chunk-overlap N # Overlap between chunks (default varies: 25-128 depending on source)
+
+# Index Building Parameters
+--backend-name NAME # Backend to use: hnsw or diskann (default: hnsw)
+--graph-degree N # Graph degree for index construction (default: 32)
+--build-complexity N # Build complexity for index construction (default: 64)
+--compact / --no-compact # Use compact storage (default: true). Must be `no-compact` for `no-recompute` build.
+--recompute / --no-recompute # Enable/disable embedding recomputation (default: enabled). Should not do a `no-recompute` search in a `recompute` build.
+```
+
+</details>
+
+### 📄 Personal Data Manager: Process Any Documents (`.pdf`, `.txt`, `.md`)!
+
+Ask questions directly about your personal PDFs, documents, and any directory containing your files!
+
+<p align="center">
+<img src="videos/paper_clear.gif" alt="LEANN Document Search Demo" width="600">
+</p>
+
+The example below asks a question about summarizing our paper (uses default data in `data/`, which is a directory with diverse data sources: two papers, Pride and Prejudice, and a Technical report about LLM in Huawei in Chinese), and this is the **easiest example** to run here:
+
+```bash
+source .venv/bin/activate # Don't forget to activate the virtual environment
+python -m apps.document_rag --query "What are the main techniques LEANN explores?"
+```
+
+<details>
+<summary><strong>📋 Click to expand: Document-Specific Arguments</strong></summary>
+
+#### Parameters
+```bash
+--data-dir DIR # Directory containing documents to process (default: data)
+--file-types .ext .ext # Filter by specific file types (optional - all LlamaIndex supported types if omitted)
+```
+
+#### Example Commands
+```bash
+# Process all documents with larger chunks for academic papers
+python -m apps.document_rag --data-dir "~/Documents/Papers" --chunk-size 1024
+
+# Filter only markdown and Python files with smaller chunks
+python -m apps.document_rag --data-dir "./docs" --chunk-size 256 --file-types .md .py
+
+# Enable AST-aware chunking for code files
+python -m apps.document_rag --enable-code-chunking --data-dir "./my_project"
+
+# Or use the specialized code RAG for better code understanding
+python -m apps.code_rag --repo-dir "./my_codebase" --query "How does authentication work?"
+```
+
+</details>
+
+### 📧 Your Personal Email Secretary: RAG on Apple Mail!
+
+> **Note:** The examples below currently support macOS only. Windows support coming soon.
+
+<p align="center">
+<img src="videos/mail_clear.gif" alt="LEANN Email Search Demo" width="600">
+</p>
+
+Before running the example below, you need to grant full disk access to your terminal/VS Code in System Preferences → Privacy & Security → Full Disk Access.
+
+```bash
+python -m apps.email_rag --query "What's the food I ordered by DoorDash or Uber Eats mostly?"
+```
+**780K email chunks → 78MB storage.** Finally, search your email like you search Google.
+
+<details>
+<summary><strong>📋 Click to expand: Email-Specific Arguments</strong></summary>
+
+#### Parameters
+```bash
+--mail-path PATH # Path to specific mail directory (auto-detects if omitted)
+--include-html # Include HTML content in processing (useful for newsletters)
+```
+
+#### Example Commands
+```bash
+# Search work emails from a specific account
+python -m apps.email_rag --mail-path "~/Library/Mail/V10/WORK_ACCOUNT"
+
+# Find all receipts and order confirmations (includes HTML)
+python -m apps.email_rag --query "receipt order confirmation invoice" --include-html
 ```

 </details>

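Because every `apps.*` example accepts the shared flags listed above, they can also be driven programmatically; here is a hedged sketch using only documented flags (run from the repository root; the index directory is a placeholder):

```python
# Illustrative only: shells out to the documented apps.document_rag entry point.
import subprocess
import sys

cmd = [
    sys.executable, "-m", "apps.document_rag",
    "--index-dir", "./indexes/docs",  # placeholder location
    "--query", "What are the main techniques LEANN explores?",
    "--embedding-mode", "sentence-transformers",
    "--backend-name", "hnsw",
    "--top-k", "20",
]
subprocess.run(cmd, check=True)
```
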
@@ -195,28 +352,32 @@ Once the index is built, you can ask questions like:
 - "Show me emails about travel expenses"
 </details>

-### Time Machine for the Web
+### 🔍 Time Machine for the Web: RAG Your Entire Chrome Browser History!
+
+<p align="center">
+<img src="videos/google_clear.gif" alt="LEANN Browser History Search Demo" width="600">
+</p>

 ```bash
-python examples/google_history_reader_leann.py
-# "Tell me my browser history about machine learning system stuff?"
+python -m apps.browser_rag --query "Tell me my browser history about machine learning?"
 ```
-**38K browser entries → 6MB.** Your browser history becomes your personal search engine.
+**38K browser entries → 6MB storage.** Your browser history becomes your personal search engine.

 <details>
-<summary><strong>📋 Click to expand: Command Examples</strong></summary>
+<summary><strong>📋 Click to expand: Browser-Specific Arguments</strong></summary>

+#### Parameters
 ```bash
-# Use default Chrome profile (auto-finds all profiles)
-python examples/google_history_reader_leann.py
-
-# Run with custom index directory
-python examples/google_history_reader_leann.py --index-dir "./my_chrome_index"
-
-# Limit number of history entries processed (useful for testing)
-python examples/google_history_reader_leann.py --max-entries 500
-
-# Run a single query
-python examples/google_history_reader_leann.py --query "What websites did I visit about machine learning?"
+--chrome-profile PATH # Path to Chrome profile directory (auto-detects if omitted)
+```
+
+#### Example Commands
+```bash
+# Search academic research from your browsing history
+python -m apps.browser_rag --query "arxiv papers machine learning transformer architecture"
+
+# Track competitor analysis across work profile
+python -m apps.browser_rag --chrome-profile "~/Library/Application Support/Google/Chrome/Work Profile" --max-items 5000
 ```

 </details>

@@ -249,44 +410,58 @@ Once the index is built, you can ask questions like:
</details>

### 💬 WeChat Detective: Unlock Your Golden Memories!

<p align="center">
<img src="videos/wechat_clear.gif" alt="LEANN WeChat Search Demo" width="600">
</p>

```bash
python -m apps.wechat_rag --query "Show me all group chats about weekend plans"
```

**400K messages → 64MB storage.** Search years of chat history in any language.

<details>
<summary><strong>🔧 Click to expand: Installation Requirements</strong></summary>

First, you need to install the [WeChat exporter](https://github.com/sunnyyoung/WeChatTweak-CLI):

```bash
brew install sunnyyoung/repo/wechattweak-cli
```

or install it manually (if you have issues with Homebrew):

```bash
sudo packages/wechat-exporter/wechattweak-cli install
```

**Troubleshooting:**
- **Installation issues**: Check the [WeChatTweak-CLI issues page](https://github.com/sunnyyoung/WeChatTweak-CLI/issues/41)
- **Export errors**: If you encounter the error below, try restarting WeChat

```bash
Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed.
Failed to find or export WeChat data. Exiting.
```
</details>

<details>
<summary><strong>📋 Click to expand: WeChat-Specific Arguments</strong></summary>

#### Parameters

```bash
--export-dir DIR   # Directory to store exported WeChat data (default: wechat_export_direct)
--force-export     # Force re-export even if data exists
```

#### Example Commands

```bash
# Search for travel plans discussed in group chats
python -m apps.wechat_rag --query "travel plans" --max-items 10000

# Re-export and search recent chats (useful after new messages)
python -m apps.wechat_rag --force-export --query "work schedule"
```

</details>
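
The chat side can stay fully local as well. A minimal sketch using an Ollama-served model, mirroring what the bundled apps pass for `--llm ollama`; the index path is illustrative:

```python
# Minimal sketch: chat with a previously built WeChat index using a local Ollama model.
# The index path is illustrative; llm_config mirrors the apps' --llm ollama configuration.
from leann.api import LeannChat

chat = LeannChat(
    "./wechat_index/wechat_index.leann",  # illustrative path to an existing index
    llm_config={
        "type": "ollama",
        "model": "llama3.2:1b",
        "host": "http://localhost:11434",
    },
)
print(chat.ask("Show me conversations about travel plans", top_k=20))
```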
@@ -300,15 +475,59 @@ Once the index is built, you can ask questions like:
</details>

### 🚀 Claude Code Integration: Transform Your Development Workflow!

**The future of code assistance is here.** LEANN's native MCP integration for Claude Code indexes your entire codebase and brings intelligent code assistance directly into your IDE.

**Key features:**
- 🔍 **Semantic code search** across your entire project, with a fully local, lightweight index
- 🧠 **AST-aware chunking** preserves code structure (functions, classes)
- 📚 **Context-aware assistance** for debugging and development
- 🚀 **Zero-config setup** with automatic language detection

```bash
# Install LEANN globally for MCP integration
uv tool install leann-core --with leann
claude mcp add --scope user leann-server -- leann_mcp
# Setup is automatic - just start using Claude Code!
```

Try our fully agentic pipeline with auto query rewriting, semantic search planning, and more:

![Claude Code Demo](videos/cursor_leann_clip_compressed.gif)

**🔥 Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)

## 🖥️ Command Line Interface

LEANN includes a powerful CLI for document processing and search. Perfect for quick document indexing and interactive chat.

### Installation

If you followed the Quick Start, `leann` is already installed in your virtual environment:

```bash
source .venv/bin/activate
leann --help
```

**To make it globally available:**

```bash
# Install the LEANN CLI globally using uv tool
uv tool install leann-core --with leann

# Now you can use leann from anywhere without activating the venv
leann --help
```

> **Note**: Global installation is required for Claude Code integration. The `leann_mcp` server depends on the globally available `leann` command.

### Usage Examples

```bash
# Build an index named "my-docs" from a directory (you can also pass multiple directories or files)
leann build my-docs --docs ./your_documents

# Search your documents
leann search my-docs "machine learning concepts"
@@ -318,30 +537,36 @@ leann ask my-docs --interactive
# List all your indexes
leann list

# Remove an index
leann remove my-docs
```
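
The CLI wraps the same Python API that the bundled apps use, so the workflow above can also be scripted. A minimal end-to-end sketch; the paths, model names, and texts are illustrative:

```python
# Minimal sketch: build a tiny index and ask one question via leann.api
# (the same calls apps/base_rag_example.py makes; paths and texts are illustrative).
from leann.api import LeannBuilder, LeannChat

builder = LeannBuilder(
    backend_name="hnsw",                    # default backend, as in the CLI
    embedding_model="facebook/contriever",  # default embedding model
    graph_degree=32,
    complexity=64,
)
for text in [
    "LEANN stores a pruned graph instead of every embedding.",
    "Embeddings are recomputed on the fly at query time.",
]:
    builder.add_text(text)
builder.build_index("./my-docs-index/my-docs.leann")

chat = LeannChat(
    "./my-docs-index/my-docs.leann",
    llm_config={"type": "hf", "model": "Qwen/Qwen2.5-1.5B-Instruct"},
)
print(chat.ask("How does LEANN keep the index small?", top_k=5))
```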
**Key CLI features:**
- Auto-detects document formats (PDF, TXT, MD, DOCX, PPTX + code files)
- **🧠 AST-aware chunking** for Python, Java, C#, and TypeScript files (see the sketch after this list)
- Smart text chunking with overlap for all other content
- Multiple LLM providers (Ollama, OpenAI, HuggingFace)
- Organized index storage in `.leann/indexes/` (project-local)
- Support for advanced search parameters
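
A minimal sketch of the AST-aware chunking path, using the `chunking` helpers that ship with the example apps (it assumes you run it from the `apps/` directory so the local package is importable; the repository path is illustrative):

```python
# Minimal sketch: AST-aware chunking for code files, traditional chunking for everything else.
# Uses the chunking package bundled with the example apps; the input path is illustrative.
from llama_index.core import SimpleDirectoryReader
from chunking import create_text_chunks

docs = SimpleDirectoryReader("./my_project", recursive=True, required_exts=[".py", ".md"]).load_data()
chunks = create_text_chunks(
    docs,
    chunk_size=256,          # traditional chunking for non-code files
    chunk_overlap=128,
    use_ast_chunking=True,   # code files are split along AST boundaries instead
    ast_chunk_size=512,
    ast_chunk_overlap=64,
)
print(f"{len(chunks)} chunks ready to be added to a LeannBuilder")
```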
<details>
<summary><strong>📋 Click to expand: Complete CLI Reference</strong></summary>

You can run `leann --help`, `leann build --help`, `leann search --help`, `leann ask --help`, `leann list --help`, or `leann remove --help` for the complete reference of each command.

**Build Command:**
```bash
leann build INDEX_NAME --docs DIRECTORY|FILE [DIRECTORY|FILE ...] [OPTIONS]

Options:
--backend {hnsw,diskann}      Backend to use (default: hnsw)
--embedding-model MODEL       Embedding model (default: facebook/contriever)
--graph-degree N              Graph degree (default: 32)
--complexity N                Build complexity (default: 64)
--force                       Force rebuild existing index
--compact / --no-compact      Use compact storage (default: true); a --no-recompute build must also use --no-compact
--recompute / --no-recompute  Enable recomputation (default: true)
```

**Search Command:**
@@ -349,9 +574,9 @@ Options:
leann search INDEX_NAME QUERY [OPTIONS]

Options:
--top-k N                     Number of results (default: 5)
--complexity N                Search complexity (default: 64)
--recompute / --no-recompute  Enable/disable embedding recomputation (default: enabled); do not use --no-recompute on an index built with recomputation
--pruning-strategy {global,local,proportional}
```
@@ -366,6 +591,31 @@ Options:
--top-k N              Retrieval count (default: 20)
```

**List Command:**
```bash
leann list

# Lists all indexes across all projects with status indicators:
# ✅ - Index is complete and ready to use
# ❌ - Index is incomplete or corrupted
# 📁 - CLI-created index (in .leann/indexes/)
# 📄 - App-created index (*.leann.meta.json files)
```

**Remove Command:**
```bash
leann remove INDEX_NAME [OPTIONS]

Options:
--force, -f    Force removal without confirmation

# Smart removal: automatically finds and safely removes indexes
# - Shows all matching indexes across projects
# - Requires confirmation for cross-project removal
# - Interactive selection when multiple matches found
# - Supports both CLI and app-created indexes
```

</details>

## 🏗️ Architecture & How It Works
@@ -382,56 +632,32 @@ Options:
- **Dynamic batching:** Efficiently batch embedding computations for GPU utilization
- **Two-level search:** Smart graph traversal that prioritizes promising nodes

**Backends:**
- **HNSW** (default): Ideal for most datasets with maximum storage savings through full recomputation
- **DiskANN**: Advanced option with superior search performance, using PQ-based graph traversal with real-time reranking for the best speed-accuracy trade-off
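
In the Python API used by the bundled apps, the backend is just a constructor argument, so switching between the two is a one-line change. A sketch; the path and text are illustrative:

```python
# Minimal sketch: pick the backend when constructing the builder
# (the same choice the CLI exposes as --backend).
from leann.api import LeannBuilder

builder = LeannBuilder(
    backend_name="diskann",   # or "hnsw" (the default)
    embedding_model="facebook/contriever",
)
builder.add_text("DiskANN uses PQ-guided traversal with reranking; HNSW maximizes storage savings.")
builder.build_index("./backend-demo/backend-demo.leann")
```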
## Benchmarks

**[DiskANN vs HNSW Performance Comparison →](benchmarks/diskann_vs_hnsw_speed_comparison.py)** - Compare search performance between both backends

**[Simple Example: Compare LEANN vs FAISS →](benchmarks/compare_faiss_vs_leann.py)** - See storage savings in action

### 📊 Storage Comparison

| System | DPR (2.1M) | Wiki (60M) | Chat (400K) | Email (780K) | Browser (38K) |
|--------|------------|------------|-------------|--------------|---------------|
| Traditional vector database (e.g., FAISS) | 3.8 GB | 201 GB | 1.8 GB | 2.4 GB | 130 MB |
| LEANN | 324 MB | 6 GB | 64 MB | 79 MB | 6.4 MB |
| Savings | 91% | 97% | 97% | 97% | 95% |

## Reproduce Our Results

```bash
uv pip install -e ".[dev]"  # Install dev dependencies
python benchmarks/run_evaluation.py  # Will auto-download evaluation data and run benchmarks
python benchmarks/run_evaluation.py benchmarks/data/indices/rpj_wiki/rpj_wiki --num-queries 2000  # After downloading data, you can run the benchmark with our biggest index
```

The evaluation script downloads data automatically on first run. The last three results were tested with partial personal data, and you can reproduce them with your own data!
@@ -453,98 +679,15 @@ If you find Leann useful, please cite:
}
```

## ✨ [Detailed Features →](docs/features.md)

## 🤝 [CONTRIBUTING →](docs/CONTRIBUTING.md)

## ❓ [FAQ →](docs/faq.md)

## 📈 [Roadmap →](docs/roadmap.md)

## 📄 License
@@ -552,13 +695,15 @@ MIT License - see [LICENSE](LICENSE) for details.
## 🙏 Acknowledgments

Core Contributors: [Yichuan Wang](https://yichuan-w.github.io/) & [Zhifei Li](https://github.com/andylizf).

We welcome more contributors! Feel free to open issues or submit PRs.

This work is done at [**Berkeley Sky Computing Lab**](https://sky.cs.berkeley.edu/).

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=yichuan-w/LEANN&type=Date)](https://www.star-history.com/#yichuan-w/LEANN&Date)

<p align="center">
<strong>⭐ Star us on GitHub if Leann is useful for your research or applications!</strong>
</p>
@@ -566,4 +711,3 @@ MIT License - see [LICENSE](LICENSE) for details.
<p align="center">
Made with ❤️ by the Leann team
</p>
apps/base_rag_example.py (new file, 343 lines)
@@ -0,0 +1,343 @@
|
|||||||
|
"""
|
||||||
|
Base class for unified RAG examples interface.
|
||||||
|
Provides common parameters and functionality for all RAG examples.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import dotenv
|
||||||
|
from leann.api import LeannBuilder, LeannChat
|
||||||
|
from leann.registry import register_project_directory
|
||||||
|
|
||||||
|
dotenv.load_dotenv()
|
||||||
|
|
||||||
|
|
||||||
|
class BaseRAGExample(ABC):
|
||||||
|
"""Base class for all RAG examples with unified interface."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
name: str,
|
||||||
|
description: str,
|
||||||
|
default_index_name: str,
|
||||||
|
):
|
||||||
|
self.name = name
|
||||||
|
self.description = description
|
||||||
|
self.default_index_name = default_index_name
|
||||||
|
self.parser = self._create_parser()
|
||||||
|
|
||||||
|
def _create_parser(self) -> argparse.ArgumentParser:
|
||||||
|
"""Create argument parser with common parameters."""
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description=self.description, formatter_class=argparse.RawDescriptionHelpFormatter
|
||||||
|
)
|
||||||
|
|
||||||
|
# Core parameters (all examples share these)
|
||||||
|
core_group = parser.add_argument_group("Core Parameters")
|
||||||
|
core_group.add_argument(
|
||||||
|
"--index-dir",
|
||||||
|
type=str,
|
||||||
|
default=f"./{self.default_index_name}",
|
||||||
|
help=f"Directory to store the index (default: ./{self.default_index_name})",
|
||||||
|
)
|
||||||
|
core_group.add_argument(
|
||||||
|
"--query",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Query to run (if not provided, will run in interactive mode)",
|
||||||
|
)
|
||||||
|
# Allow subclasses to override default max_items
|
||||||
|
max_items_default = getattr(self, "max_items_default", -1)
|
||||||
|
core_group.add_argument(
|
||||||
|
"--max-items",
|
||||||
|
type=int,
|
||||||
|
default=max_items_default,
|
||||||
|
help="Maximum number of items to process -1 for all, means index all documents, and you should set it to a reasonable number if you have a large dataset and try at the first time)",
|
||||||
|
)
|
||||||
|
core_group.add_argument(
|
||||||
|
"--force-rebuild", action="store_true", help="Force rebuild index even if it exists"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Embedding parameters
|
||||||
|
embedding_group = parser.add_argument_group("Embedding Parameters")
|
||||||
|
# Allow subclasses to override default embedding_model
|
||||||
|
embedding_model_default = getattr(self, "embedding_model_default", "facebook/contriever")
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-model",
|
||||||
|
type=str,
|
||||||
|
default=embedding_model_default,
|
||||||
|
help=f"Embedding model to use (default: {embedding_model_default}), we provide facebook/contriever, text-embedding-3-small,mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text",
|
||||||
|
)
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-mode",
|
||||||
|
type=str,
|
||||||
|
default="sentence-transformers",
|
||||||
|
choices=["sentence-transformers", "openai", "mlx", "ollama"],
|
||||||
|
help="Embedding backend mode (default: sentence-transformers), we provide sentence-transformers, openai, mlx, or ollama",
|
||||||
|
)
|
||||||
|
|
||||||
|
# LLM parameters
|
||||||
|
llm_group = parser.add_argument_group("LLM Parameters")
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm",
|
||||||
|
type=str,
|
||||||
|
default="openai",
|
||||||
|
choices=["openai", "ollama", "hf", "simulated"],
|
||||||
|
help="LLM backend: openai, ollama, or hf (default: openai)",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm-model",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm-host",
|
||||||
|
type=str,
|
||||||
|
default="http://localhost:11434",
|
||||||
|
help="Host for Ollama API (default: http://localhost:11434)",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--thinking-budget",
|
||||||
|
type=str,
|
||||||
|
choices=["low", "medium", "high"],
|
||||||
|
default=None,
|
||||||
|
help="Thinking budget for reasoning models (low/medium/high). Supported by GPT-Oss:20b and other reasoning models.",
|
||||||
|
)
|
||||||
|
|
||||||
|
# AST Chunking parameters
|
||||||
|
ast_group = parser.add_argument_group("AST Chunking Parameters")
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--use-ast-chunking",
|
||||||
|
action="store_true",
|
||||||
|
help="Enable AST-aware chunking for code files (requires astchunk)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--ast-chunk-size",
|
||||||
|
type=int,
|
||||||
|
default=512,
|
||||||
|
help="Maximum characters per AST chunk (default: 512)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--ast-chunk-overlap",
|
||||||
|
type=int,
|
||||||
|
default=64,
|
||||||
|
help="Overlap between AST chunks (default: 64)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--code-file-extensions",
|
||||||
|
nargs="+",
|
||||||
|
default=None,
|
||||||
|
help="Additional code file extensions to process with AST chunking (e.g., .py .java .cs .ts)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--ast-fallback-traditional",
|
||||||
|
action="store_true",
|
||||||
|
default=True,
|
||||||
|
help="Fall back to traditional chunking if AST chunking fails (default: True)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Search parameters
|
||||||
|
search_group = parser.add_argument_group("Search Parameters")
|
||||||
|
search_group.add_argument(
|
||||||
|
"--top-k", type=int, default=20, help="Number of results to retrieve (default: 20)"
|
||||||
|
)
|
||||||
|
search_group.add_argument(
|
||||||
|
"--search-complexity",
|
||||||
|
type=int,
|
||||||
|
default=32,
|
||||||
|
help="Search complexity for graph traversal (default: 64)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Index building parameters
|
||||||
|
index_group = parser.add_argument_group("Index Building Parameters")
|
||||||
|
index_group.add_argument(
|
||||||
|
"--backend-name",
|
||||||
|
type=str,
|
||||||
|
default="hnsw",
|
||||||
|
choices=["hnsw", "diskann"],
|
||||||
|
help="Backend to use for index (default: hnsw)",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--graph-degree",
|
||||||
|
type=int,
|
||||||
|
default=32,
|
||||||
|
help="Graph degree for index construction (default: 32)",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--build-complexity",
|
||||||
|
type=int,
|
||||||
|
default=64,
|
||||||
|
help="Build complexity for index construction (default: 64)",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--no-compact",
|
||||||
|
action="store_true",
|
||||||
|
help="Disable compact index storage",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--no-recompute",
|
||||||
|
action="store_true",
|
||||||
|
help="Disable embedding recomputation",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add source-specific parameters
|
||||||
|
self._add_specific_arguments(parser)
|
||||||
|
|
||||||
|
return parser
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def _add_specific_arguments(self, parser: argparse.ArgumentParser):
|
||||||
|
"""Add source-specific arguments. Override in subclasses."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load data from the source. Returns list of text chunks."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_llm_config(self, args) -> dict[str, Any]:
|
||||||
|
"""Get LLM configuration based on arguments."""
|
||||||
|
config = {"type": args.llm}
|
||||||
|
|
||||||
|
if args.llm == "openai":
|
||||||
|
config["model"] = args.llm_model or "gpt-4o"
|
||||||
|
elif args.llm == "ollama":
|
||||||
|
config["model"] = args.llm_model or "llama3.2:1b"
|
||||||
|
config["host"] = args.llm_host
|
||||||
|
elif args.llm == "hf":
|
||||||
|
config["model"] = args.llm_model or "Qwen/Qwen2.5-1.5B-Instruct"
|
||||||
|
elif args.llm == "simulated":
|
||||||
|
# Simulated LLM doesn't need additional configuration
|
||||||
|
pass
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
async def build_index(self, args, texts: list[str]) -> str:
|
||||||
|
"""Build LEANN index from texts."""
|
||||||
|
index_path = str(Path(args.index_dir) / f"{self.default_index_name}.leann")
|
||||||
|
|
||||||
|
print(f"\n[Building Index] Creating {self.name} index...")
|
||||||
|
print(f"Total text chunks: {len(texts)}")
|
||||||
|
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name=args.backend_name,
|
||||||
|
embedding_model=args.embedding_model,
|
||||||
|
embedding_mode=args.embedding_mode,
|
||||||
|
graph_degree=args.graph_degree,
|
||||||
|
complexity=args.build_complexity,
|
||||||
|
is_compact=not args.no_compact,
|
||||||
|
is_recompute=not args.no_recompute,
|
||||||
|
num_threads=1, # Force single-threaded mode
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add texts in batches for better progress tracking
|
||||||
|
batch_size = 1000
|
||||||
|
for i in range(0, len(texts), batch_size):
|
||||||
|
batch = texts[i : i + batch_size]
|
||||||
|
for text in batch:
|
||||||
|
builder.add_text(text)
|
||||||
|
print(f"Added {min(i + batch_size, len(texts))}/{len(texts)} texts...")
|
||||||
|
|
||||||
|
print("Building index structure...")
|
||||||
|
builder.build_index(index_path)
|
||||||
|
print(f"Index saved to: {index_path}")
|
||||||
|
|
||||||
|
# Register project directory so leann list can discover this index
|
||||||
|
# The index is saved as args.index_dir/index_name.leann
|
||||||
|
# We want to register the current working directory where the app is run
|
||||||
|
register_project_directory(Path.cwd())
|
||||||
|
|
||||||
|
return index_path
|
||||||
|
|
||||||
|
async def run_interactive_chat(self, args, index_path: str):
|
||||||
|
"""Run interactive chat with the index."""
|
||||||
|
chat = LeannChat(
|
||||||
|
index_path,
|
||||||
|
llm_config=self.get_llm_config(args),
|
||||||
|
system_prompt=f"You are a helpful assistant that answers questions about {self.name} data.",
|
||||||
|
complexity=args.search_complexity,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"\n[Interactive Mode] Chat with your {self.name} data!")
|
||||||
|
print("Type 'quit' or 'exit' to stop.\n")
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
query = input("You: ").strip()
|
||||||
|
if query.lower() in ["quit", "exit", "q"]:
|
||||||
|
print("Goodbye!")
|
||||||
|
break
|
||||||
|
|
||||||
|
if not query:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Prepare LLM kwargs with thinking budget if specified
|
||||||
|
llm_kwargs = {}
|
||||||
|
if hasattr(args, "thinking_budget") and args.thinking_budget:
|
||||||
|
llm_kwargs["thinking_budget"] = args.thinking_budget
|
||||||
|
|
||||||
|
response = chat.ask(
|
||||||
|
query,
|
||||||
|
top_k=args.top_k,
|
||||||
|
complexity=args.search_complexity,
|
||||||
|
llm_kwargs=llm_kwargs,
|
||||||
|
)
|
||||||
|
print(f"\nAssistant: {response}\n")
|
||||||
|
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print("\nGoodbye!")
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error: {e}")
|
||||||
|
|
||||||
|
async def run_single_query(self, args, index_path: str, query: str):
|
||||||
|
"""Run a single query against the index."""
|
||||||
|
chat = LeannChat(
|
||||||
|
index_path,
|
||||||
|
llm_config=self.get_llm_config(args),
|
||||||
|
system_prompt=f"You are a helpful assistant that answers questions about {self.name} data.",
|
||||||
|
complexity=args.search_complexity,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"\n[Query]: \033[36m{query}\033[0m")
|
||||||
|
|
||||||
|
# Prepare LLM kwargs with thinking budget if specified
|
||||||
|
llm_kwargs = {}
|
||||||
|
if hasattr(args, "thinking_budget") and args.thinking_budget:
|
||||||
|
llm_kwargs["thinking_budget"] = args.thinking_budget
|
||||||
|
|
||||||
|
response = chat.ask(
|
||||||
|
query, top_k=args.top_k, complexity=args.search_complexity, llm_kwargs=llm_kwargs
|
||||||
|
)
|
||||||
|
print(f"\n[Response]: \033[36m{response}\033[0m")
|
||||||
|
|
||||||
|
async def run(self):
|
||||||
|
"""Main entry point for the example."""
|
||||||
|
args = self.parser.parse_args()
|
||||||
|
|
||||||
|
# Check if index exists
|
||||||
|
index_path = str(Path(args.index_dir) / f"{self.default_index_name}.leann")
|
||||||
|
index_exists = Path(args.index_dir).exists()
|
||||||
|
|
||||||
|
if not index_exists or args.force_rebuild:
|
||||||
|
# Load data and build index
|
||||||
|
print(f"\n{'Rebuilding' if index_exists else 'Building'} index...")
|
||||||
|
texts = await self.load_data(args)
|
||||||
|
|
||||||
|
if not texts:
|
||||||
|
print("No data found to index!")
|
||||||
|
return
|
||||||
|
|
||||||
|
index_path = await self.build_index(args, texts)
|
||||||
|
else:
|
||||||
|
print(f"\nUsing existing index in {args.index_dir}")
|
||||||
|
|
||||||
|
# Run query or interactive mode
|
||||||
|
if args.query:
|
||||||
|
await self.run_single_query(args, index_path, args.query)
|
||||||
|
else:
|
||||||
|
await self.run_interactive_chat(args, index_path)
|
||||||
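
For reference, a new data source only needs the two abstract hooks below. This is a hypothetical minimal subclass (the class name, argument, and texts are illustrative); `apps/browser_rag.py`, which follows, is a full real example.

```python
# Hypothetical minimal subclass of BaseRAGExample (names and texts are illustrative).
import asyncio

from base_rag_example import BaseRAGExample


class NotesRAG(BaseRAGExample):
    def __init__(self):
        super().__init__(
            name="Notes",
            description="Index a handful of in-memory notes with LEANN",
            default_index_name="notes_index",
        )

    def _add_specific_arguments(self, parser):
        parser.add_argument("--notes-file", type=str, default=None, help="Optional notes file")

    async def load_data(self, args) -> list[str]:
        # Return plain text chunks; BaseRAGExample handles index building and chat.
        return ["Buy oat milk", "LEANN recomputes embeddings at query time"]


if __name__ == "__main__":
    asyncio.run(NotesRAG().run())
```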
apps/browser_rag.py (new file, 170 lines)
@@ -0,0 +1,170 @@
|
|||||||
|
"""
|
||||||
|
Browser History RAG example using the unified interface.
|
||||||
|
Supports Chrome browser history.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from base_rag_example import BaseRAGExample
from chunking import create_text_chunks  # create_text_chunks lives in the chunking package, not base_rag_example
|
||||||
|
|
||||||
|
from .history_data.history import ChromeHistoryReader
|
||||||
|
|
||||||
|
|
||||||
|
class BrowserRAG(BaseRAGExample):
|
||||||
|
"""RAG example for Chrome browser history."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
# Set default values BEFORE calling super().__init__
|
||||||
|
self.embedding_model_default = (
|
||||||
|
"sentence-transformers/all-MiniLM-L6-v2" # Fast 384-dim model
|
||||||
|
)
|
||||||
|
|
||||||
|
super().__init__(
|
||||||
|
name="Browser History",
|
||||||
|
description="Process and query Chrome browser history with LEANN",
|
||||||
|
default_index_name="google_history_index",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _add_specific_arguments(self, parser):
|
||||||
|
"""Add browser-specific arguments."""
|
||||||
|
browser_group = parser.add_argument_group("Browser Parameters")
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--chrome-profile",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Path to Chrome profile directory (auto-detected if not specified)",
|
||||||
|
)
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--auto-find-profiles",
|
||||||
|
action="store_true",
|
||||||
|
default=True,
|
||||||
|
help="Automatically find all Chrome profiles (default: True)",
|
||||||
|
)
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--chunk-size", type=int, default=256, help="Text chunk size (default: 256)"
|
||||||
|
)
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--chunk-overlap", type=int, default=128, help="Text chunk overlap (default: 128)"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_chrome_base_path(self) -> Path:
|
||||||
|
"""Get the base Chrome profile path based on OS."""
|
||||||
|
if sys.platform == "darwin":
|
||||||
|
return Path.home() / "Library" / "Application Support" / "Google" / "Chrome"
|
||||||
|
elif sys.platform.startswith("linux"):
|
||||||
|
return Path.home() / ".config" / "google-chrome"
|
||||||
|
elif sys.platform == "win32":
|
||||||
|
return Path(os.environ["LOCALAPPDATA"]) / "Google" / "Chrome" / "User Data"
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported platform: {sys.platform}")
|
||||||
|
|
||||||
|
def _find_chrome_profiles(self) -> list[Path]:
|
||||||
|
"""Auto-detect all Chrome profiles."""
|
||||||
|
base_path = self._get_chrome_base_path()
|
||||||
|
if not base_path.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
profiles = []
|
||||||
|
|
||||||
|
# Check Default profile
|
||||||
|
default_profile = base_path / "Default"
|
||||||
|
if default_profile.exists() and (default_profile / "History").exists():
|
||||||
|
profiles.append(default_profile)
|
||||||
|
|
||||||
|
# Check numbered profiles
|
||||||
|
for item in base_path.iterdir():
|
||||||
|
if item.is_dir() and item.name.startswith("Profile "):
|
||||||
|
if (item / "History").exists():
|
||||||
|
profiles.append(item)
|
||||||
|
|
||||||
|
return profiles
|
||||||
|
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load browser history and convert to text chunks."""
|
||||||
|
# Determine Chrome profiles
|
||||||
|
if args.chrome_profile and not args.auto_find_profiles:
|
||||||
|
profile_dirs = [Path(args.chrome_profile)]
|
||||||
|
else:
|
||||||
|
print("Auto-detecting Chrome profiles...")
|
||||||
|
profile_dirs = self._find_chrome_profiles()
|
||||||
|
|
||||||
|
# If specific profile given, filter to just that one
|
||||||
|
if args.chrome_profile:
|
||||||
|
profile_path = Path(args.chrome_profile)
|
||||||
|
profile_dirs = [p for p in profile_dirs if p == profile_path]
|
||||||
|
|
||||||
|
if not profile_dirs:
|
||||||
|
print("No Chrome profiles found!")
|
||||||
|
print("Please specify --chrome-profile manually")
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"Found {len(profile_dirs)} Chrome profiles")
|
||||||
|
|
||||||
|
# Create reader
|
||||||
|
reader = ChromeHistoryReader()
|
||||||
|
|
||||||
|
# Process each profile
|
||||||
|
all_documents = []
|
||||||
|
total_processed = 0
|
||||||
|
|
||||||
|
for i, profile_dir in enumerate(profile_dirs):
|
||||||
|
print(f"\nProcessing profile {i + 1}/{len(profile_dirs)}: {profile_dir.name}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Apply max_items limit per profile
|
||||||
|
max_per_profile = -1
|
||||||
|
if args.max_items > 0:
|
||||||
|
remaining = args.max_items - total_processed
|
||||||
|
if remaining <= 0:
|
||||||
|
break
|
||||||
|
max_per_profile = remaining
|
||||||
|
|
||||||
|
# Load history
|
||||||
|
documents = reader.load_data(
|
||||||
|
chrome_profile_path=str(profile_dir),
|
||||||
|
max_count=max_per_profile,
|
||||||
|
)
|
||||||
|
|
||||||
|
if documents:
|
||||||
|
all_documents.extend(documents)
|
||||||
|
total_processed += len(documents)
|
||||||
|
print(f"Processed {len(documents)} history entries from this profile")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error processing {profile_dir}: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not all_documents:
|
||||||
|
print("No browser history found to process!")
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"\nTotal history entries processed: {len(all_documents)}")
|
||||||
|
|
||||||
|
# Convert to text chunks
|
||||||
|
all_texts = create_text_chunks(
|
||||||
|
all_documents, chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
|
||||||
|
)
|
||||||
|
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Example queries for browser history RAG
|
||||||
|
print("\n🌐 Browser History RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'What websites did I visit about machine learning?'")
|
||||||
|
print("- 'Find my search history about programming'")
|
||||||
|
print("- 'What YouTube videos did I watch recently?'")
|
||||||
|
print("- 'Show me websites about travel planning'")
|
||||||
|
print("\nNote: Make sure Chrome is closed before running\n")
|
||||||
|
|
||||||
|
rag = BrowserRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
apps/chunking/__init__.py (new file, 22 lines)
@@ -0,0 +1,22 @@
|
|||||||
|
"""
|
||||||
|
Chunking utilities for LEANN RAG applications.
|
||||||
|
Provides AST-aware and traditional text chunking functionality.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .utils import (
|
||||||
|
CODE_EXTENSIONS,
|
||||||
|
create_ast_chunks,
|
||||||
|
create_text_chunks,
|
||||||
|
create_traditional_chunks,
|
||||||
|
detect_code_files,
|
||||||
|
get_language_from_extension,
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"CODE_EXTENSIONS",
|
||||||
|
"create_ast_chunks",
|
||||||
|
"create_text_chunks",
|
||||||
|
"create_traditional_chunks",
|
||||||
|
"detect_code_files",
|
||||||
|
"get_language_from_extension",
|
||||||
|
]
|
||||||
apps/chunking/utils.py (new file, 320 lines)
@@ -0,0 +1,320 @@
|
|||||||
|
"""
|
||||||
|
Enhanced chunking utilities with AST-aware code chunking support.
|
||||||
|
Provides unified interface for both traditional and AST-based text chunking.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from llama_index.core.node_parser import SentenceSplitter
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Code file extensions supported by astchunk
|
||||||
|
CODE_EXTENSIONS = {
|
||||||
|
".py": "python",
|
||||||
|
".java": "java",
|
||||||
|
".cs": "csharp",
|
||||||
|
".ts": "typescript",
|
||||||
|
".tsx": "typescript",
|
||||||
|
".js": "typescript",
|
||||||
|
".jsx": "typescript",
|
||||||
|
}
|
||||||
|
|
||||||
|
# Default chunk parameters for different content types
|
||||||
|
DEFAULT_CHUNK_PARAMS = {
|
||||||
|
"code": {
|
||||||
|
"max_chunk_size": 512,
|
||||||
|
"chunk_overlap": 64,
|
||||||
|
},
|
||||||
|
"text": {
|
||||||
|
"chunk_size": 256,
|
||||||
|
"chunk_overlap": 128,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def detect_code_files(documents, code_extensions=None) -> tuple[list, list]:
|
||||||
|
"""
|
||||||
|
Separate documents into code files and regular text files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
documents: List of LlamaIndex Document objects
|
||||||
|
code_extensions: Dict mapping file extensions to languages (defaults to CODE_EXTENSIONS)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (code_documents, text_documents)
|
||||||
|
"""
|
||||||
|
if code_extensions is None:
|
||||||
|
code_extensions = CODE_EXTENSIONS
|
||||||
|
|
||||||
|
code_docs = []
|
||||||
|
text_docs = []
|
||||||
|
|
||||||
|
for doc in documents:
|
||||||
|
# Get file path from metadata
|
||||||
|
file_path = doc.metadata.get("file_path", "")
|
||||||
|
if not file_path:
|
||||||
|
# Fallback to file_name
|
||||||
|
file_path = doc.metadata.get("file_name", "")
|
||||||
|
|
||||||
|
if file_path:
|
||||||
|
file_ext = Path(file_path).suffix.lower()
|
||||||
|
if file_ext in code_extensions:
|
||||||
|
# Add language info to metadata
|
||||||
|
doc.metadata["language"] = code_extensions[file_ext]
|
||||||
|
doc.metadata["is_code"] = True
|
||||||
|
code_docs.append(doc)
|
||||||
|
else:
|
||||||
|
doc.metadata["is_code"] = False
|
||||||
|
text_docs.append(doc)
|
||||||
|
else:
|
||||||
|
# If no file path, treat as text
|
||||||
|
doc.metadata["is_code"] = False
|
||||||
|
text_docs.append(doc)
|
||||||
|
|
||||||
|
logger.info(f"Detected {len(code_docs)} code files and {len(text_docs)} text files")
|
||||||
|
return code_docs, text_docs
|
||||||
|
|
||||||
|
|
||||||
|
def get_language_from_extension(file_path: str) -> Optional[str]:
|
||||||
|
"""Get the programming language from file extension."""
|
||||||
|
ext = Path(file_path).suffix.lower()
|
||||||
|
return CODE_EXTENSIONS.get(ext)
|
||||||
|
|
||||||
|
|
||||||
|
def create_ast_chunks(
|
||||||
|
documents,
|
||||||
|
max_chunk_size: int = 512,
|
||||||
|
chunk_overlap: int = 64,
|
||||||
|
metadata_template: str = "default",
|
||||||
|
) -> list[str]:
|
||||||
|
"""
|
||||||
|
Create AST-aware chunks from code documents using astchunk.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
documents: List of code documents
|
||||||
|
max_chunk_size: Maximum characters per chunk
|
||||||
|
chunk_overlap: Number of AST nodes to overlap between chunks
|
||||||
|
metadata_template: Template for chunk metadata
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of text chunks with preserved code structure
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from astchunk import ASTChunkBuilder
|
||||||
|
except ImportError as e:
|
||||||
|
logger.error(f"astchunk not available: {e}")
|
||||||
|
logger.info("Falling back to traditional chunking for code files")
|
||||||
|
return create_traditional_chunks(documents, max_chunk_size, chunk_overlap)
|
||||||
|
|
||||||
|
all_chunks = []
|
||||||
|
|
||||||
|
for doc in documents:
|
||||||
|
# Get language from metadata (set by detect_code_files)
|
||||||
|
language = doc.metadata.get("language")
|
||||||
|
if not language:
|
||||||
|
logger.warning(
|
||||||
|
"No language detected for document, falling back to traditional chunking"
|
||||||
|
)
|
||||||
|
traditional_chunks = create_traditional_chunks([doc], max_chunk_size, chunk_overlap)
|
||||||
|
all_chunks.extend(traditional_chunks)
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Configure astchunk
|
||||||
|
configs = {
|
||||||
|
"max_chunk_size": max_chunk_size,
|
||||||
|
"language": language,
|
||||||
|
"metadata_template": metadata_template,
|
||||||
|
"chunk_overlap": chunk_overlap if chunk_overlap > 0 else 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add repository-level metadata if available
|
||||||
|
repo_metadata = {
|
||||||
|
"file_path": doc.metadata.get("file_path", ""),
|
||||||
|
"file_name": doc.metadata.get("file_name", ""),
|
||||||
|
"creation_date": doc.metadata.get("creation_date", ""),
|
||||||
|
"last_modified_date": doc.metadata.get("last_modified_date", ""),
|
||||||
|
}
|
||||||
|
configs["repo_level_metadata"] = repo_metadata
|
||||||
|
|
||||||
|
# Create chunk builder and process
|
||||||
|
chunk_builder = ASTChunkBuilder(**configs)
|
||||||
|
code_content = doc.get_content()
|
||||||
|
|
||||||
|
if not code_content or not code_content.strip():
|
||||||
|
logger.warning("Empty code content, skipping")
|
||||||
|
continue
|
||||||
|
|
||||||
|
chunks = chunk_builder.chunkify(code_content)
|
||||||
|
|
||||||
|
# Extract text content from chunks
|
||||||
|
for chunk in chunks:
|
||||||
|
if hasattr(chunk, "text"):
|
||||||
|
chunk_text = chunk.text
|
||||||
|
elif isinstance(chunk, dict) and "text" in chunk:
|
||||||
|
chunk_text = chunk["text"]
|
||||||
|
elif isinstance(chunk, str):
|
||||||
|
chunk_text = chunk
|
||||||
|
else:
|
||||||
|
# Try to convert to string
|
||||||
|
chunk_text = str(chunk)
|
||||||
|
|
||||||
|
if chunk_text and chunk_text.strip():
|
||||||
|
all_chunks.append(chunk_text.strip())
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Created {len(chunks)} AST chunks from {language} file: {doc.metadata.get('file_name', 'unknown')}"
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"AST chunking failed for {language} file: {e}")
|
||||||
|
logger.info("Falling back to traditional chunking")
|
||||||
|
traditional_chunks = create_traditional_chunks([doc], max_chunk_size, chunk_overlap)
|
||||||
|
all_chunks.extend(traditional_chunks)
|
||||||
|
|
||||||
|
return all_chunks
|
||||||
|
|
||||||
|
|
||||||
|
def create_traditional_chunks(
|
||||||
|
documents, chunk_size: int = 256, chunk_overlap: int = 128
|
||||||
|
) -> list[str]:
|
||||||
|
"""
|
||||||
|
Create traditional text chunks using LlamaIndex SentenceSplitter.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
documents: List of documents to chunk
|
||||||
|
chunk_size: Size of each chunk in characters
|
||||||
|
chunk_overlap: Overlap between chunks
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of text chunks
|
||||||
|
"""
|
||||||
|
# Handle invalid chunk_size values
|
||||||
|
if chunk_size <= 0:
|
||||||
|
logger.warning(f"Invalid chunk_size={chunk_size}, using default value of 256")
|
||||||
|
chunk_size = 256
|
||||||
|
|
||||||
|
# Ensure chunk_overlap is not negative and not larger than chunk_size
|
||||||
|
if chunk_overlap < 0:
|
||||||
|
chunk_overlap = 0
|
||||||
|
if chunk_overlap >= chunk_size:
|
||||||
|
chunk_overlap = chunk_size // 2
|
||||||
|
|
||||||
|
node_parser = SentenceSplitter(
|
||||||
|
chunk_size=chunk_size,
|
||||||
|
chunk_overlap=chunk_overlap,
|
||||||
|
separator=" ",
|
||||||
|
paragraph_separator="\n\n",
|
||||||
|
)
|
||||||
|
|
||||||
|
all_texts = []
|
||||||
|
for doc in documents:
|
||||||
|
try:
|
||||||
|
nodes = node_parser.get_nodes_from_documents([doc])
|
||||||
|
if nodes:
|
||||||
|
chunk_texts = [node.get_content() for node in nodes]
|
||||||
|
all_texts.extend(chunk_texts)
|
||||||
|
logger.debug(f"Created {len(chunk_texts)} traditional chunks from document")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Traditional chunking failed for document: {e}")
|
||||||
|
# As last resort, add the raw content
|
||||||
|
content = doc.get_content()
|
||||||
|
if content and content.strip():
|
||||||
|
all_texts.append(content.strip())
|
||||||
|
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
def create_text_chunks(
|
||||||
|
documents,
|
||||||
|
chunk_size: int = 256,
|
||||||
|
chunk_overlap: int = 128,
|
||||||
|
use_ast_chunking: bool = False,
|
||||||
|
ast_chunk_size: int = 512,
|
||||||
|
ast_chunk_overlap: int = 64,
|
||||||
|
code_file_extensions: Optional[list[str]] = None,
|
||||||
|
ast_fallback_traditional: bool = True,
|
||||||
|
) -> list[str]:
|
||||||
|
"""
|
||||||
|
Create text chunks from documents with optional AST support for code files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
documents: List of LlamaIndex Document objects
|
||||||
|
chunk_size: Size for traditional text chunks
|
||||||
|
chunk_overlap: Overlap for traditional text chunks
|
||||||
|
use_ast_chunking: Whether to use AST chunking for code files
|
||||||
|
ast_chunk_size: Size for AST chunks
|
||||||
|
ast_chunk_overlap: Overlap for AST chunks
|
||||||
|
code_file_extensions: Custom list of code file extensions
|
||||||
|
ast_fallback_traditional: Fall back to traditional chunking on AST errors
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of text chunks
|
||||||
|
"""
|
||||||
|
if not documents:
|
||||||
|
logger.warning("No documents provided for chunking")
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Create a local copy of supported extensions for this function call
|
||||||
|
local_code_extensions = CODE_EXTENSIONS.copy()
|
||||||
|
|
||||||
|
# Update supported extensions if provided
|
||||||
|
if code_file_extensions:
|
||||||
|
# Map extensions to languages (simplified mapping)
|
||||||
|
ext_mapping = {
|
||||||
|
".py": "python",
|
||||||
|
".java": "java",
|
||||||
|
".cs": "c_sharp",
|
||||||
|
".ts": "typescript",
|
||||||
|
".tsx": "typescript",
|
||||||
|
}
|
||||||
|
for ext in code_file_extensions:
|
||||||
|
if ext.lower() not in local_code_extensions:
|
||||||
|
# Try to guess language from extension
|
||||||
|
if ext.lower() in ext_mapping:
|
||||||
|
local_code_extensions[ext.lower()] = ext_mapping[ext.lower()]
|
||||||
|
else:
|
||||||
|
logger.warning(f"Unsupported extension {ext}, will use traditional chunking")
|
||||||
|
|
||||||
|
all_chunks = []
|
||||||
|
|
||||||
|
if use_ast_chunking:
|
||||||
|
# Separate code and text documents using local extensions
|
||||||
|
code_docs, text_docs = detect_code_files(documents, local_code_extensions)
|
||||||
|
|
||||||
|
# Process code files with AST chunking
|
||||||
|
if code_docs:
|
||||||
|
logger.info(f"Processing {len(code_docs)} code files with AST chunking")
|
||||||
|
try:
|
||||||
|
ast_chunks = create_ast_chunks(
|
||||||
|
code_docs, max_chunk_size=ast_chunk_size, chunk_overlap=ast_chunk_overlap
|
||||||
|
)
|
||||||
|
all_chunks.extend(ast_chunks)
|
||||||
|
logger.info(f"Created {len(ast_chunks)} AST chunks from code files")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"AST chunking failed: {e}")
|
||||||
|
if ast_fallback_traditional:
|
||||||
|
logger.info("Falling back to traditional chunking for code files")
|
||||||
|
traditional_code_chunks = create_traditional_chunks(
|
||||||
|
code_docs, chunk_size, chunk_overlap
|
||||||
|
)
|
||||||
|
all_chunks.extend(traditional_code_chunks)
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
# Process text files with traditional chunking
|
||||||
|
if text_docs:
|
||||||
|
logger.info(f"Processing {len(text_docs)} text files with traditional chunking")
|
||||||
|
text_chunks = create_traditional_chunks(text_docs, chunk_size, chunk_overlap)
|
||||||
|
all_chunks.extend(text_chunks)
|
||||||
|
logger.info(f"Created {len(text_chunks)} traditional chunks from text files")
|
||||||
|
else:
|
||||||
|
# Use traditional chunking for all files
|
||||||
|
logger.info(f"Processing {len(documents)} documents with traditional chunking")
|
||||||
|
all_chunks = create_traditional_chunks(documents, chunk_size, chunk_overlap)
|
||||||
|
|
||||||
|
logger.info(f"Total chunks created: {len(all_chunks)}")
|
||||||
|
return all_chunks
|
||||||
apps/code_rag.py (new file, 211 lines)
@@ -0,0 +1,211 @@
|
|||||||
|
"""
|
||||||
|
Code RAG example using AST-aware chunking for optimal code understanding.
|
||||||
|
Specialized for code repositories with automatic language detection and
|
||||||
|
optimized chunking parameters.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from base_rag_example import BaseRAGExample
|
||||||
|
from chunking import CODE_EXTENSIONS, create_text_chunks
|
||||||
|
from llama_index.core import SimpleDirectoryReader
|
||||||
|
|
||||||
|
|
||||||
|
class CodeRAG(BaseRAGExample):
|
||||||
|
"""Specialized RAG example for code repositories with AST-aware chunking."""
|
||||||
|
|
||||||
|
def __init__(self):
# Set defaults BEFORE calling super().__init__ so the argument parser picks them up
# (BaseRAGExample._create_parser reads them via getattr when building the parser).
self.embedding_model_default = "facebook/contriever"  # Good for code
self.max_items_default = -1  # Process all code files by default
super().__init__(
name="Code",
description="Process and query code repositories with AST-aware chunking",
default_index_name="code_index",
)
|
||||||
|
|
||||||
|
def _add_specific_arguments(self, parser):
|
||||||
|
"""Add code-specific arguments."""
|
||||||
|
code_group = parser.add_argument_group("Code Repository Parameters")
|
||||||
|
|
||||||
|
code_group.add_argument(
|
||||||
|
"--repo-dir",
|
||||||
|
type=str,
|
||||||
|
default=".",
|
||||||
|
help="Code repository directory to index (default: current directory)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--include-extensions",
|
||||||
|
nargs="+",
|
||||||
|
default=list(CODE_EXTENSIONS.keys()),
|
||||||
|
help="File extensions to include (default: supported code extensions)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--exclude-dirs",
|
||||||
|
nargs="+",
|
||||||
|
default=[
|
||||||
|
".git",
|
||||||
|
"__pycache__",
|
||||||
|
"node_modules",
|
||||||
|
"venv",
|
||||||
|
".venv",
|
||||||
|
"build",
|
||||||
|
"dist",
|
||||||
|
"target",
|
||||||
|
],
|
||||||
|
help="Directories to exclude from indexing",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--max-file-size",
|
||||||
|
type=int,
|
||||||
|
default=1000000, # 1MB
|
||||||
|
help="Maximum file size in bytes to process (default: 1MB)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--include-comments",
|
||||||
|
action="store_true",
|
||||||
|
help="Include comments in chunking (useful for documentation)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--preserve-imports",
|
||||||
|
action="store_true",
|
||||||
|
default=True,
|
||||||
|
help="Try to preserve import statements in chunks (default: True)",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load code files and convert to AST-aware chunks."""
|
||||||
|
print(f"🔍 Scanning code repository: {args.repo_dir}")
|
||||||
|
print(f"📁 Including extensions: {args.include_extensions}")
|
||||||
|
print(f"🚫 Excluding directories: {args.exclude_dirs}")
|
||||||
|
|
||||||
|
# Check if repository directory exists
|
||||||
|
repo_path = Path(args.repo_dir)
|
||||||
|
if not repo_path.exists():
|
||||||
|
raise ValueError(f"Repository directory not found: {args.repo_dir}")
|
||||||
|
|
||||||
|
# Load code files with filtering
|
||||||
|
reader_kwargs = {
|
||||||
|
"recursive": True,
|
||||||
|
"encoding": "utf-8",
|
||||||
|
"required_exts": args.include_extensions,
|
||||||
|
"exclude_hidden": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create exclusion filter
|
||||||
|
def file_filter(file_path: str) -> bool:
|
||||||
|
"""Filter out unwanted files and directories."""
|
||||||
|
path = Path(file_path)
|
||||||
|
|
||||||
|
# Check file size
|
||||||
|
try:
|
||||||
|
if path.stat().st_size > args.max_file_size:
|
||||||
|
print(f"⚠️ Skipping large file: {path.name} ({path.stat().st_size} bytes)")
|
||||||
|
return False
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check if in excluded directory
|
||||||
|
for exclude_dir in args.exclude_dirs:
|
||||||
|
if exclude_dir in path.parts:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Load documents with file filtering
|
||||||
|
documents = SimpleDirectoryReader(
|
||||||
|
args.repo_dir,
|
||||||
|
file_extractor=None, # Use default extractors
|
||||||
|
**reader_kwargs,
|
||||||
|
).load_data(show_progress=True)
|
||||||
|
|
||||||
|
# Apply custom filtering
|
||||||
|
filtered_docs = []
|
||||||
|
for doc in documents:
|
||||||
|
file_path = doc.metadata.get("file_path", "")
|
||||||
|
if file_filter(file_path):
|
||||||
|
filtered_docs.append(doc)
|
||||||
|
|
||||||
|
documents = filtered_docs
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Error loading code files: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
if not documents:
|
||||||
|
print(
|
||||||
|
f"❌ No code files found in {args.repo_dir} with extensions {args.include_extensions}"
|
||||||
|
)
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"✅ Loaded {len(documents)} code files")
|
||||||
|
|
||||||
|
# Show breakdown by language/extension
|
||||||
|
ext_counts = {}
|
||||||
|
for doc in documents:
|
||||||
|
file_path = doc.metadata.get("file_path", "")
|
||||||
|
if file_path:
|
||||||
|
ext = Path(file_path).suffix.lower()
|
||||||
|
ext_counts[ext] = ext_counts.get(ext, 0) + 1
|
||||||
|
|
||||||
|
print("📊 Files by extension:")
|
||||||
|
for ext, count in sorted(ext_counts.items()):
|
||||||
|
print(f" {ext}: {count} files")
|
||||||
|
|
||||||
|
# Use AST-aware chunking by default for code
|
||||||
|
print(
|
||||||
|
f"🧠 Using AST-aware chunking (chunk_size: {args.ast_chunk_size}, overlap: {args.ast_chunk_overlap})"
|
||||||
|
)
|
||||||
|
|
||||||
|
all_texts = create_text_chunks(
|
||||||
|
documents,
|
||||||
|
chunk_size=256, # Fallback for non-code files
|
||||||
|
chunk_overlap=64,
|
||||||
|
use_ast_chunking=True, # Always use AST for code RAG
|
||||||
|
ast_chunk_size=args.ast_chunk_size,
|
||||||
|
ast_chunk_overlap=args.ast_chunk_overlap,
|
||||||
|
code_file_extensions=args.include_extensions,
|
||||||
|
ast_fallback_traditional=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Apply max_items limit if specified
|
||||||
|
if args.max_items > 0 and len(all_texts) > args.max_items:
|
||||||
|
print(f"⏳ Limiting to {args.max_items} chunks (from {len(all_texts)})")
|
||||||
|
all_texts = all_texts[: args.max_items]
|
||||||
|
|
||||||
|
print(f"✅ Generated {len(all_texts)} code chunks")
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Example queries for code RAG
|
||||||
|
print("\n💻 Code RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'How does the embedding computation work?'")
|
||||||
|
print("- 'What are the main classes in this codebase?'")
|
||||||
|
print("- 'Show me the search implementation'")
|
||||||
|
print("- 'How is error handling implemented?'")
|
||||||
|
print("- 'What design patterns are used?'")
|
||||||
|
print("- 'Explain the chunking logic'")
|
||||||
|
print("\n🚀 Features:")
|
||||||
|
print("- ✅ AST-aware chunking preserves code structure")
|
||||||
|
print("- ✅ Automatic language detection")
|
||||||
|
print("- ✅ Smart filtering of large files and common excludes")
|
||||||
|
print("- ✅ Optimized for code understanding")
|
||||||
|
print("\nUsage examples:")
|
||||||
|
print(" python -m apps.code_rag --repo-dir ./my_project")
|
||||||
|
print(
|
||||||
|
" python -m apps.code_rag --include-extensions .py .js --query 'How does authentication work?'"
|
||||||
|
)
|
||||||
|
print("\nOr run without --query for interactive mode\n")
|
||||||
|
|
||||||
|
rag = CodeRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
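The file_filter above skips a file when any component of its path matches an excluded directory name. A minimal standalone sketch of that check, for reference only (is_excluded and EXCLUDE_DIRS are illustrative names, not part of the repository):

from pathlib import Path

EXCLUDE_DIRS = {".git", "__pycache__", "node_modules", "venv", ".venv", "build", "dist", "target"}

def is_excluded(file_path: str) -> bool:
    # Path.parts splits the path into components, so a match anywhere along it excludes the file.
    return any(part in EXCLUDE_DIRS for part in Path(file_path).parts)

print(is_excluded("repo/node_modules/lib/index.js"))  # True
print(is_excluded("repo/src/main.py"))                # False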
131  apps/document_rag.py  Normal file
@@ -0,0 +1,131 @@
"""
Document RAG example using the unified interface.
Supports PDF, TXT, MD, and other document formats.
"""

import sys
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))

from base_rag_example import BaseRAGExample
from chunking import create_text_chunks
from llama_index.core import SimpleDirectoryReader


class DocumentRAG(BaseRAGExample):
    """RAG example for document processing (PDF, TXT, MD, etc.)."""

    def __init__(self):
        super().__init__(
            name="Document",
            description="Process and query documents (PDF, TXT, MD, etc.) with LEANN",
            default_index_name="test_doc_files",
        )

    def _add_specific_arguments(self, parser):
        """Add document-specific arguments."""
        doc_group = parser.add_argument_group("Document Parameters")
        doc_group.add_argument(
            "--data-dir",
            type=str,
            default="data",
            help="Directory containing documents to index (default: data)",
        )
        doc_group.add_argument(
            "--file-types",
            nargs="+",
            default=None,
            help="Filter by file types (e.g., .pdf .txt .md). If not specified, all supported types are processed",
        )
        doc_group.add_argument(
            "--chunk-size", type=int, default=256, help="Text chunk size (default: 256)"
        )
        doc_group.add_argument(
            "--chunk-overlap", type=int, default=128, help="Text chunk overlap (default: 128)"
        )
        doc_group.add_argument(
            "--enable-code-chunking",
            action="store_true",
            help="Enable AST-aware chunking for code files in the data directory",
        )

    async def load_data(self, args) -> list[str]:
        """Load documents and convert to text chunks."""
        print(f"Loading documents from: {args.data_dir}")
        if args.file_types:
            print(f"Filtering by file types: {args.file_types}")
        else:
            print("Processing all supported file types")

        # Check if data directory exists
        data_path = Path(args.data_dir)
        if not data_path.exists():
            raise ValueError(f"Data directory not found: {args.data_dir}")

        # Load documents
        reader_kwargs = {
            "recursive": True,
            "encoding": "utf-8",
        }
        if args.file_types:
            reader_kwargs["required_exts"] = args.file_types

        documents = SimpleDirectoryReader(args.data_dir, **reader_kwargs).load_data(
            show_progress=True
        )

        if not documents:
            print(f"No documents found in {args.data_dir} with extensions {args.file_types}")
            return []

        print(f"Loaded {len(documents)} documents")

        # Determine chunking strategy
        use_ast = args.enable_code_chunking or getattr(args, "use_ast_chunking", False)

        if use_ast:
            print("Using AST-aware chunking for code files")

        # Convert to text chunks with optional AST support
        all_texts = create_text_chunks(
            documents,
            chunk_size=args.chunk_size,
            chunk_overlap=args.chunk_overlap,
            use_ast_chunking=use_ast,
            ast_chunk_size=getattr(args, "ast_chunk_size", 512),
            ast_chunk_overlap=getattr(args, "ast_chunk_overlap", 64),
            code_file_extensions=getattr(args, "code_file_extensions", None),
            ast_fallback_traditional=getattr(args, "ast_fallback_traditional", True),
        )

        # Apply max_items limit if specified
        if args.max_items > 0 and len(all_texts) > args.max_items:
            print(f"Limiting to {args.max_items} chunks (from {len(all_texts)})")
            all_texts = all_texts[: args.max_items]

        return all_texts


if __name__ == "__main__":
    import asyncio

    # Example queries for document RAG
    print("\n📄 Document RAG Example")
    print("=" * 50)
    print("\nExample queries you can try:")
    print("- 'What are the main techniques LEANN uses?'")
    print("- 'What is the technique DLPM?'")
    print("- 'Who does Elizabeth Bennet marry?'")
    print(
        "- 'What is the problem of developing pan gu model Huawei meets? (盘古大模型开发中遇到什么问题?)'"
    )
    print("\n🚀 NEW: Code-aware chunking available!")
    print("- Use --enable-code-chunking to enable AST-aware chunking for code files")
    print("- Supports Python, Java, C#, TypeScript files")
    print("- Better semantic understanding of code structure")
    print("\nOr run without --query for interactive mode\n")

    rag = DocumentRAG()
    asyncio.run(rag.run())
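For a sense of what --chunk-size and --chunk-overlap control, here is a small sketch using llama_index's SentenceSplitter directly. create_text_chunks is a local helper in this repository and may wrap different logic, so treat this only as an illustration of the two parameters:

from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter

docs = [Document(text="LEANN builds a compact vector index over document text. " * 50)]
# chunk_size caps each chunk; chunk_overlap repeats trailing context at the start of the next chunk.
splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
nodes = splitter.get_nodes_from_documents(docs)
print(f"{len(nodes)} chunks; first chunk starts with: {nodes[0].text[:60]}...")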
167  apps/email_data/LEANN_email_reader.py  Normal file
@@ -0,0 +1,167 @@
import email
import os
from pathlib import Path
from typing import Any

from llama_index.core import Document
from llama_index.core.readers.base import BaseReader


def find_all_messages_directories(root: str | None = None) -> list[Path]:
    """
    Recursively find all 'Messages' directories under the given root.
    Returns a list of Path objects.
    """
    if root is None:
        # Auto-detect user's mail path
        home_dir = os.path.expanduser("~")
        root = os.path.join(home_dir, "Library", "Mail")

    messages_dirs = []
    for dirpath, _dirnames, _filenames in os.walk(root):
        if os.path.basename(dirpath) == "Messages":
            messages_dirs.append(Path(dirpath))
    return messages_dirs


class EmlxReader(BaseReader):
    """
    Apple Mail .emlx file reader with embedded metadata.

    Reads individual .emlx files from Apple Mail's storage format.
    """

    def __init__(self, include_html: bool = False) -> None:
        """
        Initialize.

        Args:
            include_html: Whether to include HTML content in the email body (default: False)
        """
        self.include_html = include_html

    def load_data(self, input_dir: str, **load_kwargs: Any) -> list[Document]:
        """
        Load data from the input directory containing .emlx files.

        Args:
            input_dir: Directory containing .emlx files
            **load_kwargs:
                max_count (int): Maximum amount of messages to read.
        """
        docs: list[Document] = []
        max_count = load_kwargs.get("max_count", 1000)
        count = 0
        total_files = 0
        successful_files = 0
        failed_files = 0

        print(f"Starting to process directory: {input_dir}")

        # Walk through the directory recursively
        for dirpath, dirnames, filenames in os.walk(input_dir):
            # Skip hidden directories
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]

            for filename in filenames:
                # Check if we've reached the max count (skip if max_count == -1)
                if max_count > 0 and count >= max_count:
                    break

                if filename.endswith(".emlx"):
                    total_files += 1
                    filepath = os.path.join(dirpath, filename)
                    try:
                        # Read the .emlx file
                        with open(filepath, encoding="utf-8", errors="ignore") as f:
                            content = f.read()

                        # .emlx files have a length prefix followed by the email content
                        # The first line contains the length, followed by the email
                        lines = content.split("\n", 1)
                        if len(lines) >= 2:
                            email_content = lines[1]

                            # Parse the email using Python's email module
                            try:
                                msg = email.message_from_string(email_content)

                                # Extract email metadata
                                subject = msg.get("Subject", "No Subject")
                                from_addr = msg.get("From", "Unknown")
                                to_addr = msg.get("To", "Unknown")
                                date = msg.get("Date", "Unknown")

                                # Extract email body
                                body = ""
                                if msg.is_multipart():
                                    for part in msg.walk():
                                        if (
                                            part.get_content_type() == "text/plain"
                                            or part.get_content_type() == "text/html"
                                        ):
                                            if (
                                                part.get_content_type() == "text/html"
                                                and not self.include_html
                                            ):
                                                continue
                                            try:
                                                payload = part.get_payload(decode=True)
                                                if payload:
                                                    body += payload.decode("utf-8", errors="ignore")
                                            except Exception as e:
                                                print(f"Error decoding payload: {e}")
                                                continue
                                else:
                                    try:
                                        payload = msg.get_payload(decode=True)
                                        if payload:
                                            body = payload.decode("utf-8", errors="ignore")
                                    except Exception as e:
                                        print(f"Error decoding single part payload: {e}")
                                        body = ""

                                # Only create document if we have some content
                                if body.strip() or subject != "No Subject":
                                    # Create document content with metadata embedded in text
                                    doc_content = f"""
[File]: {filename}
[From]: {from_addr}
[To]: {to_addr}
[Subject]: {subject}
[Date]: {date}
[EMAIL BODY Start]:
{body}
"""

                                    # No separate metadata - everything is in the text
                                    doc = Document(text=doc_content, metadata={})
                                    docs.append(doc)
                                    count += 1
                                    successful_files += 1

                                    # Print first few successful files for debugging
                                    if successful_files <= 3:
                                        print(
                                            f"Successfully loaded: {filename} - Subject: {subject[:50]}..."
                                        )

                            except Exception as e:
                                failed_files += 1
                                if failed_files <= 5:  # Only print first few errors
                                    print(f"Error parsing email from {filepath}: {e}")
                                continue

                    except Exception as e:
                        failed_files += 1
                        if failed_files <= 5:  # Only print first few errors
                            print(f"Error reading file {filepath}: {e}")
                        continue

        print("Processing summary:")
        print(f" Total .emlx files found: {total_files}")
        print(f" Successfully loaded: {successful_files}")
        print(f" Failed to load: {failed_files}")
        print(f" Final documents: {len(docs)}")

        return docs
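The reader relies on the .emlx layout described in the comments above: a byte-count line followed by a standard RFC 822 message. A self-contained sketch of just that parsing step (sample.emlx is a placeholder path, not a file in this repository):

import email
from pathlib import Path

def read_emlx_subject(path: str) -> str:
    raw = Path(path).read_text(encoding="utf-8", errors="ignore")
    # The first line is the byte-count prefix; the email message follows it.
    _prefix, _, message_part = raw.partition("\n")
    msg = email.message_from_string(message_part)
    return msg.get("Subject", "No Subject")

print(read_emlx_subject("sample.emlx"))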
@@ -7,9 +7,9 @@ Contains simple parser for mbox files.
 
 import logging
 from pathlib import Path
-from typing import Any, Dict, List, Optional
-from fsspec import AbstractFileSystem
+from typing import Any
 
+from fsspec import AbstractFileSystem
 from llama_index.core.readers.base import BaseReader
 from llama_index.core.schema import Document
 
@@ -27,11 +27,7 @@ class MboxReader(BaseReader):
     """
 
     DEFAULT_MESSAGE_FORMAT: str = (
-        "Date: {_date}\n"
-        "From: {_from}\n"
-        "To: {_to}\n"
-        "Subject: {_subject}\n"
-        "Content: {_content}"
+        "Date: {_date}\nFrom: {_from}\nTo: {_to}\nSubject: {_subject}\nContent: {_content}"
     )
 
     def __init__(
@@ -45,9 +41,7 @@ class MboxReader(BaseReader):
         try:
             from bs4 import BeautifulSoup  # noqa
         except ImportError:
-            raise ImportError(
-                "`beautifulsoup4` package not found: `pip install beautifulsoup4`"
-            )
+            raise ImportError("`beautifulsoup4` package not found: `pip install beautifulsoup4`")
 
         super().__init__(*args, **kwargs)
         self.max_count = max_count
@@ -56,9 +50,9 @@ class MboxReader(BaseReader):
     def load_data(
         self,
         file: Path,
-        extra_info: Optional[Dict] = None,
-        fs: Optional[AbstractFileSystem] = None,
-    ) -> List[Document]:
+        extra_info: dict | None = None,
+        fs: AbstractFileSystem | None = None,
+    ) -> list[Document]:
         """Parse file into string."""
         # Import required libraries
         import mailbox
@@ -74,7 +68,7 @@ class MboxReader(BaseReader):
         )
 
         i = 0
-        results: List[str] = []
+        results: list[str] = []
         # Load file using mailbox
         bytes_parser = BytesParser(policy=default).parse
         mbox = mailbox.mbox(file, factory=bytes_parser)  # type: ignore
@@ -134,12 +128,12 @@ class EmlxMboxReader(MboxReader):
     def load_data(
         self,
         directory: Path,
-        extra_info: Optional[Dict] = None,
-        fs: Optional[AbstractFileSystem] = None,
-    ) -> List[Document]:
+        extra_info: dict | None = None,
+        fs: AbstractFileSystem | None = None,
+    ) -> list[Document]:
         """Parse .emlx files from directory into strings using MboxReader logic."""
-        import tempfile
         import os
+        import tempfile
 
         if fs:
             logger.warning(
@@ -156,18 +150,18 @@ class EmlxMboxReader(MboxReader):
             return []
 
        # Create a temporary mbox file
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.mbox', delete=False) as temp_mbox:
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".mbox", delete=False) as temp_mbox:
            temp_mbox_path = temp_mbox.name
 
        # Convert .emlx files to mbox format
        for emlx_file in emlx_files:
            try:
                # Read the .emlx file
-                with open(emlx_file, 'r', encoding='utf-8', errors='ignore') as f:
+                with open(emlx_file, encoding="utf-8", errors="ignore") as f:
                    content = f.read()
 
                # .emlx format: first line is length, rest is email content
-                lines = content.split('\n', 1)
+                lines = content.split("\n", 1)
                if len(lines) >= 2:
                    email_content = lines[1]  # Skip the length line
 
@@ -188,5 +182,5 @@ class EmlxMboxReader(MboxReader):
         # Clean up temporary file
         try:
             os.unlink(temp_mbox_path)
-        except:
+        except OSError:
             pass
156  apps/email_rag.py  Normal file
@@ -0,0 +1,156 @@
"""
Email RAG example using the unified interface.
Supports Apple Mail on macOS.
"""

import sys
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))

from base_rag_example import BaseRAGExample, create_text_chunks

from .email_data.LEANN_email_reader import EmlxReader


class EmailRAG(BaseRAGExample):
    """RAG example for Apple Mail processing."""

    def __init__(self):
        # Set default values BEFORE calling super().__init__
        self.max_items_default = -1  # Process all emails by default
        self.embedding_model_default = (
            "sentence-transformers/all-MiniLM-L6-v2"  # Fast 384-dim model
        )

        super().__init__(
            name="Email",
            description="Process and query Apple Mail emails with LEANN",
            default_index_name="mail_index",
        )

    def _add_specific_arguments(self, parser):
        """Add email-specific arguments."""
        email_group = parser.add_argument_group("Email Parameters")
        email_group.add_argument(
            "--mail-path",
            type=str,
            default=None,
            help="Path to Apple Mail directory (auto-detected if not specified)",
        )
        email_group.add_argument(
            "--include-html", action="store_true", help="Include HTML content in email processing"
        )
        email_group.add_argument(
            "--chunk-size", type=int, default=256, help="Text chunk size (default: 256)"
        )
        email_group.add_argument(
            "--chunk-overlap", type=int, default=25, help="Text chunk overlap (default: 25)"
        )

    def _find_mail_directories(self) -> list[Path]:
        """Auto-detect all Apple Mail directories."""
        mail_base = Path.home() / "Library" / "Mail"
        if not mail_base.exists():
            return []

        # Find all Messages directories
        messages_dirs = []
        for item in mail_base.rglob("Messages"):
            if item.is_dir():
                messages_dirs.append(item)

        return messages_dirs

    async def load_data(self, args) -> list[str]:
        """Load emails and convert to text chunks."""
        # Determine mail directories
        if args.mail_path:
            messages_dirs = [Path(args.mail_path)]
        else:
            print("Auto-detecting Apple Mail directories...")
            messages_dirs = self._find_mail_directories()

        if not messages_dirs:
            print("No Apple Mail directories found!")
            print("Please specify --mail-path manually")
            return []

        print(f"Found {len(messages_dirs)} mail directories")

        # Create reader
        reader = EmlxReader(include_html=args.include_html)

        # Process each directory
        all_documents = []
        total_processed = 0

        for i, messages_dir in enumerate(messages_dirs):
            print(f"\nProcessing directory {i + 1}/{len(messages_dirs)}: {messages_dir}")

            try:
                # Count emlx files
                emlx_files = list(messages_dir.glob("*.emlx"))
                print(f"Found {len(emlx_files)} email files")

                # Apply max_items limit per directory
                max_per_dir = -1  # Default to process all
                if args.max_items > 0:
                    remaining = args.max_items - total_processed
                    if remaining <= 0:
                        break
                    max_per_dir = remaining
                # If args.max_items == -1, max_per_dir stays -1 (process all)

                # Load emails - fix the parameter passing
                documents = reader.load_data(
                    input_dir=str(messages_dir),
                    max_count=max_per_dir,
                )

                if documents:
                    all_documents.extend(documents)
                    total_processed += len(documents)
                    print(f"Processed {len(documents)} emails from this directory")

            except Exception as e:
                print(f"Error processing {messages_dir}: {e}")
                continue

        if not all_documents:
            print("No emails found to process!")
            return []

        print(f"\nTotal emails processed: {len(all_documents)}")
        print("now starting to split into text chunks ... take some time")

        # Convert to text chunks
        # Email reader uses chunk_overlap=25 as in original
        all_texts = create_text_chunks(
            all_documents, chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
        )

        return all_texts


if __name__ == "__main__":
    import asyncio

    # Check platform
    if sys.platform != "darwin":
        print("\n⚠️ Warning: This example is designed for macOS (Apple Mail)")
        print(" Windows/Linux support coming soon!\n")

    # Example queries for email RAG
    print("\n📧 Email RAG Example")
    print("=" * 50)
    print("\nExample queries you can try:")
    print("- 'What did my boss say about deadlines?'")
    print("- 'Find emails about travel expenses'")
    print("- 'Show me emails from last month about the project'")
    print("- 'What food did I order from DoorDash?'")
    print("\nNote: You may need to grant Full Disk Access to your terminal\n")

    rag = EmailRAG()
    asyncio.run(rag.run())
@@ -1,3 +1,3 @@
 from .history import ChromeHistoryReader
 
-__all__ = ['ChromeHistoryReader']
+__all__ = ["ChromeHistoryReader"]
@@ -1,10 +1,12 @@
-import sqlite3
 import os
+import sqlite3
 from pathlib import Path
-from typing import List, Any
+from typing import Any
 
 from llama_index.core import Document
 from llama_index.core.readers.base import BaseReader
 
 
 class ChromeHistoryReader(BaseReader):
     """
     Chrome browser history reader that extracts browsing data from SQLite database.
@@ -17,7 +19,7 @@ class ChromeHistoryReader(BaseReader):
         """Initialize."""
         pass
 
-    def load_data(self, input_dir: str = None, **load_kwargs: Any) -> List[Document]:
+    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
         """
         Load Chrome history data from the default Chrome profile location.
 
@@ -27,13 +29,15 @@ class ChromeHistoryReader(BaseReader):
             max_count (int): Maximum amount of history entries to read.
             chrome_profile_path (str): Custom path to Chrome profile directory.
         """
-        docs: List[Document] = []
-        max_count = load_kwargs.get('max_count', 1000)
-        chrome_profile_path = load_kwargs.get('chrome_profile_path', None)
+        docs: list[Document] = []
+        max_count = load_kwargs.get("max_count", 1000)
+        chrome_profile_path = load_kwargs.get("chrome_profile_path", None)
 
         # Default Chrome profile path on macOS
         if chrome_profile_path is None:
-            chrome_profile_path = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")
+            chrome_profile_path = os.path.expanduser(
+                "~/Library/Application Support/Google/Chrome/Default"
+            )
 
         history_db_path = os.path.join(chrome_profile_path, "History")
 
@@ -82,7 +86,7 @@ class ChromeHistoryReader(BaseReader):
 """
 
             # Create document with embedded metadata
-            doc = Document(text=doc_content, metadata={ "title": title[0:150]})
+            doc = Document(text=doc_content, metadata={"title": title[0:150]})
             # if len(title) > 150:
             #     print(f"Title is too long: {title}")
             docs.append(doc)
@@ -93,12 +97,17 @@ class ChromeHistoryReader(BaseReader):
 
         except Exception as e:
             print(f"Error reading Chrome history: {e}")
+            # add you may need to close your browser to make the database file available
+            # also highlight in red
+            print(
+                "\033[91mYou may need to close your browser to make the database file available\033[0m"
+            )
             return docs
 
         return docs
 
     @staticmethod
-    def find_chrome_profiles() -> List[Path]:
+    def find_chrome_profiles() -> list[Path]:
         """
         Find all Chrome profile directories.
 
@@ -124,7 +133,9 @@ class ChromeHistoryReader(BaseReader):
         return profile_dirs
 
     @staticmethod
-    def export_history_to_file(output_file: str = "chrome_history_export.txt", max_count: int = 1000):
+    def export_history_to_file(
+        output_file: str = "chrome_history_export.txt", max_count: int = 1000
+    ):
         """
         Export Chrome history to a text file using the same SQL query format.
 
@@ -132,7 +143,9 @@ class ChromeHistoryReader(BaseReader):
             output_file: Path to the output file
             max_count: Maximum number of entries to export
         """
-        chrome_profile_path = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")
+        chrome_profile_path = os.path.expanduser(
+            "~/Library/Application Support/Google/Chrome/Default"
+        )
         history_db_path = os.path.join(chrome_profile_path, "History")
 
         if not os.path.exists(history_db_path):
@@ -159,10 +172,12 @@ class ChromeHistoryReader(BaseReader):
         cursor.execute(query, (max_count,))
         rows = cursor.fetchall()
 
-        with open(output_file, 'w', encoding='utf-8') as f:
+        with open(output_file, "w", encoding="utf-8") as f:
             for row in rows:
                 last_visit, url, title, visit_count, typed_count, hidden = row
-                f.write(f"{last_visit}\t{url}\t{title}\t{visit_count}\t{typed_count}\t{hidden}\n")
+                f.write(
+                    f"{last_visit}\t{url}\t{title}\t{visit_count}\t{typed_count}\t{hidden}\n"
+                )
 
         conn.close()
         print(f"Exported {len(rows)} history entries to {output_file}")
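Chrome's History database stores visit times as microseconds since 1601-01-01 00:00:00 UTC (the WebKit epoch). The reader's SQL query may already convert this, but for reference, a small sketch of the conversion:

from datetime import datetime, timedelta

def webkit_to_datetime(webkit_microseconds: int) -> datetime:
    # WebKit/Chrome timestamps count microseconds from 1601-01-01 00:00:00 UTC.
    return datetime(1601, 1, 1) + timedelta(microseconds=webkit_microseconds)

print(webkit_to_datetime(13390000000000000))  # roughly mid-2025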
@@ -2,13 +2,14 @@ import json
 import os
 import re
 import subprocess
-import sys
 import time
+from datetime import datetime
 from pathlib import Path
-from typing import List, Any, Dict, Optional
+from typing import Any
 
 from llama_index.core import Document
 from llama_index.core.readers.base import BaseReader
-from datetime import datetime
+
 
 class WeChatHistoryReader(BaseReader):
     """
@@ -43,10 +44,16 @@ class WeChatHistoryReader(BaseReader):
         wechattweak_path = self.wechat_exporter_dir / "wechattweak-cli"
         if not wechattweak_path.exists():
             print("Downloading WeChatTweak CLI...")
-            subprocess.run([
-                "curl", "-L", "-o", str(wechattweak_path),
-                "https://github.com/JettChenT/WeChatTweak-CLI/releases/latest/download/wechattweak-cli"
-            ], check=True)
+            subprocess.run(
+                [
+                    "curl",
+                    "-L",
+                    "-o",
+                    str(wechattweak_path),
+                    "https://github.com/JettChenT/WeChatTweak-CLI/releases/latest/download/wechattweak-cli",
+                ],
+                check=True,
+            )
 
             # Make executable
             wechattweak_path.chmod(0o755)
@@ -73,16 +80,16 @@ class WeChatHistoryReader(BaseReader):
     def check_api_available(self) -> bool:
         """Check if WeChatTweak API is available."""
         try:
-            result = subprocess.run([
-                "curl", "-s", "http://localhost:48065/wechat/allcontacts"
-            ], capture_output=True, text=True, timeout=5)
+            result = subprocess.run(
+                ["curl", "-s", "http://localhost:48065/wechat/allcontacts"],
+                capture_output=True,
+                text=True,
+                timeout=5,
+            )
             return result.returncode == 0 and result.stdout.strip()
         except Exception:
             return False
 
 
     def _extract_readable_text(self, content: str) -> str:
         """
         Extract readable text from message content, removing XML and system messages.
@@ -100,14 +107,14 @@ class WeChatHistoryReader(BaseReader):
         if isinstance(content, dict):
             # Extract text from dictionary structure
             text_parts = []
-            if 'title' in content:
-                text_parts.append(str(content['title']))
-            if 'quoted' in content:
-                text_parts.append(str(content['quoted']))
-            if 'content' in content:
-                text_parts.append(str(content['content']))
-            if 'text' in content:
-                text_parts.append(str(content['text']))
+            if "title" in content:
+                text_parts.append(str(content["title"]))
+            if "quoted" in content:
+                text_parts.append(str(content["quoted"]))
+            if "content" in content:
+                text_parts.append(str(content["content"]))
+            if "text" in content:
+                text_parts.append(str(content["text"]))
 
             if text_parts:
                 return " | ".join(text_parts)
@@ -120,11 +127,11 @@ class WeChatHistoryReader(BaseReader):
             return ""
 
         # Remove common prefixes like "wxid_xxx:\n"
-        clean_content = re.sub(r'^wxid_[^:]+:\s*', '', content)
-        clean_content = re.sub(r'^[^:]+:\s*', '', clean_content)
+        clean_content = re.sub(r"^wxid_[^:]+:\s*", "", content)
+        clean_content = re.sub(r"^[^:]+:\s*", "", clean_content)
 
         # If it's just XML or system message, return empty
-        if clean_content.strip().startswith('<') or 'recalled a message' in clean_content:
+        if clean_content.strip().startswith("<") or "recalled a message" in clean_content:
             return ""
 
         return clean_content.strip()
@@ -145,9 +152,9 @@ class WeChatHistoryReader(BaseReader):
         # Handle dictionary content
         if isinstance(content, dict):
             # Check if dict has any readable text fields
-            text_fields = ['title', 'quoted', 'content', 'text']
+            text_fields = ["title", "quoted", "content", "text"]
             for field in text_fields:
-                if field in content and content[field]:
+                if content.get(field):
                     return True
             return False
 
@@ -156,42 +163,47 @@ class WeChatHistoryReader(BaseReader):
             return False
 
         # Skip image messages (contain XML with img tags)
-        if '<img' in content and 'cdnurl' in content:
+        if "<img" in content and "cdnurl" in content:
            return False
 
         # Skip emoji messages (contain emoji XML tags)
-        if '<emoji' in content and 'productid' in content:
+        if "<emoji" in content and "productid" in content:
            return False
 
         # Skip voice messages
-        if '<voice' in content:
+        if "<voice" in content:
            return False
 
         # Skip video messages
-        if '<video' in content:
+        if "<video" in content:
            return False
 
         # Skip file messages
-        if '<appmsg' in content and 'appid' in content:
+        if "<appmsg" in content and "appid" in content:
            return False
 
         # Skip system messages (like "recalled a message")
-        if 'recalled a message' in content:
+        if "recalled a message" in content:
            return False
 
         # Check if there's actual readable text (not just XML or system messages)
         # Remove common prefixes like "wxid_xxx:\n" and check for actual content
-        clean_content = re.sub(r'^wxid_[^:]+:\s*', '', content)
-        clean_content = re.sub(r'^[^:]+:\s*', '', clean_content)
+        clean_content = re.sub(r"^wxid_[^:]+:\s*", "", content)
+        clean_content = re.sub(r"^[^:]+:\s*", "", clean_content)
 
         # If after cleaning we have meaningful text, consider it readable
-        if len(clean_content.strip()) > 0 and not clean_content.strip().startswith('<'):
+        if len(clean_content.strip()) > 0 and not clean_content.strip().startswith("<"):
             return True
 
         return False
 
-    def _concatenate_messages(self, messages: List[Dict], max_length: int = 128,
-                              time_window_minutes: int = 30, overlap_messages: int = 0) -> List[Dict]:
+    def _concatenate_messages(
+        self,
+        messages: list[dict],
+        max_length: int = 128,
+        time_window_minutes: int = 30,
+        overlap_messages: int = 0,
+    ) -> list[dict]:
         """
         Concatenate messages based on length and time rules.
 
@@ -214,12 +226,12 @@ class WeChatHistoryReader(BaseReader):
 
         for message in messages:
             # Extract message info
-            content = message.get('content', '')
-            message_text = message.get('message', '')
-            create_time = message.get('createTime', 0)
-            from_user = message.get('fromUser', '')
-            to_user = message.get('toUser', '')
-            is_sent_from_self = message.get('isSentFromSelf', False)
+            content = message.get("content", "")
+            message_text = message.get("message", "")
+            create_time = message.get("createTime", 0)
+            message.get("fromUser", "")
+            message.get("toUser", "")
+            message.get("isSentFromSelf", False)
 
             # Extract readable text
             readable_text = self._extract_readable_text(content)
@@ -236,16 +248,24 @@ class WeChatHistoryReader(BaseReader):
                 if time_diff_minutes > time_window_minutes:
                     # Time gap too large, start new group
                     if current_group:
-                        concatenated_groups.append({
-                            'messages': current_group,
-                            'total_length': current_length,
-                            'start_time': current_group[0].get('createTime', 0),
-                            'end_time': current_group[-1].get('createTime', 0)
-                        })
+                        concatenated_groups.append(
+                            {
+                                "messages": current_group,
+                                "total_length": current_length,
+                                "start_time": current_group[0].get("createTime", 0),
+                                "end_time": current_group[-1].get("createTime", 0),
+                            }
+                        )
                         # Keep last few messages for overlap
                         if overlap_messages > 0 and len(current_group) > overlap_messages:
                             current_group = current_group[-overlap_messages:]
-                            current_length = sum(len(self._extract_readable_text(msg.get('content', '')) or msg.get('message', '')) for msg in current_group)
+                            current_length = sum(
+                                len(
+                                    self._extract_readable_text(msg.get("content", ""))
+                                    or msg.get("message", "")
+                                )
+                                for msg in current_group
+                            )
                         else:
                             current_group = []
                             current_length = 0
@@ -254,16 +274,24 @@ class WeChatHistoryReader(BaseReader):
             message_length = len(readable_text)
             if max_length != -1 and current_length + message_length > max_length and current_group:
                 # Current group would exceed max length, save it and start new
-                concatenated_groups.append({
-                    'messages': current_group,
-                    'total_length': current_length,
-                    'start_time': current_group[0].get('createTime', 0),
-                    'end_time': current_group[-1].get('createTime', 0)
-                })
+                concatenated_groups.append(
+                    {
+                        "messages": current_group,
+                        "total_length": current_length,
+                        "start_time": current_group[0].get("createTime", 0),
+                        "end_time": current_group[-1].get("createTime", 0),
+                    }
+                )
                 # Keep last few messages for overlap
                 if overlap_messages > 0 and len(current_group) > overlap_messages:
                     current_group = current_group[-overlap_messages:]
-                    current_length = sum(len(self._extract_readable_text(msg.get('content', '')) or msg.get('message', '')) for msg in current_group)
+                    current_length = sum(
+                        len(
+                            self._extract_readable_text(msg.get("content", ""))
+                            or msg.get("message", "")
+                        )
+                        for msg in current_group
+                    )
                 else:
                     current_group = []
                     current_length = 0
@@ -275,16 +303,18 @@ class WeChatHistoryReader(BaseReader):
 
         # Add the last group if it exists
         if current_group:
-            concatenated_groups.append({
-                'messages': current_group,
-                'total_length': current_length,
-                'start_time': current_group[0].get('createTime', 0),
-                'end_time': current_group[-1].get('createTime', 0)
-            })
+            concatenated_groups.append(
+                {
+                    "messages": current_group,
+                    "total_length": current_length,
+                    "start_time": current_group[0].get("createTime", 0),
+                    "end_time": current_group[-1].get("createTime", 0),
+                }
+            )
 
         return concatenated_groups
 
-    def _create_concatenated_content(self, message_group: Dict, contact_name: str) -> str:
+    def _create_concatenated_content(self, message_group: dict, contact_name: str) -> str:
         """
         Create concatenated content from a group of messages.
 
@@ -295,16 +325,16 @@ class WeChatHistoryReader(BaseReader):
         Returns:
             Formatted concatenated content
         """
-        messages = message_group['messages']
-        start_time = message_group['start_time']
-        end_time = message_group['end_time']
+        messages = message_group["messages"]
+        start_time = message_group["start_time"]
+        end_time = message_group["end_time"]
 
         # Format timestamps
         if start_time:
             try:
                 start_timestamp = datetime.fromtimestamp(start_time)
-                start_time_str = start_timestamp.strftime('%Y-%m-%d %H:%M:%S')
-            except:
+                start_time_str = start_timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except (ValueError, OSError):
                 start_time_str = str(start_time)
         else:
             start_time_str = "Unknown"
@@ -312,8 +342,8 @@ class WeChatHistoryReader(BaseReader):
         if end_time:
             try:
                 end_timestamp = datetime.fromtimestamp(end_time)
-                end_time_str = end_timestamp.strftime('%Y-%m-%d %H:%M:%S')
-            except:
+                end_time_str = end_timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except (ValueError, OSError):
                 end_time_str = str(end_time)
         else:
             end_time_str = "Unknown"
@@ -321,10 +351,10 @@ class WeChatHistoryReader(BaseReader):
         # Build concatenated message content
         message_parts = []
         for message in messages:
-            content = message.get('content', '')
-            message_text = message.get('message', '')
-            create_time = message.get('createTime', 0)
-            is_sent_from_self = message.get('isSentFromSelf', False)
+            content = message.get("content", "")
+            message_text = message.get("message", "")
+            create_time = message.get("createTime", 0)
+            is_sent_from_self = message.get("isSentFromSelf", False)
 
             # Extract readable text
             readable_text = self._extract_readable_text(content)
@@ -336,8 +366,8 @@ class WeChatHistoryReader(BaseReader):
                 try:
                     timestamp = datetime.fromtimestamp(create_time)
                     # change to YYYY-MM-DD HH:MM:SS
-                    time_str = timestamp.strftime('%Y-%m-%d %H:%M:%S')
-                except:
+                    time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+                except (ValueError, OSError):
                     time_str = str(create_time)
             else:
                 time_str = "Unknown"
@@ -351,7 +381,7 @@ class WeChatHistoryReader(BaseReader):
         doc_content = f"""
 Contact: {contact_name}
 Time Range: {start_time_str} - {end_time_str}
-Messages ({len(messages)} messages, {message_group['total_length']} chars):
+Messages ({len(messages)} messages, {message_group["total_length"]} chars):
 
 {concatenated_text}
 """
@@ -361,7 +391,7 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
 """
         return doc_content, contact_name
 
-    def load_data(self, input_dir: str = None, **load_kwargs: Any) -> List[Document]:
+    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
         """
         Load WeChat chat history data from exported JSON files.
 
@@ -376,13 +406,13 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
             time_window_minutes (int): Time window in minutes to group messages together (default: 30).
             overlap_messages (int): Number of messages to overlap between consecutive groups (default: 2).
         """
-        docs: List[Document] = []
-        max_count = load_kwargs.get('max_count', 1000)
-        wechat_export_dir = load_kwargs.get('wechat_export_dir', None)
-        include_non_text = load_kwargs.get('include_non_text', False)
-        concatenate_messages = load_kwargs.get('concatenate_messages', False)
-        max_length = load_kwargs.get('max_length', 1000)
-        time_window_minutes = load_kwargs.get('time_window_minutes', 30)
+        docs: list[Document] = []
+        max_count = load_kwargs.get("max_count", 1000)
+        wechat_export_dir = load_kwargs.get("wechat_export_dir", None)
+        include_non_text = load_kwargs.get("include_non_text", False)
+        concatenate_messages = load_kwargs.get("concatenate_messages", False)
+        max_length = load_kwargs.get("max_length", 1000)
+        time_window_minutes = load_kwargs.get("time_window_minutes", 30)
 
         # Default WeChat export path
         if wechat_export_dir is None:
@@ -403,7 +433,7 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                 break
 
             try:
-                with open(json_file, 'r', encoding='utf-8') as f:
+                with open(json_file, encoding="utf-8") as f:
                     chat_data = json.load(f)
 
                 # Extract contact name from filename
@@ -414,7 +444,7 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                 readable_messages = []
                 for message in chat_data:
                     try:
-                        content = message.get('content', '')
+                        content = message.get("content", "")
                         if not include_non_text and not self._is_text_message(content):
                             continue
 
@@ -430,9 +460,9 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                     # Concatenate messages based on rules
                     message_groups = self._concatenate_messages(
                         readable_messages,
-                        max_length=-1,
-                        time_window_minutes=-1,
-                        overlap_messages=0  # Keep 2 messages overlap between groups
+                        max_length=max_length,
+                        time_window_minutes=time_window_minutes,
+                        overlap_messages=0,  # No overlap between groups
                     )
 
                     # Create documents from concatenated groups
@@ -440,12 +470,19 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                         if count >= max_count and max_count > 0:
                             break
 
-                        doc_content, contact_name = self._create_concatenated_content(message_group, contact_name)
-                        doc = Document(text=doc_content, metadata={"contact_name": contact_name})
+                        doc_content, contact_name = self._create_concatenated_content(
+                            message_group, contact_name
+                        )
+                        doc = Document(
+                            text=doc_content,
+                            metadata={"contact_name": contact_name},
+                        )
                         docs.append(doc)
                         count += 1
 
-                    print(f"Created {len(message_groups)} concatenated message groups for {contact_name}")
+                    print(
+                        f"Created {len(message_groups)} concatenated message groups for {contact_name}"
+                    )
 
                 else:
                     # Original single-message processing
@@ -454,12 +491,12 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                             break
 
                         # Extract message information
-                        from_user = message.get('fromUser', '')
-                        to_user = message.get('toUser', '')
-                        content = message.get('content', '')
-                        message_text = message.get('message', '')
-                        create_time = message.get('createTime', 0)
-                        is_sent_from_self = message.get('isSentFromSelf', False)
+                        message.get("fromUser", "")
+                        message.get("toUser", "")
+                        content = message.get("content", "")
+                        message_text = message.get("message", "")
+                        create_time = message.get("createTime", 0)
+                        is_sent_from_self = message.get("isSentFromSelf", False)
 
                         # Handle content that might be dict or string
                         try:
@@ -480,8 +517,8 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                         if create_time:
                             try:
                                 timestamp = datetime.fromtimestamp(create_time)
-                                time_str = timestamp.strftime('%Y-%m-%d %H:%M:%S')
-                            except:
+                                time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+                            except (ValueError, OSError):
                                 time_str = str(create_time)
                         else:
                             time_str = "Unknown"
@@ -495,7 +532,9 @@ Message: {readable_text if readable_text else message_text}
 """
 
                         # Create document with embedded metadata
-                        doc = Document(text=doc_content, metadata={})
+                        doc = Document(
+                            text=doc_content, metadata={"contact_name": contact_name}
+                        )
                         docs.append(doc)
                         count += 1
 
@@ -512,7 +551,7 @@ Message: {readable_text if readable_text else message_text}
         return docs
 
     @staticmethod
-    def find_wechat_export_dirs() -> List[Path]:
+    def find_wechat_export_dirs() -> list[Path]:
         """
         Find all WeChat export directories.
 
@@ -523,10 +562,10 @@ Message: {readable_text if readable_text else message_text}
 
         # Look for common export directory names
         possible_dirs = [
-            Path("./wechat_export_test"),
             Path("./wechat_export"),
+            Path("./wechat_export_direct"),
             Path("./wechat_chat_history"),
-            Path("./chat_export")
+            Path("./chat_export"),
         ]
 
         for export_dir in possible_dirs:
@@ -534,13 +573,20 @@ Message: {readable_text if readable_text else message_text}
             json_files = list(export_dir.glob("*.json"))
             if json_files:
                 export_dirs.append(export_dir)
-                print(f"Found WeChat export directory: {export_dir} with {len(json_files)} files")
+                print(
+                    f"Found WeChat export directory: {export_dir} with {len(json_files)} files"
+                )
 
         print(f"Found {len(export_dirs)} WeChat export directories")
         return export_dirs
 
     @staticmethod
-    def export_chat_to_file(output_file: str = "wechat_chat_export.txt", max_count: int = 1000, export_dir: str = None, include_non_text: bool = False):
+    def export_chat_to_file(
+        output_file: str = "wechat_chat_export.txt",
+        max_count: int = 1000,
+        export_dir: str | None = None,
+        include_non_text: bool = False,
+    ):
         """
         Export WeChat chat history to a text file.
 
@@ -560,14 +606,14 @@ Message: {readable_text if readable_text else message_text}
         try:
             json_files = list(Path(export_dir).glob("*.json"))
 
-            with open(output_file, 'w', encoding='utf-8') as f:
+            with open(output_file, "w", encoding="utf-8") as f:
                 count = 0
                 for json_file in json_files:
                     if count >= max_count and max_count > 0:
                         break
 
                     try:
-                        with open(json_file, 'r', encoding='utf-8') as json_f:
+                        with open(json_file, encoding="utf-8") as json_f:
                             chat_data = json.load(json_f)
 
                         contact_name = json_file.stem
@@ -577,10 +623,10 @@ Message: {readable_text if readable_text else message_text}
                         if count >= max_count and max_count > 0:
                             break
 
-                        from_user = message.get('fromUser', '')
-                        content = message.get('content', '')
-                        message_text = message.get('message', '')
-                        create_time = message.get('createTime', 0)
+                        from_user = message.get("fromUser", "")
+                        content = message.get("content", "")
+                        message_text = message.get("message", "")
+                        create_time = message.get("createTime", 0)
 
                         # Skip non-text messages unless requested
                         if not include_non_text:
@@ -595,8 +641,8 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
if create_time:
|
if create_time:
|
||||||
try:
|
try:
|
||||||
timestamp = datetime.fromtimestamp(create_time)
|
timestamp = datetime.fromtimestamp(create_time)
|
||||||
time_str = timestamp.strftime('%Y-%m-%d %H:%M:%S')
|
time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
|
||||||
except:
|
except (ValueError, OSError):
|
||||||
time_str = str(create_time)
|
time_str = str(create_time)
|
||||||
else:
|
else:
|
||||||
time_str = "Unknown"
|
time_str = "Unknown"
|
||||||
@@ -613,7 +659,7 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error exporting WeChat chat history: {e}")
|
print(f"Error exporting WeChat chat history: {e}")
|
||||||
|
|
||||||
def export_wechat_chat_history(self, export_dir: str = "./wechat_export_direct") -> Optional[Path]:
|
def export_wechat_chat_history(self, export_dir: str = "./wechat_export_direct") -> Path | None:
|
||||||
"""
|
"""
|
||||||
Export WeChat chat history using wechat-exporter tool.
|
Export WeChat chat history using wechat-exporter tool.
|
||||||
|
|
||||||
@@ -642,16 +688,21 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
requirements_file = self.wechat_exporter_dir / "requirements.txt"
|
requirements_file = self.wechat_exporter_dir / "requirements.txt"
|
||||||
if requirements_file.exists():
|
if requirements_file.exists():
|
||||||
print("Installing wechat-exporter requirements...")
|
print("Installing wechat-exporter requirements...")
|
||||||
subprocess.run([
|
subprocess.run(["uv", "pip", "install", "-r", str(requirements_file)], check=True)
|
||||||
"uv", "pip", "install", "-r", str(requirements_file)
|
|
||||||
], check=True)
|
|
||||||
|
|
||||||
# Run the export command
|
# Run the export command
|
||||||
print("Running wechat-exporter...")
|
print("Running wechat-exporter...")
|
||||||
result = subprocess.run([
|
result = subprocess.run(
|
||||||
sys.executable, str(self.wechat_exporter_dir / "main.py"),
|
[
|
||||||
"export-all", str(export_path)
|
sys.executable,
|
||||||
], capture_output=True, text=True, check=True)
|
str(self.wechat_exporter_dir / "main.py"),
|
||||||
|
"export-all",
|
||||||
|
str(export_path),
|
||||||
|
],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
|
||||||
print("Export command output:")
|
print("Export command output:")
|
||||||
print(result.stdout)
|
print(result.stdout)
|
||||||
@@ -662,7 +713,9 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
# Check if export was successful
|
# Check if export was successful
|
||||||
if export_path.exists() and any(export_path.glob("*.json")):
|
if export_path.exists() and any(export_path.glob("*.json")):
|
||||||
json_files = list(export_path.glob("*.json"))
|
json_files = list(export_path.glob("*.json"))
|
||||||
print(f"Successfully exported {len(json_files)} chat history files to {export_path}")
|
print(
|
||||||
|
f"Successfully exported {len(json_files)} chat history files to {export_path}"
|
||||||
|
)
|
||||||
return export_path
|
return export_path
|
||||||
else:
|
else:
|
||||||
print("Export completed but no JSON files found")
|
print("Export completed but no JSON files found")
|
||||||
@@ -678,7 +731,7 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
print("Please ensure WeChat is running and WeChatTweak is installed.")
|
print("Please ensure WeChat is running and WeChatTweak is installed.")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def find_or_export_wechat_data(self, export_dir: str = "./wechat_export_direct") -> List[Path]:
|
def find_or_export_wechat_data(self, export_dir: str = "./wechat_export_direct") -> list[Path]:
|
||||||
"""
|
"""
|
||||||
Find existing WeChat exports or create new ones.
|
Find existing WeChat exports or create new ones.
|
||||||
|
|
||||||
@@ -697,7 +750,7 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
Path("./wechat_export"),
|
Path("./wechat_export"),
|
||||||
Path("./wechat_export_direct"),
|
Path("./wechat_export_direct"),
|
||||||
Path("./wechat_chat_history"),
|
Path("./wechat_chat_history"),
|
||||||
Path("./chat_export")
|
Path("./chat_export"),
|
||||||
]
|
]
|
||||||
|
|
||||||
for export_dir_path in possible_export_dirs:
|
for export_dir_path in possible_export_dirs:
|
||||||
@@ -714,6 +767,8 @@ Message: {readable_text if readable_text else message_text}
|
|||||||
if exported_path:
|
if exported_path:
|
||||||
export_dirs = [exported_path]
|
export_dirs = [exported_path]
|
||||||
else:
|
else:
|
||||||
print("Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed.")
|
print(
|
||||||
|
"Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed."
|
||||||
|
)
|
||||||
|
|
||||||
return export_dirs
|
return export_dirs
|
||||||
189
apps/wechat_rag.py
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
"""
|
||||||
|
WeChat History RAG example using the unified interface.
|
||||||
|
Supports WeChat chat history export and search.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from base_rag_example import BaseRAGExample
|
||||||
|
|
||||||
|
from .history_data.wechat_history import WeChatHistoryReader
|
||||||
|
|
||||||
|
|
||||||
|
class WeChatRAG(BaseRAGExample):
|
||||||
|
"""RAG example for WeChat chat history."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
# Set default values BEFORE calling super().__init__
|
||||||
|
self.max_items_default = -1 # Match original default
|
||||||
|
self.embedding_model_default = (
|
||||||
|
"sentence-transformers/all-MiniLM-L6-v2" # Fast 384-dim model
|
||||||
|
)
|
||||||
|
|
||||||
|
super().__init__(
|
||||||
|
name="WeChat History",
|
||||||
|
description="Process and query WeChat chat history with LEANN",
|
||||||
|
default_index_name="wechat_history_magic_test_11Debug_new",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _add_specific_arguments(self, parser):
|
||||||
|
"""Add WeChat-specific arguments."""
|
||||||
|
wechat_group = parser.add_argument_group("WeChat Parameters")
|
||||||
|
wechat_group.add_argument(
|
||||||
|
"--export-dir",
|
||||||
|
type=str,
|
||||||
|
default="./wechat_export",
|
||||||
|
help="Directory to store WeChat exports (default: ./wechat_export)",
|
||||||
|
)
|
||||||
|
wechat_group.add_argument(
|
||||||
|
"--force-export",
|
||||||
|
action="store_true",
|
||||||
|
help="Force re-export of WeChat data even if exports exist",
|
||||||
|
)
|
||||||
|
wechat_group.add_argument(
|
||||||
|
"--chunk-size", type=int, default=192, help="Text chunk size (default: 192)"
|
||||||
|
)
|
||||||
|
wechat_group.add_argument(
|
||||||
|
"--chunk-overlap", type=int, default=64, help="Text chunk overlap (default: 64)"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _export_wechat_data(self, export_dir: Path) -> bool:
|
||||||
|
"""Export WeChat data using wechattweak-cli."""
|
||||||
|
print("Exporting WeChat data...")
|
||||||
|
|
||||||
|
# Check if WeChat is running
|
||||||
|
try:
|
||||||
|
result = subprocess.run(["pgrep", "WeChat"], capture_output=True, text=True)
|
||||||
|
if result.returncode != 0:
|
||||||
|
print("WeChat is not running. Please start WeChat first.")
|
||||||
|
return False
|
||||||
|
except Exception:
|
||||||
|
pass # pgrep might not be available on all systems
|
||||||
|
|
||||||
|
# Create export directory
|
||||||
|
export_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Run export command
|
||||||
|
cmd = ["packages/wechat-exporter/wechattweak-cli", "export", str(export_dir)]
|
||||||
|
|
||||||
|
try:
|
||||||
|
print(f"Running: {' '.join(cmd)}")
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True)
|
||||||
|
|
||||||
|
if result.returncode == 0:
|
||||||
|
print("WeChat data exported successfully!")
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
print(f"Export failed: {result.stderr}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
except FileNotFoundError:
|
||||||
|
print("\nError: wechattweak-cli not found!")
|
||||||
|
print("Please install it first:")
|
||||||
|
print(" sudo packages/wechat-exporter/wechattweak-cli install")
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Export error: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load WeChat history and convert to text chunks."""
|
||||||
|
# Initialize WeChat reader with export capabilities
|
||||||
|
reader = WeChatHistoryReader()
|
||||||
|
|
||||||
|
# Find existing exports or create new ones using the centralized method
|
||||||
|
export_dirs = reader.find_or_export_wechat_data(args.export_dir)
|
||||||
|
if not export_dirs:
|
||||||
|
print("Failed to find or export WeChat data. Trying to find any existing exports...")
|
||||||
|
# Try to find any existing exports in common locations
|
||||||
|
export_dirs = reader.find_wechat_export_dirs()
|
||||||
|
if not export_dirs:
|
||||||
|
print("No WeChat data found. Please ensure WeChat exports exist.")
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Load documents from all found export directories
|
||||||
|
all_documents = []
|
||||||
|
total_processed = 0
|
||||||
|
|
||||||
|
for i, export_dir in enumerate(export_dirs):
|
||||||
|
print(f"\nProcessing WeChat export {i + 1}/{len(export_dirs)}: {export_dir}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Apply max_items limit per export
|
||||||
|
max_per_export = -1
|
||||||
|
if args.max_items > 0:
|
||||||
|
remaining = args.max_items - total_processed
|
||||||
|
if remaining <= 0:
|
||||||
|
break
|
||||||
|
max_per_export = remaining
|
||||||
|
|
||||||
|
documents = reader.load_data(
|
||||||
|
wechat_export_dir=str(export_dir),
|
||||||
|
max_count=max_per_export,
|
||||||
|
concatenate_messages=True, # Enable message concatenation for better context
|
||||||
|
)
|
||||||
|
|
||||||
|
if documents:
|
||||||
|
print(f"Loaded {len(documents)} chat documents from {export_dir}")
|
||||||
|
all_documents.extend(documents)
|
||||||
|
total_processed += len(documents)
|
||||||
|
else:
|
||||||
|
print(f"No documents loaded from {export_dir}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error processing {export_dir}: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not all_documents:
|
||||||
|
print("No documents loaded from any source. Exiting.")
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"\nTotal loaded {len(all_documents)} chat documents from {len(export_dirs)} exports")
|
||||||
|
print("now starting to split into text chunks ... take some time")
|
||||||
|
|
||||||
|
# Convert to text chunks with contact information
|
||||||
|
all_texts = []
|
||||||
|
for doc in all_documents:
|
||||||
|
# Split the document into chunks
|
||||||
|
from llama_index.core.node_parser import SentenceSplitter
|
||||||
|
|
||||||
|
text_splitter = SentenceSplitter(
|
||||||
|
chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
|
||||||
|
)
|
||||||
|
nodes = text_splitter.get_nodes_from_documents([doc])
|
||||||
|
|
||||||
|
for node in nodes:
|
||||||
|
# Add contact information to each chunk
|
||||||
|
contact_name = doc.metadata.get("contact_name", "Unknown")
|
||||||
|
text = f"[Contact] means the message is from: {contact_name}\n" + node.get_content()
|
||||||
|
all_texts.append(text)
|
||||||
|
|
||||||
|
print(f"Created {len(all_texts)} text chunks from {len(all_documents)} documents")
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Check platform
|
||||||
|
if sys.platform != "darwin":
|
||||||
|
print("\n⚠️ Warning: WeChat export is only supported on macOS")
|
||||||
|
print(" You can still query existing exports on other platforms\n")
|
||||||
|
|
||||||
|
# Example queries for WeChat RAG
|
||||||
|
print("\n💬 WeChat History RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'Show me conversations about travel plans'")
|
||||||
|
print("- 'Find group chats about weekend activities'")
|
||||||
|
print("- '我想买魔术师约翰逊的球衣,给我一些对应聊天记录?'")
|
||||||
|
print("- 'What did we discuss about the project last month?'")
|
||||||
|
print("\nNote: WeChat must be running for export to work\n")
|
||||||
|
|
||||||
|
rag = WeChatRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
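For quick manual inspection, the reader used by this app can also be driven directly; a minimal sketch (the import path and export directory are illustrative and may need adjusting to your checkout):

```python
# Standalone peek at what WeChatHistoryReader produces, outside the RAG pipeline.
from history_data.wechat_history import WeChatHistoryReader  # path as imported from apps/

reader = WeChatHistoryReader()
docs = reader.load_data(
    wechat_export_dir="./wechat_export",  # any directory of exported *.json chats
    max_count=5,                          # only a handful of concatenated groups
    concatenate_messages=True,
)
for doc in docs:
    print(doc.metadata.get("contact_name", "Unknown"), len(doc.text))
```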
BIN
assets/claude_code_leann.png
Normal file
Binary file not shown.
|
After: Size: 73 KiB
BIN
assets/mcp_leann.png
Normal file
Binary file not shown.
|
After: Size: 224 KiB
@@ -1,9 +1,24 @@
|
|||||||
# 🧪 Leann Sanity Checks
|
# 🧪 LEANN Benchmarks & Testing
|
||||||
|
|
||||||
This directory contains comprehensive sanity checks for the Leann system, ensuring all components work correctly across different configurations.
|
This directory contains performance benchmarks and comprehensive tests for the LEANN system, including backend comparisons and sanity checks across different configurations.
|
||||||
|
|
||||||
## 📁 Test Files
|
## 📁 Test Files
|
||||||
|
|
||||||
|
### `diskann_vs_hnsw_speed_comparison.py`
|
||||||
|
Performance comparison between DiskANN and HNSW backends:
|
||||||
|
- ✅ **Search latency** comparison with both backends using recompute
|
||||||
|
- ✅ **Index size** and **build time** measurements
|
||||||
|
- ✅ **Score validity** testing (ensures no -inf scores)
|
||||||
|
- ✅ **Configurable dataset sizes** for different scales
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Quick comparison with 500 docs, 10 queries
|
||||||
|
python benchmarks/diskann_vs_hnsw_speed_comparison.py
|
||||||
|
|
||||||
|
# Large-scale comparison with 2000 docs, 20 queries
|
||||||
|
python benchmarks/diskann_vs_hnsw_speed_comparison.py 2000 20
|
||||||
|
```
|
||||||
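Under the hood the comparison is little more than building an index per backend and timing identical queries; a condensed sketch of the `benchmark_backend` helper added in this PR (parameters trimmed for brevity):

```python
import time
from leann.api import LeannBuilder, LeannSearcher

def quick_benchmark(backend_name: str, texts: list[str], queries: list[str], index_path: str) -> float:
    builder = LeannBuilder(
        backend_name=backend_name,          # "hnsw" or "diskann"
        embedding_model="facebook/contriever",
        embedding_mode="sentence-transformers",
        is_recompute=True,                  # recompute enabled on both backends for a fair comparison
    )
    for text in texts:
        builder.add_text(text)
    builder.build_index(index_path)

    searcher = LeannSearcher(index_path)
    start = time.time()
    results = [searcher.search(q, top_k=5) for q in queries]
    avg_ms = (time.time() - start) / len(queries) * 1000

    # Score validity check: no -inf/inf scores should come back from either backend.
    scores = [r.score for hits in results for r in hits if r.score is not None]
    assert all(abs(s) != float("inf") for s in scores), "invalid scores detected"
    return avg_ms
```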
|
|
||||||
### `test_distance_functions.py`
|
### `test_distance_functions.py`
|
||||||
Tests all supported distance functions across DiskANN backend:
|
Tests all supported distance functions across DiskANN backend:
|
||||||
- ✅ **MIPS** (Maximum Inner Product Search)
|
- ✅ **MIPS** (Maximum Inner Product Search)
|
||||||
@@ -1,29 +1,32 @@
|
|||||||
import time
|
import time
|
||||||
import numpy as np
|
|
||||||
import matplotlib.pyplot as plt
|
import matplotlib.pyplot as plt
|
||||||
import torch
|
|
||||||
from sentence_transformers import SentenceTransformer
|
|
||||||
import mlx.core as mx
|
import mlx.core as mx
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
from mlx_lm import load
|
from mlx_lm import load
|
||||||
|
from sentence_transformers import SentenceTransformer
|
||||||
|
|
||||||
# --- Configuration ---
|
# --- Configuration ---
|
||||||
MODEL_NAME_TORCH = "Qwen/Qwen3-Embedding-0.6B"
|
MODEL_NAME_TORCH = "Qwen/Qwen3-Embedding-0.6B"
|
||||||
MODEL_NAME_MLX = "mlx-community/Qwen3-Embedding-0.6B-4bit-DWQ"
|
MODEL_NAME_MLX = "mlx-community/Qwen3-Embedding-0.6B-4bit-DWQ"
|
||||||
BATCH_SIZES = [1, 8, 16, 32, 64, 128]
|
BATCH_SIZES = [1, 8, 16, 32, 64, 128]
|
||||||
NUM_RUNS = 10 # Number of runs to average for each batch size
|
NUM_RUNS = 10 # Number of runs to average for each batch size
|
||||||
WARMUP_RUNS = 2 # Number of warm-up runs
|
WARMUP_RUNS = 2 # Number of warm-up runs
|
||||||
|
|
||||||
# --- Generate Dummy Data ---
|
# --- Generate Dummy Data ---
|
||||||
DUMMY_SENTENCES = ["This is a test sentence for benchmarking." * 5] * max(BATCH_SIZES)
|
DUMMY_SENTENCES = ["This is a test sentence for benchmarking." * 5] * max(BATCH_SIZES)
|
||||||
|
|
||||||
# --- Benchmark Functions ---
|
# --- Benchmark Functions ---
|
||||||
|
|
||||||
|
|
||||||
def benchmark_torch(model, sentences):
|
def benchmark_torch(model, sentences):
|
||||||
start_time = time.time()
|
start_time = time.time()
|
||||||
model.encode(sentences, convert_to_numpy=True)
|
model.encode(sentences, convert_to_numpy=True)
|
||||||
end_time = time.time()
|
end_time = time.time()
|
||||||
return (end_time - start_time) * 1000 # Return time in ms
|
return (end_time - start_time) * 1000 # Return time in ms
|
||||||
|
|
||||||
|
|
||||||
def benchmark_mlx(model, tokenizer, sentences):
|
def benchmark_mlx(model, tokenizer, sentences):
|
||||||
start_time = time.time()
|
start_time = time.time()
|
||||||
|
|
||||||
@@ -63,6 +66,7 @@ def benchmark_mlx(model, tokenizer, sentences):
|
|||||||
end_time = time.time()
|
end_time = time.time()
|
||||||
return (end_time - start_time) * 1000 # Return time in ms
|
return (end_time - start_time) * 1000 # Return time in ms
|
||||||
|
|
||||||
|
|
||||||
# --- Main Execution ---
|
# --- Main Execution ---
|
||||||
def main():
|
def main():
|
||||||
print("--- Initializing Models ---")
|
print("--- Initializing Models ---")
|
||||||
@@ -98,7 +102,9 @@ def main():
|
|||||||
results_torch.append(np.mean(torch_times))
|
results_torch.append(np.mean(torch_times))
|
||||||
|
|
||||||
# Benchmark MLX
|
# Benchmark MLX
|
||||||
mlx_times = [benchmark_mlx(model_mlx, tokenizer_mlx, sentences_batch) for _ in range(NUM_RUNS)]
|
mlx_times = [
|
||||||
|
benchmark_mlx(model_mlx, tokenizer_mlx, sentences_batch) for _ in range(NUM_RUNS)
|
||||||
|
]
|
||||||
results_mlx.append(np.mean(mlx_times))
|
results_mlx.append(np.mean(mlx_times))
|
||||||
|
|
||||||
print("\n--- Benchmark Results (Average time per batch in ms) ---")
|
print("\n--- Benchmark Results (Average time per batch in ms) ---")
|
||||||
@@ -109,10 +115,16 @@ def main():
|
|||||||
# --- Plotting ---
|
# --- Plotting ---
|
||||||
print("\n--- Generating Plot ---")
|
print("\n--- Generating Plot ---")
|
||||||
plt.figure(figsize=(10, 6))
|
plt.figure(figsize=(10, 6))
|
||||||
plt.plot(BATCH_SIZES, results_torch, marker='o', linestyle='-', label=f'PyTorch ({device})')
|
plt.plot(
|
||||||
plt.plot(BATCH_SIZES, results_mlx, marker='s', linestyle='-', label='MLX')
|
BATCH_SIZES,
|
||||||
|
results_torch,
|
||||||
|
marker="o",
|
||||||
|
linestyle="-",
|
||||||
|
label=f"PyTorch ({device})",
|
||||||
|
)
|
||||||
|
plt.plot(BATCH_SIZES, results_mlx, marker="s", linestyle="-", label="MLX")
|
||||||
|
|
||||||
plt.title(f'Embedding Performance: MLX vs PyTorch\nModel: {MODEL_NAME_TORCH}')
|
plt.title(f"Embedding Performance: MLX vs PyTorch\nModel: {MODEL_NAME_TORCH}")
|
||||||
plt.xlabel("Batch Size")
|
plt.xlabel("Batch Size")
|
||||||
plt.ylabel("Average Time per Batch (ms)")
|
plt.ylabel("Average Time per Batch (ms)")
|
||||||
plt.xticks(BATCH_SIZES)
|
plt.xticks(BATCH_SIZES)
|
||||||
@@ -124,5 +136,6 @@ def main():
|
|||||||
plt.savefig(output_filename)
|
plt.savefig(output_filename)
|
||||||
print(f"Plot saved to {output_filename}")
|
print(f"Plot saved to {output_filename}")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
148
benchmarks/benchmark_no_recompute.py
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from leann import LeannBuilder, LeannSearcher
|
||||||
|
|
||||||
|
|
||||||
|
def _meta_exists(index_path: str) -> bool:
|
||||||
|
p = Path(index_path)
|
||||||
|
return (p.parent / f"{p.stem}.meta.json").exists()
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_index(index_path: str, backend_name: str, num_docs: int, is_recompute: bool) -> None:
|
||||||
|
# if _meta_exists(index_path):
|
||||||
|
# return
|
||||||
|
kwargs = {}
|
||||||
|
if backend_name == "hnsw":
|
||||||
|
kwargs["is_compact"] = is_recompute
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name=backend_name,
|
||||||
|
embedding_model=os.getenv("LEANN_EMBED_MODEL", "facebook/contriever"),
|
||||||
|
embedding_mode=os.getenv("LEANN_EMBED_MODE", "sentence-transformers"),
|
||||||
|
graph_degree=32,
|
||||||
|
complexity=64,
|
||||||
|
is_recompute=is_recompute,
|
||||||
|
num_threads=4,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
for i in range(num_docs):
|
||||||
|
builder.add_text(
|
||||||
|
f"This is a test document number {i}. It contains some repeated text for benchmarking."
|
||||||
|
)
|
||||||
|
builder.build_index(index_path)
|
||||||
|
|
||||||
|
|
||||||
|
def _bench_group(
|
||||||
|
index_path: str,
|
||||||
|
recompute: bool,
|
||||||
|
query: str,
|
||||||
|
repeats: int,
|
||||||
|
complexity: int = 32,
|
||||||
|
top_k: int = 10,
|
||||||
|
) -> float:
|
||||||
|
# Independent searcher per group; fixed port when recompute
|
||||||
|
searcher = LeannSearcher(index_path=index_path)
|
||||||
|
|
||||||
|
# Warm-up once
|
||||||
|
_ = searcher.search(
|
||||||
|
query,
|
||||||
|
top_k=top_k,
|
||||||
|
complexity=complexity,
|
||||||
|
recompute_embeddings=recompute,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _once() -> float:
|
||||||
|
t0 = time.time()
|
||||||
|
_ = searcher.search(
|
||||||
|
query,
|
||||||
|
top_k=top_k,
|
||||||
|
complexity=complexity,
|
||||||
|
recompute_embeddings=recompute,
|
||||||
|
)
|
||||||
|
return time.time() - t0
|
||||||
|
|
||||||
|
if repeats <= 1:
|
||||||
|
t = _once()
|
||||||
|
else:
|
||||||
|
vals = [_once() for _ in range(repeats)]
|
||||||
|
vals.sort()
|
||||||
|
t = vals[len(vals) // 2]
|
||||||
|
|
||||||
|
searcher.cleanup()
|
||||||
|
return t
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument("--num-docs", type=int, default=5000)
|
||||||
|
parser.add_argument("--repeats", type=int, default=3)
|
||||||
|
parser.add_argument("--complexity", type=int, default=32)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
base = Path.cwd() / ".leann" / "indexes" / f"bench_n{args.num_docs}"
|
||||||
|
base.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
# ---------- Build HNSW variants ----------
|
||||||
|
hnsw_r = str(base / f"hnsw_recompute_n{args.num_docs}.leann")
|
||||||
|
hnsw_nr = str(base / f"hnsw_norecompute_n{args.num_docs}.leann")
|
||||||
|
ensure_index(hnsw_r, "hnsw", args.num_docs, True)
|
||||||
|
ensure_index(hnsw_nr, "hnsw", args.num_docs, False)
|
||||||
|
|
||||||
|
# ---------- Build DiskANN variants ----------
|
||||||
|
diskann_r = str(base / "diskann_r.leann")
|
||||||
|
diskann_nr = str(base / "diskann_nr.leann")
|
||||||
|
ensure_index(diskann_r, "diskann", args.num_docs, True)
|
||||||
|
ensure_index(diskann_nr, "diskann", args.num_docs, False)
|
||||||
|
|
||||||
|
# ---------- Helpers ----------
|
||||||
|
def _size_for(prefix: str) -> int:
|
||||||
|
p = Path(prefix)
|
||||||
|
base_dir = p.parent
|
||||||
|
stem = p.stem
|
||||||
|
total = 0
|
||||||
|
for f in base_dir.iterdir():
|
||||||
|
if f.is_file() and f.name.startswith(stem):
|
||||||
|
total += f.stat().st_size
|
||||||
|
return total
|
||||||
|
|
||||||
|
# ---------- HNSW benchmark ----------
|
||||||
|
t_hnsw_r = _bench_group(
|
||||||
|
hnsw_r, True, "test document number 42", repeats=args.repeats, complexity=args.complexity
|
||||||
|
)
|
||||||
|
t_hnsw_nr = _bench_group(
|
||||||
|
hnsw_nr, False, "test document number 42", repeats=args.repeats, complexity=args.complexity
|
||||||
|
)
|
||||||
|
size_hnsw_r = _size_for(hnsw_r)
|
||||||
|
size_hnsw_nr = _size_for(hnsw_nr)
|
||||||
|
|
||||||
|
print("Benchmark results (HNSW):")
|
||||||
|
print(f" recompute=True: search_time={t_hnsw_r:.3f}s, size={size_hnsw_r / 1024 / 1024:.1f}MB")
|
||||||
|
print(
|
||||||
|
f" recompute=False: search_time={t_hnsw_nr:.3f}s, size={size_hnsw_nr / 1024 / 1024:.1f}MB"
|
||||||
|
)
|
||||||
|
print(" Expectation: no-recompute should be faster but larger on disk.")
|
||||||
|
|
||||||
|
# ---------- DiskANN benchmark ----------
|
||||||
|
t_diskann_r = _bench_group(
|
||||||
|
diskann_r, True, "DiskANN R test doc 123", repeats=args.repeats, complexity=args.complexity
|
||||||
|
)
|
||||||
|
t_diskann_nr = _bench_group(
|
||||||
|
diskann_nr,
|
||||||
|
False,
|
||||||
|
"DiskANN NR test doc 123",
|
||||||
|
repeats=args.repeats,
|
||||||
|
complexity=args.complexity,
|
||||||
|
)
|
||||||
|
size_diskann_r = _size_for(diskann_r)
|
||||||
|
size_diskann_nr = _size_for(diskann_nr)
|
||||||
|
|
||||||
|
print("\nBenchmark results (DiskANN):")
|
||||||
|
print(f" build(recompute=True, partition): size={size_diskann_r / 1024 / 1024:.1f}MB")
|
||||||
|
print(f" build(recompute=False): size={size_diskann_nr / 1024 / 1024:.1f}MB")
|
||||||
|
print(f" search recompute=True (final rerank): {t_diskann_r:.3f}s")
|
||||||
|
print(f" search recompute=False (PQ only): {t_diskann_nr:.3f}s")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -3,14 +3,15 @@
|
|||||||
Memory comparison between Faiss HNSW and LEANN HNSW backend
|
Memory comparison between Faiss HNSW and LEANN HNSW backend
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import gc
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
import psutil
|
|
||||||
import gc
|
|
||||||
import subprocess
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
|
import psutil
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
from llama_index.core.node_parser import SentenceSplitter
|
||||||
|
|
||||||
# Setup logging
|
# Setup logging
|
||||||
@@ -61,7 +62,7 @@ def test_faiss_hnsw():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
result = subprocess.run(
|
result = subprocess.run(
|
||||||
[sys.executable, "examples/faiss_only.py"],
|
[sys.executable, "benchmarks/faiss_only.py"],
|
||||||
capture_output=True,
|
capture_output=True,
|
||||||
text=True,
|
text=True,
|
||||||
timeout=300,
|
timeout=300,
|
||||||
@@ -83,9 +84,7 @@ def test_faiss_hnsw():
|
|||||||
|
|
||||||
for line in lines:
|
for line in lines:
|
||||||
if "Peak Memory:" in line:
|
if "Peak Memory:" in line:
|
||||||
peak_memory = float(
|
peak_memory = float(line.split("Peak Memory:")[1].split("MB")[0].strip())
|
||||||
line.split("Peak Memory:")[1].split("MB")[0].strip()
|
|
||||||
)
|
|
||||||
|
|
||||||
return {"peak_memory": peak_memory}
|
return {"peak_memory": peak_memory}
|
||||||
|
|
||||||
@@ -111,13 +110,12 @@ def test_leann_hnsw():
|
|||||||
|
|
||||||
tracker.checkpoint("After imports")
|
tracker.checkpoint("After imports")
|
||||||
|
|
||||||
|
from leann.api import LeannBuilder
|
||||||
from llama_index.core import SimpleDirectoryReader
|
from llama_index.core import SimpleDirectoryReader
|
||||||
from leann.api import LeannBuilder, LeannSearcher
|
|
||||||
|
|
||||||
|
|
||||||
# Load and parse documents
|
# Load and parse documents
|
||||||
documents = SimpleDirectoryReader(
|
documents = SimpleDirectoryReader(
|
||||||
"examples/data",
|
"data",
|
||||||
recursive=True,
|
recursive=True,
|
||||||
encoding="utf-8",
|
encoding="utf-8",
|
||||||
required_exts=[".pdf", ".txt", ".md"],
|
required_exts=[".pdf", ".txt", ".md"],
|
||||||
@@ -135,6 +133,7 @@ def test_leann_hnsw():
|
|||||||
nodes = node_parser.get_nodes_from_documents([doc])
|
nodes = node_parser.get_nodes_from_documents([doc])
|
||||||
for node in nodes:
|
for node in nodes:
|
||||||
all_texts.append(node.get_content())
|
all_texts.append(node.get_content())
|
||||||
|
print(f"Total number of chunks: {len(all_texts)}")
|
||||||
|
|
||||||
tracker.checkpoint("After text chunking")
|
tracker.checkpoint("After text chunking")
|
||||||
|
|
||||||
@@ -201,11 +200,9 @@ def test_leann_hnsw():
|
|||||||
searcher = LeannSearcher(index_path)
|
searcher = LeannSearcher(index_path)
|
||||||
tracker.checkpoint("After searcher loading")
|
tracker.checkpoint("After searcher loading")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
print("Running search queries...")
|
print("Running search queries...")
|
||||||
queries = [
|
queries = [
|
||||||
"什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发",
|
"什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发",
|
||||||
"What is LEANN and how does it work?",
|
"What is LEANN and how does it work?",
|
||||||
"华为诺亚方舟实验室的主要研究内容",
|
"华为诺亚方舟实验室的主要研究内容",
|
||||||
]
|
]
|
||||||
@@ -303,21 +300,15 @@ def main():
|
|||||||
|
|
||||||
print("\nLEANN vs Faiss Performance:")
|
print("\nLEANN vs Faiss Performance:")
|
||||||
memory_saving = faiss_results["peak_memory"] - leann_results["peak_memory"]
|
memory_saving = faiss_results["peak_memory"] - leann_results["peak_memory"]
|
||||||
print(
|
print(f" Search Memory: {memory_ratio:.1f}x less ({memory_saving:.1f} MB saved)")
|
||||||
f" Search Memory: {memory_ratio:.1f}x less ({memory_saving:.1f} MB saved)"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Storage comparison
|
# Storage comparison
|
||||||
if leann_storage_size > faiss_storage_size:
|
if leann_storage_size > faiss_storage_size:
|
||||||
storage_ratio = leann_storage_size / faiss_storage_size
|
storage_ratio = leann_storage_size / faiss_storage_size
|
||||||
print(
|
print(f" Storage Size: {storage_ratio:.1f}x larger (LEANN uses more storage)")
|
||||||
f" Storage Size: {storage_ratio:.1f}x larger (LEANN uses more storage)"
|
|
||||||
)
|
|
||||||
elif faiss_storage_size > leann_storage_size:
|
elif faiss_storage_size > leann_storage_size:
|
||||||
storage_ratio = faiss_storage_size / leann_storage_size
|
storage_ratio = faiss_storage_size / leann_storage_size
|
||||||
print(
|
print(f" Storage Size: {storage_ratio:.1f}x smaller (LEANN uses less storage)")
|
||||||
f" Storage Size: {storage_ratio:.1f}x smaller (LEANN uses less storage)"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
print(" Storage Size: similar")
|
print(" Storage Size: similar")
|
||||||
else:
|
else:
|
||||||
286
benchmarks/diskann_vs_hnsw_speed_comparison.py
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
DiskANN vs HNSW Search Performance Comparison
|
||||||
|
|
||||||
|
This benchmark compares search performance between DiskANN and HNSW backends:
|
||||||
|
- DiskANN: With graph partitioning enabled (is_recompute=True)
|
||||||
|
- HNSW: With recompute enabled (is_recompute=True)
|
||||||
|
- Tests performance across different dataset sizes
|
||||||
|
- Measures search latency, recall, and index size
|
||||||
|
"""
|
||||||
|
|
||||||
|
import gc
|
||||||
|
import multiprocessing as mp
|
||||||
|
import tempfile
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
# Prefer 'fork' start method to avoid POSIX semaphore leaks on macOS
|
||||||
|
try:
|
||||||
|
mp.set_start_method("fork", force=True)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def create_test_texts(n_docs: int) -> list[str]:
|
||||||
|
"""Create synthetic test documents for benchmarking."""
|
||||||
|
np.random.seed(42)
|
||||||
|
topics = [
|
||||||
|
"machine learning and artificial intelligence",
|
||||||
|
"natural language processing and text analysis",
|
||||||
|
"computer vision and image recognition",
|
||||||
|
"data science and statistical analysis",
|
||||||
|
"deep learning and neural networks",
|
||||||
|
"information retrieval and search engines",
|
||||||
|
"database systems and data management",
|
||||||
|
"software engineering and programming",
|
||||||
|
"cybersecurity and network protection",
|
||||||
|
"cloud computing and distributed systems",
|
||||||
|
]
|
||||||
|
|
||||||
|
texts = []
|
||||||
|
for i in range(n_docs):
|
||||||
|
topic = topics[i % len(topics)]
|
||||||
|
variation = np.random.randint(1, 100)
|
||||||
|
text = (
|
||||||
|
f"This is document {i} about {topic}. Content variation {variation}. "
|
||||||
|
f"Additional information about {topic} with details and examples. "
|
||||||
|
f"Technical discussion of {topic} including implementation aspects."
|
||||||
|
)
|
||||||
|
texts.append(text)
|
||||||
|
|
||||||
|
return texts
|
||||||
|
|
||||||
|
|
||||||
|
def benchmark_backend(
|
||||||
|
backend_name: str, texts: list[str], test_queries: list[str], backend_kwargs: dict[str, Any]
|
||||||
|
) -> dict[str, float]:
|
||||||
|
"""Benchmark a specific backend with the given configuration."""
|
||||||
|
from leann.api import LeannBuilder, LeannSearcher
|
||||||
|
|
||||||
|
print(f"\n🔧 Testing {backend_name.upper()} backend...")
|
||||||
|
|
||||||
|
with tempfile.TemporaryDirectory() as temp_dir:
|
||||||
|
index_path = str(Path(temp_dir) / f"benchmark_{backend_name}.leann")
|
||||||
|
|
||||||
|
# Build index
|
||||||
|
print(f"📦 Building {backend_name} index with {len(texts)} documents...")
|
||||||
|
start_time = time.time()
|
||||||
|
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name=backend_name,
|
||||||
|
embedding_model="facebook/contriever",
|
||||||
|
embedding_mode="sentence-transformers",
|
||||||
|
**backend_kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
|
for text in texts:
|
||||||
|
builder.add_text(text)
|
||||||
|
|
||||||
|
builder.build_index(index_path)
|
||||||
|
build_time = time.time() - start_time
|
||||||
|
|
||||||
|
# Measure index size
|
||||||
|
index_dir = Path(index_path).parent
|
||||||
|
index_files = list(index_dir.glob(f"{Path(index_path).stem}.*"))
|
||||||
|
total_size = sum(f.stat().st_size for f in index_files if f.is_file())
|
||||||
|
size_mb = total_size / (1024 * 1024)
|
||||||
|
|
||||||
|
print(f" ✅ Build completed in {build_time:.2f}s, index size: {size_mb:.1f}MB")
|
||||||
|
|
||||||
|
# Search benchmark
|
||||||
|
print("🔍 Running search benchmark...")
|
||||||
|
searcher = LeannSearcher(index_path)
|
||||||
|
|
||||||
|
search_times = []
|
||||||
|
all_results = []
|
||||||
|
|
||||||
|
for query in test_queries:
|
||||||
|
start_time = time.time()
|
||||||
|
results = searcher.search(query, top_k=5)
|
||||||
|
search_time = time.time() - start_time
|
||||||
|
search_times.append(search_time)
|
||||||
|
all_results.append(results)
|
||||||
|
|
||||||
|
avg_search_time = np.mean(search_times) * 1000 # Convert to ms
|
||||||
|
print(f" ✅ Average search time: {avg_search_time:.1f}ms")
|
||||||
|
|
||||||
|
# Check for valid scores (detect -inf issues)
|
||||||
|
all_scores = [
|
||||||
|
result.score
|
||||||
|
for results in all_results
|
||||||
|
for result in results
|
||||||
|
if result.score is not None
|
||||||
|
]
|
||||||
|
valid_scores = [
|
||||||
|
score for score in all_scores if score != float("-inf") and score != float("inf")
|
||||||
|
]
|
||||||
|
score_validity_rate = len(valid_scores) / len(all_scores) if all_scores else 0
|
||||||
|
|
||||||
|
# Clean up (ensure embedding server shutdown and object GC)
|
||||||
|
try:
|
||||||
|
if hasattr(searcher, "cleanup"):
|
||||||
|
searcher.cleanup()
|
||||||
|
del searcher
|
||||||
|
del builder
|
||||||
|
gc.collect()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"⚠️ Warning: Resource cleanup error: {e}")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"build_time": build_time,
|
||||||
|
"avg_search_time_ms": avg_search_time,
|
||||||
|
"index_size_mb": size_mb,
|
||||||
|
"score_validity_rate": score_validity_rate,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def run_comparison(n_docs: int = 500, n_queries: int = 10):
|
||||||
|
"""Run performance comparison between DiskANN and HNSW."""
|
||||||
|
print("🚀 Starting DiskANN vs HNSW Performance Comparison")
|
||||||
|
print(f"📊 Dataset: {n_docs} documents, {n_queries} test queries")
|
||||||
|
|
||||||
|
# Create test data
|
||||||
|
texts = create_test_texts(n_docs)
|
||||||
|
test_queries = [
|
||||||
|
"machine learning algorithms",
|
||||||
|
"natural language processing",
|
||||||
|
"computer vision techniques",
|
||||||
|
"data analysis methods",
|
||||||
|
"neural network architectures",
|
||||||
|
"database query optimization",
|
||||||
|
"software development practices",
|
||||||
|
"security vulnerabilities",
|
||||||
|
"cloud infrastructure",
|
||||||
|
"distributed computing",
|
||||||
|
][:n_queries]
|
||||||
|
|
||||||
|
# HNSW benchmark
|
||||||
|
hnsw_results = benchmark_backend(
|
||||||
|
backend_name="hnsw",
|
||||||
|
texts=texts,
|
||||||
|
test_queries=test_queries,
|
||||||
|
backend_kwargs={
|
||||||
|
"is_recompute": True, # Enable recompute for fair comparison
|
||||||
|
"M": 16,
|
||||||
|
"efConstruction": 200,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
# DiskANN benchmark
|
||||||
|
diskann_results = benchmark_backend(
|
||||||
|
backend_name="diskann",
|
||||||
|
texts=texts,
|
||||||
|
test_queries=test_queries,
|
||||||
|
backend_kwargs={
|
||||||
|
"is_recompute": True, # Enable graph partitioning
|
||||||
|
"num_neighbors": 32,
|
||||||
|
"search_list_size": 50,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Performance comparison
|
||||||
|
print("\n📈 Performance Comparison Results")
|
||||||
|
print(f"{'=' * 60}")
|
||||||
|
print(f"{'Metric':<25} {'HNSW':<15} {'DiskANN':<15} {'Speedup':<10}")
|
||||||
|
print(f"{'-' * 60}")
|
||||||
|
|
||||||
|
# Build time comparison
|
||||||
|
build_speedup = hnsw_results["build_time"] / diskann_results["build_time"]
|
||||||
|
print(
|
||||||
|
f"{'Build Time (s)':<25} {hnsw_results['build_time']:<15.2f} {diskann_results['build_time']:<15.2f} {build_speedup:<10.2f}x"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Search time comparison
|
||||||
|
search_speedup = hnsw_results["avg_search_time_ms"] / diskann_results["avg_search_time_ms"]
|
||||||
|
print(
|
||||||
|
f"{'Search Time (ms)':<25} {hnsw_results['avg_search_time_ms']:<15.1f} {diskann_results['avg_search_time_ms']:<15.1f} {search_speedup:<10.2f}x"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Index size comparison
|
||||||
|
size_ratio = diskann_results["index_size_mb"] / hnsw_results["index_size_mb"]
|
||||||
|
print(
|
||||||
|
f"{'Index Size (MB)':<25} {hnsw_results['index_size_mb']:<15.1f} {diskann_results['index_size_mb']:<15.1f} {size_ratio:<10.2f}x"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Score validity
|
||||||
|
print(
|
||||||
|
f"{'Score Validity (%)':<25} {hnsw_results['score_validity_rate'] * 100:<15.1f} {diskann_results['score_validity_rate'] * 100:<15.1f}"
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"{'=' * 60}")
|
||||||
|
print("\n🎯 Summary:")
|
||||||
|
if search_speedup > 1:
|
||||||
|
print(f" DiskANN is {search_speedup:.2f}x faster than HNSW for search")
|
||||||
|
else:
|
||||||
|
print(f" HNSW is {1 / search_speedup:.2f}x faster than DiskANN for search")
|
||||||
|
|
||||||
|
if size_ratio > 1:
|
||||||
|
print(f" DiskANN uses {size_ratio:.2f}x more storage than HNSW")
|
||||||
|
else:
|
||||||
|
print(f" DiskANN uses {1 / size_ratio:.2f}x less storage than HNSW")
|
||||||
|
|
||||||
|
print(
|
||||||
|
f" Both backends achieved {min(hnsw_results['score_validity_rate'], diskann_results['score_validity_rate']) * 100:.1f}% score validity"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import sys
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Handle help request
|
||||||
|
if len(sys.argv) > 1 and sys.argv[1] in ["-h", "--help", "help"]:
|
||||||
|
print("DiskANN vs HNSW Performance Comparison")
|
||||||
|
print("=" * 50)
|
||||||
|
print(f"Usage: python {sys.argv[0]} [n_docs] [n_queries]")
|
||||||
|
print()
|
||||||
|
print("Arguments:")
|
||||||
|
print(" n_docs Number of documents to index (default: 500)")
|
||||||
|
print(" n_queries Number of test queries to run (default: 10)")
|
||||||
|
print()
|
||||||
|
print("Examples:")
|
||||||
|
print(" python benchmarks/diskann_vs_hnsw_speed_comparison.py")
|
||||||
|
print(" python benchmarks/diskann_vs_hnsw_speed_comparison.py 1000")
|
||||||
|
print(" python benchmarks/diskann_vs_hnsw_speed_comparison.py 2000 20")
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
# Parse command line arguments
|
||||||
|
n_docs = int(sys.argv[1]) if len(sys.argv) > 1 else 500
|
||||||
|
n_queries = int(sys.argv[2]) if len(sys.argv) > 2 else 10
|
||||||
|
|
||||||
|
print("DiskANN vs HNSW Performance Comparison")
|
||||||
|
print("=" * 50)
|
||||||
|
print(f"Dataset: {n_docs} documents, {n_queries} queries")
|
||||||
|
print()
|
||||||
|
|
||||||
|
run_comparison(n_docs=n_docs, n_queries=n_queries)
|
||||||
|
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print("\n⚠️ Benchmark interrupted by user")
|
||||||
|
sys.exit(130)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"\n❌ Benchmark failed: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
finally:
|
||||||
|
# Ensure clean exit (forceful to prevent rare hangs from atexit/threads)
|
||||||
|
try:
|
||||||
|
gc.collect()
|
||||||
|
print("\n🧹 Cleanup completed")
|
||||||
|
# Flush stdio to ensure message is visible before hard-exit
|
||||||
|
try:
|
||||||
|
import sys as _sys
|
||||||
|
|
||||||
|
_sys.stdout.flush()
|
||||||
|
_sys.stderr.flush()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
# Use os._exit to bypass atexit handlers that may hang in rare cases
|
||||||
|
import os as _os
|
||||||
|
|
||||||
|
_os._exit(0)
|
||||||
@@ -1,11 +1,11 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
"""Test only Faiss HNSW"""
|
"""Test only Faiss HNSW"""
|
||||||
|
|
||||||
|
import os
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
|
|
||||||
import psutil
|
import psutil
|
||||||
import gc
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def get_memory_usage():
|
def get_memory_usage():
|
||||||
@@ -37,20 +37,20 @@ def main():
|
|||||||
import faiss
|
import faiss
|
||||||
except ImportError:
|
except ImportError:
|
||||||
print("Faiss is not installed.")
|
print("Faiss is not installed.")
|
||||||
print("Please install it with `uv pip install faiss-cpu`")
|
print(
|
||||||
|
"Please install it with `uv pip install faiss-cpu` and you can then run this script again"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
from llama_index.core import (
|
from llama_index.core import (
|
||||||
SimpleDirectoryReader,
|
|
||||||
VectorStoreIndex,
|
|
||||||
StorageContext,
|
|
||||||
Settings,
|
Settings,
|
||||||
node_parser,
|
SimpleDirectoryReader,
|
||||||
Document,
|
StorageContext,
|
||||||
|
VectorStoreIndex,
|
||||||
)
|
)
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
from llama_index.core.node_parser import SentenceSplitter
|
||||||
from llama_index.vector_stores.faiss import FaissVectorStore
|
|
||||||
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
||||||
|
from llama_index.vector_stores.faiss import FaissVectorStore
|
||||||
|
|
||||||
tracker = MemoryTracker("Faiss HNSW")
|
tracker = MemoryTracker("Faiss HNSW")
|
||||||
tracker.checkpoint("Initial")
|
tracker.checkpoint("Initial")
|
||||||
@@ -65,7 +65,7 @@ def main():
|
|||||||
tracker.checkpoint("After Faiss index creation")
|
tracker.checkpoint("After Faiss index creation")
|
||||||
|
|
||||||
documents = SimpleDirectoryReader(
|
documents = SimpleDirectoryReader(
|
||||||
"examples/data",
|
"data",
|
||||||
recursive=True,
|
recursive=True,
|
||||||
encoding="utf-8",
|
encoding="utf-8",
|
||||||
required_exts=[".pdf", ".txt", ".md"],
|
required_exts=[".pdf", ".txt", ".md"],
|
||||||
@@ -90,8 +90,9 @@ def main():
|
|||||||
vector_store=vector_store, persist_dir="./storage_faiss"
|
vector_store=vector_store, persist_dir="./storage_faiss"
|
||||||
)
|
)
|
||||||
from llama_index.core import load_index_from_storage
|
from llama_index.core import load_index_from_storage
|
||||||
|
|
||||||
index = load_index_from_storage(storage_context=storage_context)
|
index = load_index_from_storage(storage_context=storage_context)
|
||||||
print(f"Index loaded from ./storage_faiss")
|
print("Index loaded from ./storage_faiss")
|
||||||
tracker.checkpoint("After loading existing index")
|
tracker.checkpoint("After loading existing index")
|
||||||
index_loaded = True
|
index_loaded = True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -99,6 +100,7 @@ def main():
|
|||||||
print("Cleaning up corrupted index and building new one...")
|
print("Cleaning up corrupted index and building new one...")
|
||||||
# Clean up corrupted index
|
# Clean up corrupted index
|
||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
if os.path.exists("./storage_faiss"):
|
if os.path.exists("./storage_faiss"):
|
||||||
shutil.rmtree("./storage_faiss")
|
shutil.rmtree("./storage_faiss")
|
||||||
|
|
||||||
@@ -109,9 +111,7 @@ def main():
|
|||||||
vector_store = FaissVectorStore(faiss_index=faiss_index)
|
vector_store = FaissVectorStore(faiss_index=faiss_index)
|
||||||
storage_context = StorageContext.from_defaults(vector_store=vector_store)
|
storage_context = StorageContext.from_defaults(vector_store=vector_store)
|
||||||
index = VectorStoreIndex.from_documents(
|
index = VectorStoreIndex.from_documents(
|
||||||
documents,
|
documents, storage_context=storage_context, transformations=[node_parser]
|
||||||
storage_context=storage_context,
|
|
||||||
transformations=[node_parser]
|
|
||||||
)
|
)
|
||||||
tracker.checkpoint("After index building")
|
tracker.checkpoint("After index building")
|
||||||
|
|
||||||
@@ -127,7 +127,7 @@ def main():
|
|||||||
|
|
||||||
query_engine = index.as_query_engine(similarity_top_k=20)
|
query_engine = index.as_query_engine(similarity_top_k=20)
|
||||||
queries = [
|
queries = [
|
||||||
"什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发",
|
"什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发",
|
||||||
"What is LEANN and how does it work?",
|
"What is LEANN and how does it work?",
|
||||||
"华为诺亚方舟实验室的主要研究内容",
|
"华为诺亚方舟实验室的主要研究内容",
|
||||||
]
|
]
|
||||||
@@ -2,20 +2,20 @@
|
|||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import time
|
import time
|
||||||
|
from contextlib import contextmanager
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Dict, List, Optional, Tuple
|
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import torch
|
import torch
|
||||||
from torch import nn
|
from torch import nn
|
||||||
from transformers import AutoModel, BitsAndBytesConfig
|
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
from contextlib import contextmanager
|
from transformers import AutoModel, BitsAndBytesConfig
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class BenchmarkConfig:
|
class BenchmarkConfig:
|
||||||
model_path: str
|
model_path: str
|
||||||
batch_sizes: List[int]
|
batch_sizes: list[int]
|
||||||
seq_length: int
|
seq_length: int
|
||||||
num_runs: int
|
num_runs: int
|
||||||
use_fp16: bool = True
|
use_fp16: bool = True
|
||||||
@@ -32,13 +32,11 @@ class GraphContainer:
|
|||||||
def __init__(self, model: nn.Module, seq_length: int):
|
def __init__(self, model: nn.Module, seq_length: int):
|
||||||
self.model = model
|
self.model = model
|
||||||
self.seq_length = seq_length
|
self.seq_length = seq_length
|
||||||
self.graphs: Dict[int, 'GraphWrapper'] = {}
|
self.graphs: dict[int, GraphWrapper] = {}
|
||||||
|
|
||||||
def get_or_create(self, batch_size: int) -> 'GraphWrapper':
|
def get_or_create(self, batch_size: int) -> "GraphWrapper":
|
||||||
if batch_size not in self.graphs:
|
if batch_size not in self.graphs:
|
||||||
self.graphs[batch_size] = GraphWrapper(
|
self.graphs[batch_size] = GraphWrapper(self.model, batch_size, self.seq_length)
|
||||||
self.model, batch_size, self.seq_length
|
|
||||||
)
|
|
||||||
return self.graphs[batch_size]
|
return self.graphs[batch_size]
|
||||||
|
|
||||||
|
|
||||||
@@ -55,13 +53,13 @@ class GraphWrapper:
|
|||||||
self._warmup()
|
self._warmup()
|
||||||
|
|
||||||
# Only use CUDA graphs on NVIDIA GPUs
|
# Only use CUDA graphs on NVIDIA GPUs
|
||||||
if torch.cuda.is_available() and hasattr(torch.cuda, 'CUDAGraph'):
|
if torch.cuda.is_available() and hasattr(torch.cuda, "CUDAGraph"):
|
||||||
# Capture graph
|
# Capture graph
|
||||||
self.graph = torch.cuda.CUDAGraph()
|
self.graph = torch.cuda.CUDAGraph()
|
||||||
with torch.cuda.graph(self.graph):
|
with torch.cuda.graph(self.graph):
|
||||||
self.static_output = self.model(
|
self.static_output = self.model(
|
||||||
input_ids=self.static_input,
|
input_ids=self.static_input,
|
||||||
attention_mask=self.static_attention_mask
|
attention_mask=self.static_attention_mask,
|
||||||
)
|
)
|
||||||
self.use_cuda_graph = True
|
self.use_cuda_graph = True
|
||||||
else:
|
else:
|
||||||
@@ -79,9 +77,7 @@ class GraphWrapper:
|
|||||||
|
|
||||||
def _create_random_batch(self, batch_size: int, seq_length: int) -> torch.Tensor:
|
def _create_random_batch(self, batch_size: int, seq_length: int) -> torch.Tensor:
|
||||||
return torch.randint(
|
return torch.randint(
|
||||||
0, 1000, (batch_size, seq_length),
|
0, 1000, (batch_size, seq_length), device=self.device, dtype=torch.long
|
||||||
device=self.device,
|
|
||||||
dtype=torch.long
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def _warmup(self, num_warmup: int = 3):
|
def _warmup(self, num_warmup: int = 3):
|
||||||
@@ -89,7 +85,7 @@ class GraphWrapper:
|
|||||||
for _ in range(num_warmup):
|
for _ in range(num_warmup):
|
||||||
self.model(
|
self.model(
|
||||||
input_ids=self.static_input,
|
input_ids=self.static_input,
|
||||||
attention_mask=self.static_attention_mask
|
attention_mask=self.static_attention_mask,
|
||||||
)
|
)
|
||||||
|
|
||||||
def __call__(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
|
def __call__(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
|
||||||
@@ -133,8 +129,12 @@ class ModelOptimizer:
|
|||||||
print("- Using FP16 precision")
|
print("- Using FP16 precision")
|
||||||
|
|
||||||
# Check if using SDPA (only on CUDA)
|
# Check if using SDPA (only on CUDA)
|
||||||
if torch.cuda.is_available() and torch.version.cuda and float(torch.version.cuda[:3]) >= 11.6:
|
if (
|
||||||
if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
|
torch.cuda.is_available()
|
||||||
|
and torch.version.cuda
|
||||||
|
and float(torch.version.cuda[:3]) >= 11.6
|
||||||
|
):
|
||||||
|
if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
|
||||||
print("- Using PyTorch SDPA (scaled_dot_product_attention)")
|
print("- Using PyTorch SDPA (scaled_dot_product_attention)")
|
||||||
else:
|
else:
|
||||||
print("- PyTorch SDPA not available")
|
print("- PyTorch SDPA not available")
|
||||||
@@ -142,7 +142,8 @@ class ModelOptimizer:
|
|||||||
# Flash Attention (only on CUDA)
|
# Flash Attention (only on CUDA)
|
||||||
if config.use_flash_attention and torch.cuda.is_available():
|
if config.use_flash_attention and torch.cuda.is_available():
|
||||||
try:
|
try:
|
||||||
from flash_attn.flash_attention import FlashAttention
|
from flash_attn.flash_attention import FlashAttention # noqa: F401
|
||||||
|
|
||||||
print("- Flash Attention 2 available")
|
print("- Flash Attention 2 available")
|
||||||
if hasattr(model.config, "attention_mode"):
|
if hasattr(model.config, "attention_mode"):
|
||||||
model.config.attention_mode = "flash_attention_2"
|
model.config.attention_mode = "flash_attention_2"
|
||||||
@@ -153,8 +154,9 @@ class ModelOptimizer:
|
|||||||
# Memory efficient attention (only on CUDA)
|
# Memory efficient attention (only on CUDA)
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
try:
|
try:
|
||||||
from xformers.ops import memory_efficient_attention
|
from xformers.ops import memory_efficient_attention # noqa: F401
|
||||||
if hasattr(model, 'enable_xformers_memory_efficient_attention'):
|
|
||||||
|
if hasattr(model, "enable_xformers_memory_efficient_attention"):
|
||||||
model.enable_xformers_memory_efficient_attention()
|
model.enable_xformers_memory_efficient_attention()
|
||||||
print("- Enabled xformers memory efficient attention")
|
print("- Enabled xformers memory efficient attention")
|
||||||
else:
|
else:
|
||||||
@@ -220,7 +222,7 @@ class Benchmark:
|
|||||||
self.graphs = None
|
self.graphs = None
|
||||||
self.timer = Timer()
|
self.timer = Timer()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"ERROR in benchmark initialization: {str(e)}")
|
print(f"ERROR in benchmark initialization: {e!s}")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def _load_model(self) -> nn.Module:
|
def _load_model(self) -> nn.Module:
|
||||||
@@ -230,15 +232,17 @@ class Benchmark:
|
|||||||
# Int4 quantization using HuggingFace integration
|
# Int4 quantization using HuggingFace integration
|
||||||
if self.config.use_int4:
|
if self.config.use_int4:
|
||||||
import bitsandbytes as bnb
|
import bitsandbytes as bnb
|
||||||
|
|
||||||
print(f"- bitsandbytes version: {bnb.__version__}")
|
print(f"- bitsandbytes version: {bnb.__version__}")
|
||||||
|
|
||||||
# 检查是否使用自定义的8bit量化
|
# Check if using custom 8bit quantization
|
||||||
if hasattr(self.config, 'use_linear8bitlt') and self.config.use_linear8bitlt:
|
if hasattr(self.config, "use_linear8bitlt") and self.config.use_linear8bitlt:
|
||||||
print("- Using custom Linear8bitLt replacement for all linear layers")
|
print("- Using custom Linear8bitLt replacement for all linear layers")
|
||||||
|
|
||||||
# 加载原始模型(不使用量化配置)
|
# Load original model (without quantization config)
|
||||||
import bitsandbytes as bnb
|
import bitsandbytes as bnb
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
# set default to half
|
# set default to half
|
||||||
torch.set_default_dtype(torch.float16)
|
torch.set_default_dtype(torch.float16)
|
||||||
compute_dtype = torch.float16 if self.config.use_fp16 else torch.float32
|
compute_dtype = torch.float16 if self.config.use_fp16 else torch.float32
|
||||||
@@ -247,52 +251,58 @@ class Benchmark:
|
|||||||
torch_dtype=compute_dtype,
|
torch_dtype=compute_dtype,
|
||||||
)
|
)
|
||||||
|
|
||||||
# 定义替换函数
|
# Define replacement function
|
||||||
def replace_linear_with_linear8bitlt(model):
|
def replace_linear_with_linear8bitlt(model):
|
||||||
"""递归地将模型中的所有nn.Linear层替换为Linear8bitLt"""
|
"""Recursively replace all nn.Linear layers with Linear8bitLt"""
|
||||||
for name, module in list(model.named_children()):
|
for name, module in list(model.named_children()):
|
||||||
if isinstance(module, nn.Linear):
|
if isinstance(module, nn.Linear):
|
||||||
# 获取原始线性层的参数
|
# Get original linear layer parameters
|
||||||
in_features = module.in_features
|
in_features = module.in_features
|
||||||
out_features = module.out_features
|
out_features = module.out_features
|
||||||
bias = module.bias is not None
|
bias = module.bias is not None
|
||||||
|
|
||||||
# 创建8bit线性层
|
# Create 8bit linear layer
|
||||||
# print size
|
# print size
|
||||||
print(f"in_features: {in_features}, out_features: {out_features}")
|
print(f"in_features: {in_features}, out_features: {out_features}")
|
||||||
new_module = bnb.nn.Linear8bitLt(
|
new_module = bnb.nn.Linear8bitLt(
|
||||||
in_features,
|
in_features,
|
||||||
out_features,
|
out_features,
|
||||||
bias=bias,
|
bias=bias,
|
||||||
has_fp16_weights=False
|
has_fp16_weights=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
# 复制权重和偏置
|
# Copy weights and bias
|
||||||
new_module.weight.data = module.weight.data
|
new_module.weight.data = module.weight.data
|
||||||
if bias:
|
if bias:
|
||||||
new_module.bias.data = module.bias.data
|
new_module.bias.data = module.bias.data
|
||||||
|
|
||||||
# 替换模块
|
# Replace module
|
||||||
setattr(model, name, new_module)
|
setattr(model, name, new_module)
|
||||||
else:
|
else:
|
||||||
# 递归处理子模块
|
# Process child modules recursively
|
||||||
replace_linear_with_linear8bitlt(module)
|
replace_linear_with_linear8bitlt(module)
|
||||||
|
|
||||||
return model
|
return model
|
||||||
|
|
||||||
# 替换所有线性层
|
# Replace all linear layers
|
||||||
model = replace_linear_with_linear8bitlt(model)
|
model = replace_linear_with_linear8bitlt(model)
|
||||||
# add torch compile
|
# add torch compile
|
||||||
model = torch.compile(model)
|
model = torch.compile(model)
|
||||||
|
|
||||||
# 将模型移到GPU(量化发生在这里)
|
# Move model to GPU (quantization happens here)
|
||||||
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
|
device = (
|
||||||
|
"cuda"
|
||||||
|
if torch.cuda.is_available()
|
||||||
|
else "mps"
|
||||||
|
if torch.backends.mps.is_available()
|
||||||
|
else "cpu"
|
||||||
|
)
|
||||||
model = model.to(device)
|
model = model.to(device)
|
||||||
|
|
||||||
print("- All linear layers replaced with Linear8bitLt")
|
print("- All linear layers replaced with Linear8bitLt")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# 使用原来的Int4量化方法
|
# Use original Int4 quantization method
|
||||||
print("- Using bitsandbytes for Int4 quantization")
|
print("- Using bitsandbytes for Int4 quantization")
|
||||||
|
|
||||||
# Create quantization config
|
# Create quantization config
|
||||||
@@ -302,7 +312,7 @@ class Benchmark:
|
|||||||
load_in_4bit=True,
|
load_in_4bit=True,
|
||||||
bnb_4bit_compute_dtype=compute_dtype,
|
bnb_4bit_compute_dtype=compute_dtype,
|
||||||
bnb_4bit_use_double_quant=True,
|
bnb_4bit_use_double_quant=True,
|
||||||
bnb_4bit_quant_type="nf4"
|
bnb_4bit_quant_type="nf4",
|
||||||
)
|
)
|
||||||
|
|
||||||
print("- Quantization config:", quantization_config)
|
print("- Quantization config:", quantization_config)
|
||||||
@@ -312,7 +322,7 @@ class Benchmark:
|
|||||||
self.config.model_path,
|
self.config.model_path,
|
||||||
quantization_config=quantization_config,
|
quantization_config=quantization_config,
|
||||||
torch_dtype=compute_dtype,
|
torch_dtype=compute_dtype,
|
||||||
device_map="auto" # Let HF decide on device mapping
|
device_map="auto", # Let HF decide on device mapping
|
||||||
)
|
)
|
||||||
|
|
||||||
# Check if model loaded successfully
|
# Check if model loaded successfully
|
||||||
@@ -324,7 +334,7 @@ class Benchmark:
|
|||||||
# Apply optimizations directly here
|
# Apply optimizations directly here
|
||||||
print("\nApplying model optimizations:")
|
print("\nApplying model optimizations:")
|
||||||
|
|
||||||
if hasattr(self.config, 'use_linear8bitlt') and self.config.use_linear8bitlt:
|
if hasattr(self.config, "use_linear8bitlt") and self.config.use_linear8bitlt:
|
||||||
print("- Model moved to GPU with Linear8bitLt quantization")
|
print("- Model moved to GPU with Linear8bitLt quantization")
|
||||||
else:
|
else:
|
||||||
# Skip moving to GPU since device_map="auto" already did that
|
# Skip moving to GPU since device_map="auto" already did that
|
||||||
@@ -334,8 +344,12 @@ class Benchmark:
|
|||||||
print(f"- Using {compute_dtype} for compute dtype")
|
print(f"- Using {compute_dtype} for compute dtype")
|
||||||
|
|
||||||
# Check CUDA and SDPA
|
# Check CUDA and SDPA
|
||||||
if torch.cuda.is_available() and torch.version.cuda and float(torch.version.cuda[:3]) >= 11.6:
|
if (
|
||||||
if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
|
torch.cuda.is_available()
|
||||||
|
and torch.version.cuda
|
||||||
|
and float(torch.version.cuda[:3]) >= 11.6
|
||||||
|
):
|
||||||
|
if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
|
||||||
print("- Using PyTorch SDPA (scaled_dot_product_attention)")
|
print("- Using PyTorch SDPA (scaled_dot_product_attention)")
|
||||||
else:
|
else:
|
||||||
print("- PyTorch SDPA not available")
|
print("- PyTorch SDPA not available")
|
||||||
@@ -343,8 +357,7 @@ class Benchmark:
|
|||||||
# Try xformers if available (only on CUDA)
|
# Try xformers if available (only on CUDA)
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
try:
|
try:
|
||||||
from xformers.ops import memory_efficient_attention
|
if hasattr(model, "enable_xformers_memory_efficient_attention"):
|
||||||
if hasattr(model, 'enable_xformers_memory_efficient_attention'):
|
|
||||||
model.enable_xformers_memory_efficient_attention()
|
model.enable_xformers_memory_efficient_attention()
|
||||||
print("- Enabled xformers memory efficient attention")
|
print("- Enabled xformers memory efficient attention")
|
||||||
else:
|
else:
|
||||||
@@ -370,7 +383,7 @@ class Benchmark:
|
|||||||
self.config.model_path,
|
self.config.model_path,
|
||||||
quantization_config=quantization_config,
|
quantization_config=quantization_config,
|
||||||
torch_dtype=compute_dtype,
|
torch_dtype=compute_dtype,
|
||||||
device_map="auto"
|
device_map="auto",
|
||||||
)
|
)
|
||||||
|
|
||||||
if model is None:
|
if model is None:
|
||||||
@@ -389,6 +402,7 @@ class Benchmark:
|
|||||||
# Apply standard optimizations
|
# Apply standard optimizations
|
||||||
# set default to half
|
# set default to half
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
torch.set_default_dtype(torch.bfloat16)
|
torch.set_default_dtype(torch.bfloat16)
|
||||||
model = ModelOptimizer.optimize(model, self.config)
|
model = ModelOptimizer.optimize(model, self.config)
|
||||||
model = model.half()
|
model = model.half()
|
||||||
@@ -403,25 +417,31 @@ class Benchmark:
|
|||||||
return model
|
return model
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"ERROR loading model: {str(e)}")
|
print(f"ERROR loading model: {e!s}")
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def _create_random_batch(self, batch_size: int) -> torch.Tensor:
|
def _create_random_batch(self, batch_size: int) -> torch.Tensor:
|
||||||
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
|
device = (
|
||||||
|
"cuda"
|
||||||
|
if torch.cuda.is_available()
|
||||||
|
else "mps"
|
||||||
|
if torch.backends.mps.is_available()
|
||||||
|
else "cpu"
|
||||||
|
)
|
||||||
return torch.randint(
|
return torch.randint(
|
||||||
0, 1000,
|
0,
|
||||||
|
1000,
|
||||||
(batch_size, self.config.seq_length),
|
(batch_size, self.config.seq_length),
|
||||||
device=device,
|
device=device,
|
||||||
dtype=torch.long
|
dtype=torch.long,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _run_inference(
|
def _run_inference(
|
||||||
self,
|
self, input_ids: torch.Tensor, graph_wrapper: GraphWrapper | None = None
|
||||||
input_ids: torch.Tensor,
|
) -> tuple[float, torch.Tensor]:
|
||||||
graph_wrapper: Optional[GraphWrapper] = None
|
|
||||||
) -> Tuple[float, torch.Tensor]:
|
|
||||||
attention_mask = torch.ones_like(input_ids)
|
attention_mask = torch.ones_like(input_ids)
|
||||||
|
|
||||||
with torch.no_grad(), self.timer.timing():
|
with torch.no_grad(), self.timer.timing():
|
||||||
@@ -432,7 +452,7 @@ class Benchmark:
|
|||||||
|
|
||||||
return self.timer.elapsed_time(), output
|
return self.timer.elapsed_time(), output
|
||||||
|
|
||||||
def run(self) -> Dict[int, Dict[str, float]]:
|
def run(self) -> dict[int, dict[str, float]]:
|
||||||
results = {}
|
results = {}
|
||||||
|
|
||||||
# Reset peak memory stats
|
# Reset peak memory stats
|
||||||
@@ -450,9 +470,7 @@ class Benchmark:
|
|||||||
|
|
||||||
# Get or create graph for this batch size
|
# Get or create graph for this batch size
|
||||||
graph_wrapper = (
|
graph_wrapper = (
|
||||||
self.graphs.get_or_create(batch_size)
|
self.graphs.get_or_create(batch_size) if self.graphs is not None else None
|
||||||
if self.graphs is not None
|
|
||||||
else None
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Pre-allocate input tensor
|
# Pre-allocate input tensor
|
||||||
@@ -490,7 +508,7 @@ class Benchmark:
|
|||||||
|
|
||||||
# Log memory usage
|
# Log memory usage
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
peak_memory_gb = torch.cuda.max_memory_allocated() / (1024 ** 3)
|
peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)
|
||||||
elif torch.backends.mps.is_available():
|
elif torch.backends.mps.is_available():
|
||||||
# MPS doesn't have max_memory_allocated, use 0
|
# MPS doesn't have max_memory_allocated, use 0
|
||||||
peak_memory_gb = 0.0
|
peak_memory_gb = 0.0
|
||||||
@@ -604,7 +622,15 @@ def main():
|
|||||||
os.makedirs("results", exist_ok=True)
|
os.makedirs("results", exist_ok=True)
|
||||||
|
|
||||||
# Generate filename based on configuration
|
# Generate filename based on configuration
|
||||||
precision_type = "int4" if config.use_int4 else "int8" if config.use_int8 else "fp16" if config.use_fp16 else "fp32"
|
precision_type = (
|
||||||
|
"int4"
|
||||||
|
if config.use_int4
|
||||||
|
else "int8"
|
||||||
|
if config.use_int8
|
||||||
|
else "fp16"
|
||||||
|
if config.use_fp16
|
||||||
|
else "fp32"
|
||||||
|
)
|
||||||
model_name = os.path.basename(config.model_path)
|
model_name = os.path.basename(config.model_path)
|
||||||
output_file = f"results/benchmark_{model_name}_{precision_type}.json"
|
output_file = f"results/benchmark_{model_name}_{precision_type}.json"
|
||||||
|
|
||||||
@@ -612,17 +638,20 @@ def main():
|
|||||||
with open(output_file, "w") as f:
|
with open(output_file, "w") as f:
|
||||||
json.dump(
|
json.dump(
|
||||||
{
|
{
|
||||||
"config": {k: str(v) if isinstance(v, list) else v for k, v in vars(config).items()},
|
"config": {
|
||||||
"results": {str(k): v for k, v in results.items()}
|
k: str(v) if isinstance(v, list) else v for k, v in vars(config).items()
|
||||||
|
},
|
||||||
|
"results": {str(k): v for k, v in results.items()},
|
||||||
},
|
},
|
||||||
f,
|
f,
|
||||||
indent=2
|
indent=2,
|
||||||
)
|
)
|
||||||
print(f"Results saved to {output_file}")
|
print(f"Results saved to {output_file}")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Benchmark failed: {e}")
|
print(f"Benchmark failed: {e}")
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
|
|
||||||
|
|
||||||
@@ -5,24 +5,21 @@ It correctly compares results by fetching the text content for both the new sear
|
|||||||
results and the golden standard results, making the comparison robust to ID changes.
|
results and the golden standard results, making the comparison robust to ID changes.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
import time
|
import time
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import sys
|
|
||||||
import numpy as np
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
from leann.api import LeannSearcher, LeannBuilder
|
import numpy as np
|
||||||
|
from leann.api import LeannBuilder, LeannSearcher
|
||||||
|
|
||||||
|
|
||||||
def download_data_if_needed(data_root: Path, download_embeddings: bool = False):
|
def download_data_if_needed(data_root: Path, download_embeddings: bool = False):
|
||||||
"""Checks if the data directory exists, and if not, downloads it from HF Hub."""
|
"""Checks if the data directory exists, and if not, downloads it from HF Hub."""
|
||||||
if not data_root.exists():
|
if not data_root.exists():
|
||||||
print(f"Data directory '{data_root}' not found.")
|
print(f"Data directory '{data_root}' not found.")
|
||||||
print(
|
print("Downloading evaluation data from Hugging Face Hub... (this may take a moment)")
|
||||||
"Downloading evaluation data from Hugging Face Hub... (this may take a moment)"
|
|
||||||
)
|
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import snapshot_download
|
from huggingface_hub import snapshot_download
|
||||||
|
|
||||||
@@ -63,7 +60,7 @@ def download_data_if_needed(data_root: Path, download_embeddings: bool = False):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def download_embeddings_if_needed(data_root: Path, dataset_type: str = None):
|
def download_embeddings_if_needed(data_root: Path, dataset_type: str | None = None):
|
||||||
"""Download embeddings files specifically."""
|
"""Download embeddings files specifically."""
|
||||||
embeddings_dir = data_root / "embeddings"
|
embeddings_dir = data_root / "embeddings"
|
||||||
|
|
||||||
@@ -101,7 +98,7 @@ def download_embeddings_if_needed(data_root: Path, dataset_type: str = None):
|
|||||||
|
|
||||||
|
|
||||||
# --- Helper Function to get Golden Passages ---
|
# --- Helper Function to get Golden Passages ---
|
||||||
def get_golden_texts(searcher: LeannSearcher, golden_ids: List[int]) -> set:
|
def get_golden_texts(searcher: LeannSearcher, golden_ids: list[int]) -> set:
|
||||||
"""
|
"""
|
||||||
Retrieves the text for golden passage IDs directly from the LeannSearcher's
|
Retrieves the text for golden passage IDs directly from the LeannSearcher's
|
||||||
passage manager.
|
passage manager.
|
||||||
@@ -113,24 +110,20 @@ def get_golden_texts(searcher: LeannSearcher, golden_ids: List[int]) -> set:
|
|||||||
passage_data = searcher.passage_manager.get_passage(str(gid))
|
passage_data = searcher.passage_manager.get_passage(str(gid))
|
||||||
golden_texts.add(passage_data["text"])
|
golden_texts.add(passage_data["text"])
|
||||||
except KeyError:
|
except KeyError:
|
||||||
print(
|
print(f"Warning: Golden passage ID '{gid}' not found in the index's passage data.")
|
||||||
f"Warning: Golden passage ID '{gid}' not found in the index's passage data."
|
|
||||||
)
|
|
||||||
return golden_texts
|
return golden_texts
|
||||||
|
|
||||||
|
|
||||||
def load_queries(file_path: Path) -> List[str]:
|
def load_queries(file_path: Path) -> list[str]:
|
||||||
queries = []
|
queries = []
|
||||||
with open(file_path, "r", encoding="utf-8") as f:
|
with open(file_path, encoding="utf-8") as f:
|
||||||
for line in f:
|
for line in f:
|
||||||
data = json.loads(line)
|
data = json.loads(line)
|
||||||
queries.append(data["query"])
|
queries.append(data["query"])
|
||||||
return queries
|
return queries
|
||||||
|
|
||||||
|
|
||||||
def build_index_from_embeddings(
|
def build_index_from_embeddings(embeddings_file: str, output_path: str, backend: str = "hnsw"):
|
||||||
embeddings_file: str, output_path: str, backend: str = "hnsw"
|
|
||||||
):
|
|
||||||
"""
|
"""
|
||||||
Build a LEANN index from pre-computed embeddings.
|
Build a LEANN index from pre-computed embeddings.
|
||||||
|
|
||||||
@@ -173,9 +166,7 @@ def build_index_from_embeddings(
|
|||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(description="Run recall evaluation on a LEANN index.")
|
||||||
description="Run recall evaluation on a LEANN index."
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"index_path",
|
"index_path",
|
||||||
type=str,
|
type=str,
|
||||||
@@ -202,26 +193,22 @@ def main():
|
|||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--num-queries", type=int, default=10, help="Number of queries to evaluate."
|
"--num-queries", type=int, default=10, help="Number of queries to evaluate."
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument("--top-k", type=int, default=3, help="The 'k' value for recall@k.")
|
||||||
"--top-k", type=int, default=3, help="The 'k' value for recall@k."
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--ef-search", type=int, default=120, help="The 'efSearch' parameter for HNSW."
|
"--ef-search", type=int, default=120, help="The 'efSearch' parameter for HNSW."
|
||||||
)
|
)
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
# --- Path Configuration ---
|
# --- Path Configuration ---
|
||||||
# Assumes a project structure where the script is in 'examples/'
|
# Assumes a project structure where the script is in 'benchmarks/'
|
||||||
# and data is in 'data/' at the project root.
|
# and evaluation data is in 'benchmarks/data/'.
|
||||||
project_root = Path(__file__).resolve().parent.parent
|
script_dir = Path(__file__).resolve().parent
|
||||||
data_root = project_root / "data"
|
data_root = script_dir / "data"
|
||||||
|
|
||||||
# Download data based on mode
|
# Download data based on mode
|
||||||
if args.mode == "build":
|
if args.mode == "build":
|
||||||
# For building mode, we need embeddings
|
# For building mode, we need embeddings
|
||||||
download_data_if_needed(
|
download_data_if_needed(data_root, download_embeddings=False) # Basic data first
|
||||||
data_root, download_embeddings=False
|
|
||||||
) # Basic data first
|
|
||||||
|
|
||||||
# Auto-detect dataset type and download embeddings
|
# Auto-detect dataset type and download embeddings
|
||||||
if args.embeddings_file:
|
if args.embeddings_file:
|
||||||
@@ -262,9 +249,7 @@ def main():
|
|||||||
print(f"Index built successfully: {built_index_path}")
|
print(f"Index built successfully: {built_index_path}")
|
||||||
|
|
||||||
# Ask if user wants to run evaluation
|
# Ask if user wants to run evaluation
|
||||||
eval_response = (
|
eval_response = input("Run evaluation on the built index? (y/n): ").strip().lower()
|
||||||
input("Run evaluation on the built index? (y/n): ").strip().lower()
|
|
||||||
)
|
|
||||||
if eval_response != "y":
|
if eval_response != "y":
|
||||||
print("Index building complete. Exiting.")
|
print("Index building complete. Exiting.")
|
||||||
return
|
return
|
||||||
@@ -293,11 +278,9 @@ def main():
|
|||||||
break
|
break
|
||||||
|
|
||||||
if not args.index_path:
|
if not args.index_path:
|
||||||
|
print("No indices found. The data download should have included pre-built indices.")
|
||||||
print(
|
print(
|
||||||
"No indices found. The data download should have included pre-built indices."
|
"Please check the benchmarks/data/indices/ directory or provide --index-path manually."
|
||||||
)
|
|
||||||
print(
|
|
||||||
"Please check the data/indices/ directory or provide --index-path manually."
|
|
||||||
)
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
@@ -310,14 +293,10 @@ def main():
|
|||||||
else:
|
else:
|
||||||
# Fallback: try to infer from the index directory name
|
# Fallback: try to infer from the index directory name
|
||||||
dataset_type = Path(args.index_path).name
|
dataset_type = Path(args.index_path).name
|
||||||
print(
|
print(f"WARNING: Could not detect dataset type from path, inferred '{dataset_type}'.")
|
||||||
f"WARNING: Could not detect dataset type from path, inferred '{dataset_type}'."
|
|
||||||
)
|
|
||||||
|
|
||||||
queries_file = data_root / "queries" / "nq_open.jsonl"
|
queries_file = data_root / "queries" / "nq_open.jsonl"
|
||||||
golden_results_file = (
|
golden_results_file = data_root / "ground_truth" / dataset_type / "flat_results_nq_k3.json"
|
||||||
data_root / "ground_truth" / dataset_type / "flat_results_nq_k3.json"
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"INFO: Detected dataset type: {dataset_type}")
|
print(f"INFO: Detected dataset type: {dataset_type}")
|
||||||
print(f"INFO: Using queries file: {queries_file}")
|
print(f"INFO: Using queries file: {queries_file}")
|
||||||
@@ -327,7 +306,7 @@ def main():
|
|||||||
searcher = LeannSearcher(args.index_path)
|
searcher = LeannSearcher(args.index_path)
|
||||||
queries = load_queries(queries_file)
|
queries = load_queries(queries_file)
|
||||||
|
|
||||||
with open(golden_results_file, "r") as f:
|
with open(golden_results_file) as f:
|
||||||
golden_results_data = json.load(f)
|
golden_results_data = json.load(f)
|
||||||
|
|
||||||
num_eval_queries = min(args.num_queries, len(queries))
|
num_eval_queries = min(args.num_queries, len(queries))
|
||||||
@@ -339,9 +318,7 @@ def main():
|
|||||||
|
|
||||||
for i in range(num_eval_queries):
|
for i in range(num_eval_queries):
|
||||||
start_time = time.time()
|
start_time = time.time()
|
||||||
new_results = searcher.search(
|
new_results = searcher.search(queries[i], top_k=args.top_k, ef=args.ef_search)
|
||||||
queries[i], top_k=args.top_k, ef=args.ef_search
|
|
||||||
)
|
|
||||||
search_times.append(time.time() - start_time)
|
search_times.append(time.time() - start_time)
|
||||||
|
|
||||||
# Correct Recall Calculation: Based on TEXT content
|
# Correct Recall Calculation: Based on TEXT content
|
||||||
@@ -1,26 +1,27 @@
|
|||||||
import time
|
import time
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Dict, List
|
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import torch
|
import torch
|
||||||
from torch import nn
|
from torch import nn
|
||||||
from transformers import AutoModel, BitsAndBytesConfig
|
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
|
from transformers import AutoModel
|
||||||
|
|
||||||
# Add MLX imports
|
# Add MLX imports
|
||||||
try:
|
try:
|
||||||
import mlx.core as mx
|
import mlx.core as mx
|
||||||
from mlx_lm.utils import load
|
from mlx_lm.utils import load
|
||||||
|
|
||||||
MLX_AVAILABLE = True
|
MLX_AVAILABLE = True
|
||||||
except ImportError as e:
|
except ImportError:
|
||||||
print("MLX not available. Install with: uv pip install mlx mlx-lm")
|
print("MLX not available. Install with: uv pip install mlx mlx-lm")
|
||||||
MLX_AVAILABLE = False
|
MLX_AVAILABLE = False
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class BenchmarkConfig:
|
class BenchmarkConfig:
|
||||||
model_path: str = "facebook/contriever"
|
model_path: str = "facebook/contriever"
|
||||||
batch_sizes: List[int] = None
|
batch_sizes: list[int] = None
|
||||||
seq_length: int = 256
|
seq_length: int = 256
|
||||||
num_runs: int = 5
|
num_runs: int = 5
|
||||||
use_fp16: bool = True
|
use_fp16: bool = True
|
||||||
@@ -35,6 +36,7 @@ class BenchmarkConfig:
|
|||||||
if self.batch_sizes is None:
|
if self.batch_sizes is None:
|
||||||
self.batch_sizes = [1, 2, 4, 8, 16, 32, 64]
|
self.batch_sizes = [1, 2, 4, 8, 16, 32, 64]
|
||||||
|
|
||||||
|
|
||||||
class MLXBenchmark:
|
class MLXBenchmark:
|
||||||
"""MLX-specific benchmark for embedding models"""
|
"""MLX-specific benchmark for embedding models"""
|
||||||
|
|
||||||
@@ -55,11 +57,7 @@ class MLXBenchmark:
|
|||||||
|
|
||||||
def _create_random_batch(self, batch_size: int):
|
def _create_random_batch(self, batch_size: int):
|
||||||
"""Create random input batches for MLX testing - same as PyTorch"""
|
"""Create random input batches for MLX testing - same as PyTorch"""
|
||||||
return torch.randint(
|
return torch.randint(0, 1000, (batch_size, self.config.seq_length), dtype=torch.long)
|
||||||
0, 1000,
|
|
||||||
(batch_size, self.config.seq_length),
|
|
||||||
dtype=torch.long
|
|
||||||
)
|
|
||||||
|
|
||||||
def _run_inference(self, input_ids: torch.Tensor) -> float:
|
def _run_inference(self, input_ids: torch.Tensor) -> float:
|
||||||
"""Run MLX inference with same input as PyTorch"""
|
"""Run MLX inference with same input as PyTorch"""
|
||||||
@@ -82,12 +80,12 @@ class MLXBenchmark:
|
|||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"MLX inference error: {e}")
|
print(f"MLX inference error: {e}")
|
||||||
return float('inf')
|
return float("inf")
|
||||||
end_time = time.time()
|
end_time = time.time()
|
||||||
|
|
||||||
return end_time - start_time
|
return end_time - start_time
|
||||||
|
|
||||||
def run(self) -> Dict[int, Dict[str, float]]:
|
def run(self) -> dict[int, dict[str, float]]:
|
||||||
"""Run the MLX benchmark across all batch sizes"""
|
"""Run the MLX benchmark across all batch sizes"""
|
||||||
results = {}
|
results = {}
|
||||||
|
|
||||||
@@ -111,10 +109,10 @@ class MLXBenchmark:
|
|||||||
break
|
break
|
||||||
|
|
||||||
# Run benchmark
|
# Run benchmark
|
||||||
for i in tqdm(range(self.config.num_runs), desc=f"MLX Batch size {batch_size}"):
|
for _i in tqdm(range(self.config.num_runs), desc=f"MLX Batch size {batch_size}"):
|
||||||
try:
|
try:
|
||||||
elapsed_time = self._run_inference(input_ids)
|
elapsed_time = self._run_inference(input_ids)
|
||||||
if elapsed_time != float('inf'):
|
if elapsed_time != float("inf"):
|
||||||
times.append(elapsed_time)
|
times.append(elapsed_time)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error during MLX inference: {e}")
|
print(f"Error during MLX inference: {e}")
|
||||||
@@ -145,16 +143,22 @@ class MLXBenchmark:
|
|||||||
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
|
||||||
class Benchmark:
|
class Benchmark:
|
||||||
def __init__(self, config: BenchmarkConfig):
|
def __init__(self, config: BenchmarkConfig):
|
||||||
self.config = config
|
self.config = config
|
||||||
self.device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
|
self.device = (
|
||||||
|
"cuda"
|
||||||
|
if torch.cuda.is_available()
|
||||||
|
else "mps"
|
||||||
|
if torch.backends.mps.is_available()
|
||||||
|
else "cpu"
|
||||||
|
)
|
||||||
self.model = self._load_model()
|
self.model = self._load_model()
|
||||||
|
|
||||||
def _load_model(self) -> nn.Module:
|
def _load_model(self) -> nn.Module:
|
||||||
print(f"Loading model from {self.config.model_path}...")
|
print(f"Loading model from {self.config.model_path}...")
|
||||||
|
|
||||||
|
|
||||||
model = AutoModel.from_pretrained(self.config.model_path)
|
model = AutoModel.from_pretrained(self.config.model_path)
|
||||||
if self.config.use_fp16:
|
if self.config.use_fp16:
|
||||||
model = model.half()
|
model = model.half()
|
||||||
@@ -166,10 +170,11 @@ class Benchmark:
|
|||||||
|
|
||||||
def _create_random_batch(self, batch_size: int) -> torch.Tensor:
|
def _create_random_batch(self, batch_size: int) -> torch.Tensor:
|
||||||
return torch.randint(
|
return torch.randint(
|
||||||
0, 1000,
|
0,
|
||||||
|
1000,
|
||||||
(batch_size, self.config.seq_length),
|
(batch_size, self.config.seq_length),
|
||||||
device=self.device,
|
device=self.device,
|
||||||
dtype=torch.long
|
dtype=torch.long,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _run_inference(self, input_ids: torch.Tensor) -> float:
|
def _run_inference(self, input_ids: torch.Tensor) -> float:
|
||||||
@@ -177,12 +182,15 @@ class Benchmark:
|
|||||||
|
|
||||||
start_time = time.time()
|
start_time = time.time()
|
||||||
with torch.no_grad():
|
with torch.no_grad():
|
||||||
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
|
self.model(input_ids=input_ids, attention_mask=attention_mask)
|
||||||
|
# mps sync
|
||||||
|
if torch.backends.mps.is_available():
|
||||||
|
torch.mps.synchronize()
|
||||||
end_time = time.time()
|
end_time = time.time()
|
||||||
|
|
||||||
return end_time - start_time
|
return end_time - start_time
|
||||||
|
|
||||||
def run(self) -> Dict[int, Dict[str, float]]:
|
def run(self) -> dict[int, dict[str, float]]:
|
||||||
results = {}
|
results = {}
|
||||||
|
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
@@ -194,7 +202,7 @@ class Benchmark:
|
|||||||
|
|
||||||
input_ids = self._create_random_batch(batch_size)
|
input_ids = self._create_random_batch(batch_size)
|
||||||
|
|
||||||
for i in tqdm(range(self.config.num_runs), desc=f"Batch size {batch_size}"):
|
for _i in tqdm(range(self.config.num_runs), desc=f"Batch size {batch_size}"):
|
||||||
try:
|
try:
|
||||||
elapsed_time = self._run_inference(input_ids)
|
elapsed_time = self._run_inference(input_ids)
|
||||||
times.append(elapsed_time)
|
times.append(elapsed_time)
|
||||||
@@ -219,7 +227,7 @@ class Benchmark:
|
|||||||
print(f"Throughput: {throughput:.2f} sequences/second")
|
print(f"Throughput: {throughput:.2f} sequences/second")
|
||||||
|
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
peak_memory_gb = torch.cuda.max_memory_allocated() / (1024 ** 3)
|
peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)
|
||||||
else:
|
else:
|
||||||
peak_memory_gb = 0.0
|
peak_memory_gb = 0.0
|
||||||
|
|
||||||
@@ -228,6 +236,7 @@ class Benchmark:
|
|||||||
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
|
||||||
def run_benchmark():
|
def run_benchmark():
|
||||||
"""Main function to run the benchmark with optimized parameters."""
|
"""Main function to run the benchmark with optimized parameters."""
|
||||||
config = BenchmarkConfig()
|
config = BenchmarkConfig()
|
||||||
@@ -242,16 +251,13 @@ def run_benchmark():
|
|||||||
return {
|
return {
|
||||||
"max_throughput": max_throughput,
|
"max_throughput": max_throughput,
|
||||||
"avg_throughput": avg_throughput,
|
"avg_throughput": avg_throughput,
|
||||||
"results": results
|
"results": results,
|
||||||
}
|
}
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Benchmark failed: {e}")
|
print(f"Benchmark failed: {e}")
|
||||||
return {
|
return {"max_throughput": 0.0, "avg_throughput": 0.0, "error": str(e)}
|
||||||
"max_throughput": 0.0,
|
|
||||||
"avg_throughput": 0.0,
|
|
||||||
"error": str(e)
|
|
||||||
}
|
|
||||||
|
|
||||||
def run_mlx_benchmark():
|
def run_mlx_benchmark():
|
||||||
"""Run MLX-specific benchmark"""
|
"""Run MLX-specific benchmark"""
|
||||||
@@ -260,13 +266,10 @@ def run_mlx_benchmark():
|
|||||||
return {
|
return {
|
||||||
"max_throughput": 0.0,
|
"max_throughput": 0.0,
|
||||||
"avg_throughput": 0.0,
|
"avg_throughput": 0.0,
|
||||||
"error": "MLX not available"
|
"error": "MLX not available",
|
||||||
}
|
}
|
||||||
|
|
||||||
config = BenchmarkConfig(
|
config = BenchmarkConfig(model_path="mlx-community/all-MiniLM-L6-v2-4bit", use_mlx=True)
|
||||||
model_path="mlx-community/all-MiniLM-L6-v2-4bit",
|
|
||||||
use_mlx=True
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
benchmark = MLXBenchmark(config)
|
benchmark = MLXBenchmark(config)
|
||||||
@@ -276,7 +279,7 @@ def run_mlx_benchmark():
|
|||||||
return {
|
return {
|
||||||
"max_throughput": 0.0,
|
"max_throughput": 0.0,
|
||||||
"avg_throughput": 0.0,
|
"avg_throughput": 0.0,
|
||||||
"error": "No valid results"
|
"error": "No valid results",
|
||||||
}
|
}
|
||||||
|
|
||||||
max_throughput = max(results[batch_size]["throughput"] for batch_size in results)
|
max_throughput = max(results[batch_size]["throughput"] for batch_size in results)
|
||||||
@@ -285,16 +288,13 @@ def run_mlx_benchmark():
|
|||||||
return {
|
return {
|
||||||
"max_throughput": max_throughput,
|
"max_throughput": max_throughput,
|
||||||
"avg_throughput": avg_throughput,
|
"avg_throughput": avg_throughput,
|
||||||
"results": results
|
"results": results,
|
||||||
}
|
}
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"MLX benchmark failed: {e}")
|
print(f"MLX benchmark failed: {e}")
|
||||||
return {
|
return {"max_throughput": 0.0, "avg_throughput": 0.0, "error": str(e)}
|
||||||
"max_throughput": 0.0,
|
|
||||||
"avg_throughput": 0.0,
|
|
||||||
"error": str(e)
|
|
||||||
}
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
print("=== PyTorch Benchmark ===")
|
print("=== PyTorch Benchmark ===")
|
||||||
@@ -308,7 +308,7 @@ if __name__ == "__main__":
|
|||||||
print(f"MLX Average throughput: {mlx_result['avg_throughput']:.2f} sequences/second")
|
print(f"MLX Average throughput: {mlx_result['avg_throughput']:.2f} sequences/second")
|
||||||
|
|
||||||
# Compare results
|
# Compare results
|
||||||
if pytorch_result['max_throughput'] > 0 and mlx_result['max_throughput'] > 0:
|
if pytorch_result["max_throughput"] > 0 and mlx_result["max_throughput"] > 0:
|
||||||
speedup = mlx_result['max_throughput'] / pytorch_result['max_throughput']
|
speedup = mlx_result["max_throughput"] / pytorch_result["max_throughput"]
|
||||||
print(f"\n=== Comparison ===")
|
print("\n=== Comparison ===")
|
||||||
print(f"MLX is {speedup:.2f}x {'faster' if speedup > 1 else 'slower'} than PyTorch")
|
print(f"MLX is {speedup:.2f}x {'faster' if speedup > 1 else 'slower'} than PyTorch")
|
||||||
82  data/.gitattributes  (vendored)
@@ -1,82 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mds filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
-# Video files - compressed
-*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text
-ground_truth/dpr/id_map.json filter=lfs diff=lfs merge=lfs -text
-indices/dpr/dpr_diskann.passages.idx filter=lfs diff=lfs merge=lfs -text
-indices/dpr/dpr_diskann.passages.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/dpr/dpr_diskann_disk.index filter=lfs diff=lfs merge=lfs -text
-indices/dpr/leann.labels.map filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/leann.labels.map filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.index filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.0.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.0.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.1.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.1.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.2.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.2.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.3.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.3.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.4.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.4.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.5.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.5.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.6.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.6.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.7.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.7.jsonl filter=lfs diff=lfs merge=lfs -text
@@ -14903,5 +14903,3 @@ This website includes information about Project Gutenberg™,
 including how to make donations to the Project Gutenberg Literary
 Archive Foundation, how to help produce our new eBooks, and how to
 subscribe to our email newsletter to hear about new eBooks.
-
-
@@ -1,44 +0,0 @@
----
-license: mit
----
-
-# LEANN-RAG Evaluation Data
-
-This repository contains the necessary data to run the recall evaluation scripts for the [LEANN-RAG](https://huggingface.co/LEANN-RAG) project.
-
-## Dataset Components
-
-This dataset is structured into three main parts:
-
-1. **Pre-built LEANN Indices**:
-   * `dpr/`: A pre-built index for the DPR dataset.
-   * `rpj_wiki/`: A pre-built index for the RPJ-Wiki dataset.
-   These indices were created using the `leann-core` library and are required by the `LeannSearcher`.
-
-2. **Ground Truth Data**:
-   * `ground_truth/`: Contains the ground truth files (`flat_results_nq_k3.json`) for both the DPR and RPJ-Wiki datasets. These files map queries to the original passage IDs from the Natural Questions benchmark, evaluated using the Contriever model.
-
-3. **Queries**:
-   * `queries/`: Contains the `nq_open.jsonl` file with the Natural Questions queries used for the evaluation.
-
-## Usage
-
-To use this data, you can download it locally using the `huggingface-hub` library. First, install the library:
-
-```bash
-pip install huggingface-hub
-```
-
-Then, you can download the entire dataset to a local directory (e.g., `data/`) with the following Python script:
-
-```python
-from huggingface_hub import snapshot_download
-
-snapshot_download(
-    repo_id="LEANN-RAG/leann-rag-evaluation-data",
-    repo_type="dataset",
-    local_dir="data"
-)
-```
-
-This will download all the necessary files into a local `data` folder, preserving the repository structure. The evaluation scripts in the main [LEANN-RAG Space](https://huggingface.co/LEANN-RAG) are configured to work with this data structure.
105  demo.ipynb
@@ -1,37 +1,116 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Quick Start \n",
|
||||||
|
"\n",
|
||||||
|
"**Home GitHub Repository:** [LEANN on GitHub](https://github.com/yichuan-w/LEANN)\n",
|
||||||
|
"\n",
|
||||||
|
"**Important for Colab users:** Set your runtime type to T4 GPU for optimal performance. Go to Runtime → Change runtime type → Hardware accelerator → T4 GPU."
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from leann.api import LeannBuilder, LeannSearcher, LeannChat\n",
|
"# install this if you are using colab\n",
|
||||||
|
"! uv pip install leann-core leann-backend-hnsw --no-deps\n",
|
||||||
|
"! uv pip install leann --no-deps\n",
|
||||||
|
"# For Colab environment, we need to set some environment variables\n",
|
||||||
|
"import os\n",
|
||||||
|
"\n",
|
||||||
|
"os.environ[\"LEANN_LOG_LEVEL\"] = \"INFO\" # Enable more detailed logging"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from pathlib import Path\n",
|
||||||
|
"\n",
|
||||||
|
"INDEX_DIR = Path(\"./\").resolve()\n",
|
||||||
|
"INDEX_PATH = str(INDEX_DIR / \"demo.leann\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Build the index"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from leann.api import LeannBuilder\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# 1. Build the index (no embeddings stored!)\n",
|
|
||||||
"builder = LeannBuilder(backend_name=\"hnsw\")\n",
|
"builder = LeannBuilder(backend_name=\"hnsw\")\n",
|
||||||
"builder.add_text(\"C# is a powerful programming language\")\n",
|
"builder.add_text(\"C# is a powerful programming language and it is good at game development\")\n",
|
||||||
"builder.add_text(\"Python is a powerful programming language and it is very popular\")\n",
|
"builder.add_text(\n",
|
||||||
|
" \"Python is a powerful programming language and it is good at machine learning tasks\"\n",
|
||||||
|
")\n",
|
||||||
"builder.add_text(\"Machine learning transforms industries\")\n",
|
"builder.add_text(\"Machine learning transforms industries\")\n",
|
||||||
"builder.add_text(\"Neural networks process complex data\")\n",
|
"builder.add_text(\"Neural networks process complex data\")\n",
|
||||||
"builder.add_text(\"Leann is a great storage saving engine for RAG on your MacBook\")\n",
|
"builder.add_text(\"Leann is a great storage saving engine for RAG on your MacBook\")\n",
|
||||||
"builder.build_index(\"knowledge.leann\")\n",
|
"builder.build_index(INDEX_PATH)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Search with real-time embeddings"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from leann.api import LeannSearcher\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# 2. Search with real-time embeddings\n",
|
"searcher = LeannSearcher(INDEX_PATH)\n",
|
||||||
"searcher = LeannSearcher(\"knowledge.leann\")\n",
|
|
||||||
"results = searcher.search(\"programming languages\", top_k=2)\n",
|
"results = searcher.search(\"programming languages\", top_k=2)\n",
|
||||||
|
"results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Chat with LEANN using retrieved results"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from leann.api import LeannChat\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# 3. Chat with LEANN using retrieved results\n",
|
|
||||||
"llm_config = {\n",
|
"llm_config = {\n",
|
||||||
" \"type\": \"ollama\",\n",
|
" \"type\": \"hf\",\n",
|
||||||
" \"model\": \"llama3.2:1b\"\n",
|
" \"model\": \"Qwen/Qwen3-0.6B\",\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"chat = LeannChat(index_path=\"knowledge.leann\", llm_config=llm_config)\n",
|
"chat = LeannChat(index_path=INDEX_PATH, llm_config=llm_config)\n",
|
||||||
"response = chat.ask(\n",
|
"response = chat.ask(\n",
|
||||||
" \"Compare the two retrieved programming languages and say which one is more popular today.\",\n",
|
" \"Compare the two retrieved programming languages and tell me their advantages.\",\n",
|
||||||
" top_k=2,\n",
|
" top_k=2,\n",
|
||||||
")"
|
" llm_kwargs={\"max_tokens\": 128},\n",
|
||||||
|
")\n",
|
||||||
|
"response"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
220  docs/CONTRIBUTING.md  (new file)
@@ -0,0 +1,220 @@
|
|||||||
|
# 🤝 Contributing
|
||||||
|
|
||||||
|
We welcome contributions! Leann is built by the community, for the community.
|
||||||
|
|
||||||
|
## Ways to Contribute
|
||||||
|
|
||||||
|
- 🐛 **Bug Reports**: Found an issue? Let us know!
|
||||||
|
- 💡 **Feature Requests**: Have an idea? We'd love to hear it!
|
||||||
|
- 🔧 **Code Contributions**: PRs welcome for all skill levels
|
||||||
|
- 📖 **Documentation**: Help make Leann more accessible
|
||||||
|
- 🧪 **Benchmarks**: Share your performance results
|
||||||
|
|
||||||
|
## 🚀 Development Setup
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
1. **Install uv** (fast Python package installer):
|
||||||
|
```bash
|
||||||
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Clone the repository**:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/LEANN-RAG/LEANN-RAG.git
|
||||||
|
cd LEANN-RAG
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Install system dependencies**:
|
||||||
|
|
||||||
|
**macOS:**
|
||||||
|
```bash
|
||||||
|
brew install llvm libomp boost protobuf zeromq pkgconf
|
||||||
|
```
|
||||||
|
|
||||||
|
**Ubuntu/Debian:**
|
||||||
|
```bash
|
||||||
|
sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler \
|
||||||
|
libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Build from source**:
|
||||||
|
```bash
|
||||||
|
# macOS
|
||||||
|
CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv sync
|
||||||
|
|
||||||
|
# Ubuntu/Debian
|
||||||
|
uv sync
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🔨 Pre-commit Hooks
|
||||||
|
|
||||||
|
We use pre-commit hooks to ensure code quality and consistency. This runs automatically before each commit.
|
||||||
|
|
||||||
|
### Setup Pre-commit
|
||||||
|
|
||||||
|
1. **Install pre-commit** (already included when you run `uv sync`):
|
||||||
|
```bash
|
||||||
|
uv pip install pre-commit
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Install the git hooks**:
|
||||||
|
```bash
|
||||||
|
pre-commit install
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Run pre-commit manually** (optional):
|
||||||
|
```bash
|
||||||
|
pre-commit run --all-files
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pre-commit Checks
|
||||||
|
|
||||||
|
Our pre-commit configuration includes:
|
||||||
|
- **Trailing whitespace removal**
|
||||||
|
- **End-of-file fixing**
|
||||||
|
- **YAML validation**
|
||||||
|
- **Large file prevention**
|
||||||
|
- **Merge conflict detection**
|
||||||
|
- **Debug statement detection**
|
||||||
|
- **Code formatting with ruff**
|
||||||
|
- **Code linting with ruff**
|
||||||
|
|
||||||
|
## 🧪 Testing
|
||||||
|
|
||||||
|
### Running Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
uv run pytest
|
||||||
|
|
||||||
|
# Run specific test file
|
||||||
|
uv run pytest test/test_filename.py
|
||||||
|
|
||||||
|
# Run with coverage
|
||||||
|
uv run pytest --cov=leann
|
||||||
|
```
|
||||||
|
|
||||||
|
### Writing Tests
|
||||||
|
|
||||||
|
- Place tests in the `test/` directory
|
||||||
|
- Follow the naming convention `test_*.py`
|
||||||
|
- Use descriptive test names that explain what's being tested
|
||||||
|
- Include both positive and negative test cases
|
||||||
|
|
||||||
|
## 📝 Code Style
|
||||||
|
|
||||||
|
We use `ruff` for both linting and formatting to ensure consistent code style.
|
||||||
|
|
||||||
|
### Format Your Code
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Format all files
|
||||||
|
ruff format
|
||||||
|
|
||||||
|
# Check formatting without changing files
|
||||||
|
ruff format --check
|
||||||
|
```
|
||||||
|
|
||||||
|
### Lint Your Code
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run linter with auto-fix
|
||||||
|
ruff check --fix
|
||||||
|
|
||||||
|
# Just check without fixing
|
||||||
|
ruff check
|
||||||
|
```
|
||||||
|
|
||||||
|
### Style Guidelines
|
||||||
|
|
||||||
|
- Follow PEP 8 conventions
|
||||||
|
- Use descriptive variable names
|
||||||
|
- Add type hints where appropriate
|
||||||
|
- Write docstrings for all public functions and classes
|
||||||
|
- Keep functions focused and single-purpose
|
||||||
|
|
||||||
|
## 🚦 CI/CD
|
||||||
|
|
||||||
|
Our CI pipeline runs automatically on all pull requests. It includes:
|
||||||
|
|
||||||
|
1. **Linting and Formatting**: Ensures code follows our style guidelines
|
||||||
|
2. **Multi-platform builds**: Tests on Ubuntu and macOS
|
||||||
|
3. **Python version matrix**: Tests on Python 3.9-3.13
|
||||||
|
4. **Wheel building**: Ensures packages can be built and distributed
|
||||||
|
|
||||||
|
### CI Commands
|
||||||
|
|
||||||
|
The CI uses the same commands as pre-commit to ensure consistency:
|
||||||
|
```bash
|
||||||
|
# Linting
|
||||||
|
ruff check .
|
||||||
|
|
||||||
|
# Format checking
|
||||||
|
ruff format --check .
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure your code passes these checks locally before pushing!
|
||||||
|
|
||||||
|
## 🔄 Pull Request Process
|
||||||
|
|
||||||
|
1. **Fork the repository** and create your branch from `main`:
|
||||||
|
```bash
|
||||||
|
git checkout -b feature/your-feature-name
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Make your changes**:
|
||||||
|
- Write clean, documented code
|
||||||
|
- Add tests for new functionality
|
||||||
|
- Update documentation as needed
|
||||||
|
|
||||||
|
3. **Run pre-commit checks**:
|
||||||
|
```bash
|
||||||
|
pre-commit run --all-files
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Test your changes**:
|
||||||
|
```bash
|
||||||
|
uv run pytest
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Commit with descriptive messages**:
|
||||||
|
```bash
|
||||||
|
git commit -m "feat: add new search algorithm"
|
||||||
|
```
|
||||||
|
|
||||||
|
Follow [Conventional Commits](https://www.conventionalcommits.org/):
|
||||||
|
- `feat:` for new features
|
||||||
|
- `fix:` for bug fixes
|
||||||
|
- `docs:` for documentation changes
|
||||||
|
- `test:` for test additions/changes
|
||||||
|
- `refactor:` for code refactoring
|
||||||
|
- `perf:` for performance improvements
|
||||||
|
|
||||||
|
6. **Push and create a pull request**:
|
||||||
|
- Provide a clear description of your changes
|
||||||
|
- Reference any related issues
|
||||||
|
- Include examples or screenshots if applicable
|
||||||
|
|
||||||
|
## 📚 Documentation
|
||||||
|
|
||||||
|
When adding new features or making significant changes:
|
||||||
|
|
||||||
|
1. Update relevant documentation in `/docs`
|
||||||
|
2. Add docstrings to new functions/classes
|
||||||
|
3. Update README.md if needed
|
||||||
|
4. Include usage examples
|
||||||
|
|
||||||
|
## 🤔 Getting Help
|
||||||
|
|
||||||
|
- **Discord**: Join our community for discussions
|
||||||
|
- **Issues**: Check existing issues or create a new one
|
||||||
|
- **Discussions**: For general questions and ideas
|
||||||
|
|
||||||
|
## 📄 License
|
||||||
|
|
||||||
|
By contributing, you agree that your contributions will be licensed under the same license as the project (MIT).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Thank you for contributing to LEANN! Every contribution, no matter how small, helps make the project better for everyone. 🌟
|
||||||
22  docs/RELEASE.md  (new file)
@@ -0,0 +1,22 @@
+# Release Guide
+
+## Setup (One-time)
+
+Add `PYPI_API_TOKEN` to GitHub Secrets:
+1. Get token: https://pypi.org/manage/account/token/
+2. Add to secrets: Settings → Secrets → Actions → `PYPI_API_TOKEN`
+
+## Release (One-click)
+
+1. Go to: https://github.com/yichuan-w/LEANN/actions/workflows/release-manual.yml
+2. Click "Run workflow"
+3. Enter version: `0.1.2`
+4. Click green "Run workflow" button
+
+That's it! The workflow will automatically:
+- ✅ Update version in all packages
+- ✅ Build all packages
+- ✅ Publish to PyPI
+- ✅ Create GitHub tag and release
+
+Check progress: https://github.com/yichuan-w/LEANN/actions
123  docs/THINKING_BUDGET_FEATURE.md  (new file)
@@ -0,0 +1,123 @@
+# Thinking Budget Feature Implementation
+
+## Overview
+
+This document describes the implementation of the **thinking budget** feature for LEANN, which allows users to control the computational effort for reasoning models like GPT-Oss:20b.
+
+## Feature Description
+
+The thinking budget feature provides three levels of computational effort for reasoning models:
+- **`low`**: Fast responses, basic reasoning (default for simple queries)
+- **`medium`**: Balanced speed and reasoning depth
+- **`high`**: Maximum reasoning effort, best for complex analytical questions
+
+## Implementation Details
+
+### 1. Command Line Interface
+
+Added `--thinking-budget` parameter to both CLI and RAG examples:
+
+```bash
+# LEANN CLI
+leann ask my-index --llm ollama --model gpt-oss:20b --thinking-budget high
+
+# RAG Examples
+python apps/email_rag.py --llm ollama --llm-model gpt-oss:20b --thinking-budget high
+python apps/document_rag.py --llm openai --llm-model o3 --thinking-budget medium
+```
+
+### 2. LLM Backend Support
+
+#### Ollama Backend (`packages/leann-core/src/leann/chat.py`)
+
+```python
+def ask(self, prompt: str, **kwargs) -> str:
+    # Handle thinking budget for reasoning models
+    options = kwargs.copy()
+    thinking_budget = kwargs.get("thinking_budget")
+    if thinking_budget:
+        options.pop("thinking_budget", None)
+        if thinking_budget in ["low", "medium", "high"]:
+            options["reasoning"] = {"effort": thinking_budget, "exclude": False}
+```
+
+**API Format**: Uses Ollama's `reasoning` parameter with `effort` and `exclude` fields.
+
+#### OpenAI Backend (`packages/leann-core/src/leann/chat.py`)
+
+```python
+def ask(self, prompt: str, **kwargs) -> str:
+    # Handle thinking budget for reasoning models
+    thinking_budget = kwargs.get("thinking_budget")
+    if thinking_budget and thinking_budget in ["low", "medium", "high"]:
+        # Check if this is an o-series model
+        o_series_models = ["o3", "o3-mini", "o4-mini", "o1", "o3-pro", "o3-deep-research"]
+        if any(model in self.model for model in o_series_models):
+            params["reasoning_effort"] = thinking_budget
+```
+
+**API Format**: Uses OpenAI's `reasoning_effort` parameter for o-series models.
+
|
### 3. Parameter Propagation
|
||||||
|
|
||||||
|
The thinking budget parameter is propagated through the LEANN architecture as follows (a minimal sketch of this flow appears after the list):
|
||||||
|
|
||||||
|
1. **CLI** (`packages/leann-core/src/leann/cli.py`): Captures `--thinking-budget` argument
|
||||||
|
2. **Base RAG** (`apps/base_rag_example.py`): Adds parameter to argument parser
|
||||||
|
3. **LeannChat** (`packages/leann-core/src/leann/api.py`): Passes `llm_kwargs` to LLM
|
||||||
|
4. **LLM Interface**: Handles the parameter in backend-specific implementations
|
||||||
|
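For intuition, here is a minimal, self-contained sketch of that flow. `DummyChat` and the argument-parsing code below are illustrative only, not LEANN's actual classes, but the kwargs-forwarding pattern mirrors the propagation described above.

```python
import argparse

# 1/2. CLI / RAG-example layer: capture the flag and collect LLM-specific kwargs
parser = argparse.ArgumentParser()
parser.add_argument("--thinking-budget", choices=["low", "medium", "high"], default=None)
args = parser.parse_args(["--thinking-budget", "high"])

llm_kwargs = {}
if args.thinking_budget:
    llm_kwargs["thinking_budget"] = args.thinking_budget

# 3/4. Chat wrapper forwards llm_kwargs into the backend-specific ask() implementation
class DummyChat:
    def ask(self, prompt: str, **kwargs) -> str:
        budget = kwargs.get("thinking_budget")
        return f"(reasoning effort={budget}) answer to: {prompt}"

print(DummyChat().ask("What is LEANN?", **llm_kwargs))
```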
|
||||||
|
## Files Modified
|
||||||
|
|
||||||
|
### Core Implementation
|
||||||
|
- `packages/leann-core/src/leann/chat.py`: Added thinking budget support to OllamaChat and OpenAIChat
|
||||||
|
- `packages/leann-core/src/leann/cli.py`: Added `--thinking-budget` argument
|
||||||
|
- `apps/base_rag_example.py`: Added thinking budget parameter to RAG examples
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
- `README.md`: Added thinking budget parameter to usage examples
|
||||||
|
- `docs/configuration-guide.md`: Added detailed documentation and usage guidelines
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
- `examples/thinking_budget_demo.py`: Comprehensive demo script with usage examples
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
```bash
|
||||||
|
# High reasoning effort for complex questions
|
||||||
|
leann ask my-index --llm ollama --model gpt-oss:20b --thinking-budget high
|
||||||
|
|
||||||
|
# Medium reasoning for balanced performance
|
||||||
|
leann ask my-index --llm openai --model gpt-4o --thinking-budget medium
|
||||||
|
|
||||||
|
# Low reasoning for fast responses
|
||||||
|
leann ask my-index --llm ollama --model gpt-oss:20b --thinking-budget low
|
||||||
|
```
|
||||||
|
|
||||||
|
### RAG Examples
|
||||||
|
```bash
|
||||||
|
# Email RAG with high reasoning
|
||||||
|
python apps/email_rag.py --llm ollama --llm-model gpt-oss:20b --thinking-budget high
|
||||||
|
|
||||||
|
# Document RAG with medium reasoning
|
||||||
|
python apps/document_rag.py --llm openai --llm-model gpt-4o --thinking-budget medium
|
||||||
|
```
|
||||||
|
|
||||||
|
## Supported Models
|
||||||
|
|
||||||
|
### Ollama Models
|
||||||
|
- **GPT-Oss:20b**: Primary target model with reasoning capabilities
|
||||||
|
- **Other reasoning models**: Any Ollama model that supports the `reasoning` parameter
|
||||||
|
|
||||||
|
### OpenAI Models
|
||||||
|
- **o3, o3-mini, o4-mini, o1**: o-series reasoning models with `reasoning_effort` parameter
|
||||||
|
- **GPT-OSS models**: Models that support reasoning capabilities
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
The implementation includes comprehensive testing (an illustrative sketch follows the list):
|
||||||
|
- Parameter handling verification
|
||||||
|
- Backend-specific API format validation
|
||||||
|
- CLI argument parsing tests
|
||||||
|
- Integration with existing LEANN architecture
|
||||||
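As one illustration of the parameter-handling checks, a unit test along the following lines would verify that only the documented effort levels are accepted. This is hypothetical test code, not the repository's actual test suite.

```python
# Hypothetical pytest-style checks for thinking-budget handling (not LEANN's real tests).
def normalize_thinking_budget(value):
    """Accept only the documented effort levels; drop anything else."""
    return value if value in ("low", "medium", "high") else None

def test_valid_budget_passes_through():
    assert normalize_thinking_budget("high") == "high"

def test_invalid_budget_is_dropped():
    assert normalize_thinking_budget("extreme") is None
```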
128
docs/ast_chunking_guide.md
Normal file
128
docs/ast_chunking_guide.md
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
# AST-Aware Code Chunking Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This guide covers best practices for using AST-aware code chunking in LEANN. AST chunking provides better semantic understanding of code structure compared to traditional text-based chunking.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable AST chunking for mixed content (code + docs)
|
||||||
|
python -m apps.document_rag --enable-code-chunking --data-dir ./my_project
|
||||||
|
|
||||||
|
# Specialized code repository indexing
|
||||||
|
python -m apps.code_rag --repo-dir ./my_codebase
|
||||||
|
|
||||||
|
# Global CLI with AST support
|
||||||
|
leann build my-code-index --docs ./src --use-ast-chunking
|
||||||
|
```
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install LEANN with AST chunking support
|
||||||
|
uv pip install -e "."
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### When to Use AST Chunking
|
||||||
|
|
||||||
|
✅ **Recommended for:**
|
||||||
|
- Code repositories with multiple languages
|
||||||
|
- Mixed documentation and code content
|
||||||
|
- Complex codebases with deep function/class hierarchies
|
||||||
|
- When working with Claude Code for code assistance
|
||||||
|
|
||||||
|
❌ **Not recommended for:**
|
||||||
|
- Pure text documents
|
||||||
|
- Very large files (>1MB)
|
||||||
|
- Languages not supported by tree-sitter
|
||||||
|
|
||||||
|
### Optimal Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Recommended settings for most codebases
|
||||||
|
python -m apps.code_rag \
|
||||||
|
--repo-dir ./src \
|
||||||
|
--ast-chunk-size 768 \
|
||||||
|
--ast-chunk-overlap 96 \
|
||||||
|
--exclude-dirs .git __pycache__ node_modules build dist
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supported Languages
|
||||||
|
|
||||||
|
| Extension | Language | Status |
|
||||||
|
|-----------|----------|--------|
|
||||||
|
| `.py` | Python | ✅ Full support |
|
||||||
|
| `.java` | Java | ✅ Full support |
|
||||||
|
| `.cs` | C# | ✅ Full support |
|
||||||
|
| `.ts`, `.tsx` | TypeScript | ✅ Full support |
|
||||||
|
| `.js`, `.jsx` | JavaScript | ✅ Via TypeScript parser |
|
||||||
|
|
||||||
|
## Integration Examples
|
||||||
|
|
||||||
|
### Document RAG with Code Support
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable code chunking in document RAG
|
||||||
|
python -m apps.document_rag \
|
||||||
|
--enable-code-chunking \
|
||||||
|
--data-dir ./project \
|
||||||
|
--query "How does authentication work in the codebase?"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Claude Code Integration
|
||||||
|
|
||||||
|
When used with the Claude Code MCP server, AST chunking provides better context for:
|
||||||
|
- Code completion and suggestions
|
||||||
|
- Bug analysis and debugging
|
||||||
|
- Architecture understanding
|
||||||
|
- Refactoring assistance
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
1. **Fallback to Traditional Chunking**
|
||||||
|
- Normal behavior for unsupported languages
|
||||||
|
- Check logs for specific language support
|
||||||
|
|
||||||
|
2. **Performance with Large Files**
|
||||||
|
- Adjust `--max-file-size` parameter
|
||||||
|
- Use `--exclude-dirs` to skip unnecessary directories
|
||||||
|
|
||||||
|
3. **Quality Issues**
|
||||||
|
- Try different `--ast-chunk-size` values (512, 768, 1024)
|
||||||
|
- Adjust overlap for better context preservation
|
||||||
|
|
||||||
|
### Debug Mode
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LEANN_LOG_LEVEL=DEBUG
|
||||||
|
python -m apps.code_rag --repo-dir ./my_code
|
||||||
|
```
|
||||||
|
|
||||||
|
## Migration from Traditional Chunking
|
||||||
|
|
||||||
|
Existing workflows continue to work without changes. To enable AST chunking:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Before
|
||||||
|
python -m apps.document_rag --chunk-size 256
|
||||||
|
|
||||||
|
# After (maintains traditional chunking for non-code files)
|
||||||
|
python -m apps.document_rag --enable-code-chunking --chunk-size 256 --ast-chunk-size 768
|
||||||
|
```
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [astchunk GitHub Repository](https://github.com/yilinjz/astchunk)
|
||||||
|
- [LEANN MCP Integration](../packages/leann-mcp/README.md)
|
||||||
|
- [Research Paper](https://arxiv.org/html/2506.15655v1)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Note**: AST chunking maintains full backward compatibility while enhancing code understanding capabilities.
|
||||||
98
docs/code/embedding_model_compare.py
Normal file
98
docs/code/embedding_model_compare.py
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
"""
|
||||||
|
Comparison between Sentence Transformers and OpenAI embeddings
|
||||||
|
|
||||||
|
This example shows how different embedding models handle complex queries
|
||||||
|
and demonstrates the differences between local and API-based embeddings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from leann.embedding_compute import compute_embeddings
|
||||||
|
|
||||||
|
# OpenAI API key should be set as environment variable
|
||||||
|
# export OPENAI_API_KEY="your-api-key-here"
|
||||||
|
|
||||||
|
# Test data
|
||||||
|
conference_text = "[Title]: COLING 2025 Conference\n[URL]: https://coling2025.org/"
|
||||||
|
browser_text = "[Title]: Browser Use Tool\n[URL]: https://github.com/browser-use"
|
||||||
|
|
||||||
|
# Two queries with same intent but different wording
|
||||||
|
query1 = "Tell me my browser history about some conference i often visit"
|
||||||
|
query2 = "browser history about conference I often visit"
|
||||||
|
|
||||||
|
texts = [query1, query2, conference_text, browser_text]
|
||||||
|
|
||||||
|
|
||||||
|
def cosine_similarity(a, b):
|
||||||
|
return np.dot(a, b) # Already normalized
|
||||||
|
|
||||||
|
|
||||||
|
def analyze_embeddings(embeddings, model_name):
|
||||||
|
print(f"\n=== {model_name} Results ===")
|
||||||
|
|
||||||
|
# Results for Query 1
|
||||||
|
sim1_conf = cosine_similarity(embeddings[0], embeddings[2])
|
||||||
|
sim1_browser = cosine_similarity(embeddings[0], embeddings[3])
|
||||||
|
|
||||||
|
print(f"Query 1: '{query1}'")
|
||||||
|
print(f" → Conference similarity: {sim1_conf:.4f} {'✓' if sim1_conf > sim1_browser else ''}")
|
||||||
|
print(
|
||||||
|
f" → Browser similarity: {sim1_browser:.4f} {'✓' if sim1_browser > sim1_conf else ''}"
|
||||||
|
)
|
||||||
|
print(f" Winner: {'Conference' if sim1_conf > sim1_browser else 'Browser'}")
|
||||||
|
|
||||||
|
# Results for Query 2
|
||||||
|
sim2_conf = cosine_similarity(embeddings[1], embeddings[2])
|
||||||
|
sim2_browser = cosine_similarity(embeddings[1], embeddings[3])
|
||||||
|
|
||||||
|
print(f"\nQuery 2: '{query2}'")
|
||||||
|
print(f" → Conference similarity: {sim2_conf:.4f} {'✓' if sim2_conf > sim2_browser else ''}")
|
||||||
|
print(
|
||||||
|
f" → Browser similarity: {sim2_browser:.4f} {'✓' if sim2_browser > sim2_conf else ''}"
|
||||||
|
)
|
||||||
|
print(f" Winner: {'Conference' if sim2_conf > sim2_browser else 'Browser'}")
|
||||||
|
|
||||||
|
# Show the impact
|
||||||
|
print("\n=== Impact Analysis ===")
|
||||||
|
print(f"Conference similarity change: {sim2_conf - sim1_conf:+.4f}")
|
||||||
|
print(f"Browser similarity change: {sim2_browser - sim1_browser:+.4f}")
|
||||||
|
|
||||||
|
if sim1_conf > sim1_browser and sim2_browser > sim2_conf:
|
||||||
|
print("❌ FLIP: Adding 'browser history' flips winner from Conference to Browser!")
|
||||||
|
elif sim1_conf > sim1_browser and sim2_conf > sim2_browser:
|
||||||
|
print("✅ STABLE: Conference remains winner in both queries")
|
||||||
|
elif sim1_browser > sim1_conf and sim2_browser > sim2_conf:
|
||||||
|
print("✅ STABLE: Browser remains winner in both queries")
|
||||||
|
else:
|
||||||
|
print("🔄 MIXED: Results vary between queries")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"query1_conf": sim1_conf,
|
||||||
|
"query1_browser": sim1_browser,
|
||||||
|
"query2_conf": sim2_conf,
|
||||||
|
"query2_browser": sim2_browser,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Test Sentence Transformers
|
||||||
|
print("Testing Sentence Transformers (facebook/contriever)...")
|
||||||
|
try:
|
||||||
|
st_embeddings = compute_embeddings(texts, "facebook/contriever", mode="sentence-transformers")
|
||||||
|
st_results = analyze_embeddings(st_embeddings, "Sentence Transformers (facebook/contriever)")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Sentence Transformers failed: {e}")
|
||||||
|
st_results = None
|
||||||
|
|
||||||
|
# Test OpenAI
|
||||||
|
print("\n" + "=" * 60)
|
||||||
|
print("Testing OpenAI (text-embedding-3-small)...")
|
||||||
|
try:
|
||||||
|
openai_embeddings = compute_embeddings(texts, "text-embedding-3-small", mode="openai")
|
||||||
|
openai_results = analyze_embeddings(openai_embeddings, "OpenAI (text-embedding-3-small)")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ OpenAI failed: {e}")
|
||||||
|
openai_results = None
|
||||||
|
|
||||||
|
# Compare results
|
||||||
|
if st_results and openai_results:
|
||||||
|
print("\n" + "=" * 60)
|
||||||
|
print("=== COMPARISON SUMMARY ===")
|
||||||
384
docs/configuration-guide.md
Normal file
384
docs/configuration-guide.md
Normal file
@@ -0,0 +1,384 @@
|
|||||||
|
# LEANN Configuration Guide
|
||||||
|
|
||||||
|
This guide helps you optimize LEANN for different use cases and understand the trade-offs between various configuration options.
|
||||||
|
|
||||||
|
## Getting Started: Simple is Better
|
||||||
|
|
||||||
|
When first trying LEANN, start with a small dataset to quickly validate your approach:
|
||||||
|
|
||||||
|
**For document RAG**: The default `data/` directory works perfectly - includes 2 AI research papers, Pride and Prejudice literature, and a technical report
|
||||||
|
```bash
|
||||||
|
python -m apps.document_rag --query "What techniques does LEANN use?"
|
||||||
|
```
|
||||||
|
|
||||||
|
**For other data sources**: Limit the dataset size for quick testing
|
||||||
|
```bash
|
||||||
|
# WeChat: Test with recent messages only
|
||||||
|
python -m apps.wechat_rag --max-items 100 --query "What did we discuss about the project timeline?"
|
||||||
|
|
||||||
|
# Browser history: Last few days
|
||||||
|
python -m apps.browser_rag --max-items 500 --query "Find documentation about vector databases"
|
||||||
|
|
||||||
|
# Email: Recent inbox
|
||||||
|
python -m apps.email_rag --max-items 200 --query "Who sent updates about the deployment status?"
|
||||||
|
```
|
||||||
|
|
||||||
|
Once validated, scale up gradually:
|
||||||
|
- 100 documents → 1,000 → 10,000 → full dataset (`--max-items -1`)
|
||||||
|
- This helps identify issues early before committing to long processing times
|
||||||
|
|
||||||
|
## Embedding Model Selection: Understanding the Trade-offs
|
||||||
|
|
||||||
|
Based on our experience developing LEANN, embedding models fall into three categories:
|
||||||
|
|
||||||
|
### Small Models (< 100M parameters)
|
||||||
|
**Example**: `sentence-transformers/all-MiniLM-L6-v2` (22M params)
|
||||||
|
- **Pros**: Lightweight, fast for both indexing and inference
|
||||||
|
- **Cons**: Lower semantic understanding, may miss nuanced relationships
|
||||||
|
- **Use when**: Speed is critical, handling simple queries, interactive mode, or just experimenting with LEANN. If time is not a constraint, consider using a larger/better embedding model
|
||||||
|
|
||||||
|
### Medium Models (100M-500M parameters)
|
||||||
|
**Example**: `facebook/contriever` (110M params), `BAAI/bge-base-en-v1.5` (110M params)
|
||||||
|
- **Pros**: Balanced performance, good multilingual support, reasonable speed
|
||||||
|
- **Cons**: Requires more compute than small models
|
||||||
|
- **Use when**: Need quality results without extreme compute requirements, general-purpose RAG applications
|
||||||
|
|
||||||
|
### Large Models (500M+ parameters)
|
||||||
|
**Example**: `Qwen/Qwen3-Embedding-0.6B` (600M params), `intfloat/multilingual-e5-large` (560M params)
|
||||||
|
- **Pros**: Best semantic understanding, captures complex relationships, excellent multilingual support. **Qwen3-Embedding-0.6B achieves performance close to the OpenAI API!**
|
||||||
|
- **Cons**: Slower inference, longer index build times
|
||||||
|
- **Use when**: Quality is paramount and you have sufficient compute resources. **Highly recommended** for production use (see the builder sketch below)
|
||||||
|
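For example, building with one of the larger local models through the Python API looks roughly like this. This is a sketch based on the `LeannBuilder` usage shown elsewhere in the docs; all other builder parameters are left at their defaults.

```python
from leann.api import LeannBuilder

# Sketch: choose a larger local embedding model when quality matters more than build time.
builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="Qwen/Qwen3-Embedding-0.6B",
    embedding_mode="sentence-transformers",
)
```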
|
||||||
|
### Quick Start: Cloud and Local Embedding Options
|
||||||
|
|
||||||
|
**OpenAI Embeddings (Fastest Setup)**
|
||||||
|
For immediate testing without local model downloads (also a good option if you [do not have a GPU](https://github.com/yichuan-w/LEANN/issues/43) and are not too concerned about your documents leaving your machine; embeddings are computed and recomputed via the OpenAI API):
|
||||||
|
```bash
|
||||||
|
# Set OpenAI embeddings (requires OPENAI_API_KEY)
|
||||||
|
--embedding-mode openai --embedding-model text-embedding-3-small
|
||||||
|
```
|
||||||
|
|
||||||
|
**Ollama Embeddings (Privacy-Focused)**
|
||||||
|
For local embeddings with complete privacy:
|
||||||
|
```bash
|
||||||
|
# First, pull an embedding model
|
||||||
|
ollama pull nomic-embed-text
|
||||||
|
|
||||||
|
# Use Ollama embeddings
|
||||||
|
--embedding-mode ollama --embedding-model nomic-embed-text
|
||||||
|
```
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Cloud vs Local Trade-offs</strong></summary>
|
||||||
|
|
||||||
|
**OpenAI Embeddings** (`text-embedding-3-small/large`)
|
||||||
|
- **Pros**: No local compute needed, consistently fast, high quality
|
||||||
|
- **Cons**: Requires API key, costs money, data leaves your system, [known limitations with certain languages](https://yichuan-w.github.io/blog/lessons_learned_in_dev_leann/)
|
||||||
|
- **When to use**: Prototyping, non-sensitive data, need immediate results
|
||||||
|
|
||||||
|
**Local Embeddings**
|
||||||
|
- **Pros**: Complete privacy, no ongoing costs, full control, can sometimes outperform OpenAI embeddings
|
||||||
|
- **Cons**: Slower than cloud APIs, requires local compute resources
|
||||||
|
- **When to use**: Production systems, sensitive data, cost-sensitive applications
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## Index Selection: Matching Your Scale
|
||||||
|
|
||||||
|
### HNSW (Hierarchical Navigable Small World)
|
||||||
|
**Best for**: Small to medium datasets (< 10M vectors) - **Default and recommended for extremely low storage**
|
||||||
|
- Full recomputation required
|
||||||
|
- High memory usage during build phase
|
||||||
|
- Excellent recall (95%+)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Optimal for most use cases
|
||||||
|
--backend-name hnsw --graph-degree 32 --build-complexity 64
|
||||||
|
```
|
||||||
|
|
||||||
|
### DiskANN
|
||||||
|
**Best for**: Large datasets, especially when you want `recompute=True`.
|
||||||
|
|
||||||
|
**Key advantages:**
|
||||||
|
- **Faster search** on large datasets (3x+ speedup vs HNSW in many cases)
|
||||||
|
- **Smart storage**: `recompute=True` enables automatic graph partitioning for smaller indexes
|
||||||
|
- **Better scaling**: Designed for 100k+ documents
|
||||||
|
|
||||||
|
**Recompute behavior:**
|
||||||
|
- `recompute=True` (recommended): Pure PQ traversal + final reranking - faster and enables partitioning
|
||||||
|
- `recompute=False`: PQ + partial real distances during traversal - slower but higher accuracy
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Recommended for most use cases
|
||||||
|
--backend-name diskann --graph-degree 32 --build-complexity 64
|
||||||
|
```
|
||||||
|
|
||||||
|
**Performance Benchmark**: Run `uv run benchmarks/diskann_vs_hnsw_speed_comparison.py` to compare DiskANN and HNSW on your system.
|
||||||
|
|
||||||
|
## LLM Selection: Engine and Model Comparison
|
||||||
|
|
||||||
|
### LLM Engines
|
||||||
|
|
||||||
|
**OpenAI** (`--llm openai`)
|
||||||
|
- **Pros**: Best quality, consistent performance, no local resources needed
|
||||||
|
- **Cons**: Costs money ($0.15-2.5 per million tokens), requires internet, data privacy concerns
|
||||||
|
- **Models**: `gpt-4o-mini` (fast, cheap), `gpt-4o` (best quality), `o3` (reasoning), `o3-mini` (reasoning, cheaper)
|
||||||
|
- **Thinking Budget**: Use `--thinking-budget low/medium/high` for o-series reasoning models (o3, o3-mini, o4-mini)
|
||||||
|
- **Note**: Our current default, but we recommend switching to Ollama for most use cases
|
||||||
|
|
||||||
|
**Ollama** (`--llm ollama`)
|
||||||
|
- **Pros**: Fully local, free, privacy-preserving, good model variety
|
||||||
|
- **Cons**: Requires local GPU/CPU resources, slower than cloud APIs, and requires installing the extra [ollama app](https://github.com/ollama/ollama?tab=readme-ov-file#ollama) and pre-downloading models with `ollama pull`
|
||||||
|
- **Models**: `qwen3:0.6b` (ultra-fast), `qwen3:1.7b` (balanced), `qwen3:4b` (good quality), `qwen3:7b` (high quality), `deepseek-r1:1.5b` (reasoning)
|
||||||
|
- **Thinking Budget**: Use `--thinking-budget low/medium/high` for reasoning models like GPT-Oss:20b
|
||||||
|
|
||||||
|
**HuggingFace** (`--llm hf`)
|
||||||
|
- **Pros**: Free tier available, huge model selection, direct model loading (vs Ollama's server-based approach)
|
||||||
|
- **Cons**: More complex initial setup
|
||||||
|
- **Models**: `Qwen/Qwen3-1.7B-FP8`
|
||||||
|
|
||||||
|
## Parameter Tuning Guide
|
||||||
|
|
||||||
|
### Search Complexity Parameters
|
||||||
|
|
||||||
|
**`--build-complexity`** (index building)
|
||||||
|
- Controls thoroughness during index construction
|
||||||
|
- Higher = better recall but slower build
|
||||||
|
- Recommendations:
|
||||||
|
- 32: Quick prototyping
|
||||||
|
- 64: Balanced (default)
|
||||||
|
- 128: Production systems
|
||||||
|
- 256: Maximum quality
|
||||||
|
|
||||||
|
**`--search-complexity`** (query time)
|
||||||
|
- Controls search thoroughness
|
||||||
|
- Higher = better results but slower
|
||||||
|
- Recommendations:
|
||||||
|
- 16: Fast/Interactive search
|
||||||
|
- 32: High quality with diversity
|
||||||
|
- 64+: Maximum accuracy
|
||||||
|
|
||||||
|
### Top-K Selection
|
||||||
|
|
||||||
|
**`--top-k`** (number of retrieved chunks)
|
||||||
|
- More chunks = better context but slower LLM processing
|
||||||
|
- Should always be smaller than `--search-complexity`
|
||||||
|
- Guidelines:
|
||||||
|
- 10-20: General questions (default: 20)
|
||||||
|
- 30+: Complex multi-hop reasoning requiring comprehensive context
|
||||||
|
|
||||||
|
**Trade-off formula** (worked example below):
|
||||||
|
- Retrieval time ∝ log(n) × search_complexity
|
||||||
|
- LLM processing time ∝ top_k × chunk_size
|
||||||
|
- Total context = top_k × chunk_size tokens
|
||||||
|
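To make the proportionalities concrete, here is a small back-of-the-envelope calculation (illustrative numbers only, not benchmark results):

```python
import math

# Illustrative parameters, not measurements
n = 1_000_000          # indexed chunks
search_complexity = 32
top_k = 20
chunk_size = 256       # tokens per chunk

retrieval_cost = math.log(n) * search_complexity  # ∝ log(n) × search_complexity
llm_cost = top_k * chunk_size                     # ∝ top_k × chunk_size
total_context = top_k * chunk_size                # tokens handed to the LLM

print(f"relative retrieval cost: {retrieval_cost:.0f}")
print(f"relative LLM cost:       {llm_cost}")
print(f"total context tokens:    {total_context}")  # 5120 tokens
```

Doubling `top_k` doubles both the LLM cost and the context size, while raising `search_complexity` only scales the comparatively cheap retrieval term.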
|
||||||
|
### Thinking Budget for Reasoning Models
|
||||||
|
|
||||||
|
**`--thinking-budget`** (reasoning effort level)
|
||||||
|
- Controls the computational effort for reasoning models
|
||||||
|
- Options: `low`, `medium`, `high`
|
||||||
|
- Guidelines:
|
||||||
|
- `low`: Fast responses, basic reasoning (default for simple queries)
|
||||||
|
- `medium`: Balanced speed and reasoning depth
|
||||||
|
- `high`: Maximum reasoning effort, best for complex analytical questions
|
||||||
|
- **Supported Models**:
|
||||||
|
- **Ollama**: `gpt-oss:20b`, `gpt-oss:120b`
|
||||||
|
- **OpenAI**: `o3`, `o3-mini`, `o4-mini`, `o1` (o-series reasoning models)
|
||||||
|
- **Note**: Models without reasoning support will show a warning and proceed without reasoning parameters
|
||||||
|
- **Example**: `--thinking-budget high` for complex analytical questions
|
||||||
|
|
||||||
|
**📖 For detailed usage examples and implementation details, check out [Thinking Budget Documentation](THINKING_BUDGET_FEATURE.md)**
|
||||||
|
|
||||||
|
**💡 Quick Examples:**
|
||||||
|
```bash
|
||||||
|
# OpenAI o-series reasoning model
|
||||||
|
python apps/document_rag.py --query "What are the main techniques LEANN explores?" \
|
||||||
|
--index-dir hnswbuild --backend hnsw \
|
||||||
|
--llm openai --llm-model o3 --thinking-budget medium
|
||||||
|
|
||||||
|
# Ollama reasoning model
|
||||||
|
python apps/document_rag.py --query "What are the main techniques LEANN explores?" \
|
||||||
|
--index-dir hnswbuild --backend hnsw \
|
||||||
|
--llm ollama --llm-model gpt-oss:20b --thinking-budget high
|
||||||
|
```
|
||||||
|
|
||||||
|
### Graph Degree (HNSW/DiskANN)
|
||||||
|
|
||||||
|
**`--graph-degree`**
|
||||||
|
- Number of connections per node in the graph
|
||||||
|
- Higher = better recall but more memory
|
||||||
|
- HNSW: 16-32 (default: 32)
|
||||||
|
- DiskANN: 32-128 (default: 64)
|
||||||
|
|
||||||
|
|
||||||
|
## Performance Optimization Checklist
|
||||||
|
|
||||||
|
### If Embedding is Too Slow
|
||||||
|
|
||||||
|
1. **Switch to smaller model**:
|
||||||
|
```bash
|
||||||
|
# From large model
|
||||||
|
--embedding-model Qwen/Qwen3-Embedding-0.6B
|
||||||
|
# To small model
|
||||||
|
--embedding-model sentence-transformers/all-MiniLM-L6-v2
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Limit dataset size for testing**:
|
||||||
|
```bash
|
||||||
|
--max-items 1000 # Process first 1k items only
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Use MLX on Apple Silicon** (optional optimization):
|
||||||
|
```bash
|
||||||
|
--embedding-mode mlx --embedding-model mlx-community/Qwen3-Embedding-0.6B-8bit
|
||||||
|
```
|
||||||
|
Note that MLX may not be the best choice: in our tests it offered only about a 1.3x speedup over the Hugging Face path, so Ollama may be a better option for embedding generation.
|
||||||
|
|
||||||
|
4. **Use Ollama**
|
||||||
|
```bash
|
||||||
|
--embedding-mode ollama --embedding-model nomic-embed-text
|
||||||
|
```
|
||||||
|
To discover additional embedding models in Ollama, check out https://ollama.com/search?c=embedding or read more about embedding models at https://ollama.com/blog/embedding-models. Be sure to pick a model size that works well for your hardware.
|
||||||
|
### If Search Quality is Poor
|
||||||
|
|
||||||
|
1. **Increase retrieval count**:
|
||||||
|
```bash
|
||||||
|
--top-k 30 # Retrieve more candidates
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Upgrade embedding model**:
|
||||||
|
```bash
|
||||||
|
# For English
|
||||||
|
--embedding-model BAAI/bge-base-en-v1.5
|
||||||
|
# For multilingual
|
||||||
|
--embedding-model intfloat/multilingual-e5-large
|
||||||
|
```
|
||||||
|
|
||||||
|
## Understanding the Trade-offs
|
||||||
|
|
||||||
|
Every configuration choice involves trade-offs:
|
||||||
|
|
||||||
|
| Factor | Small/Fast | Large/Quality |
|
||||||
|
|--------|------------|---------------|
|
||||||
|
| Embedding Model | `all-MiniLM-L6-v2` | `Qwen/Qwen3-Embedding-0.6B` |
|
||||||
|
| Chunk Size | 512 tokens | 128 tokens |
|
||||||
|
| Index Type | HNSW | DiskANN |
|
||||||
|
| LLM | `qwen3:1.7b` | `gpt-4o` |
|
||||||
|
|
||||||
|
The key is finding the right balance for your specific use case. Start small and simple, measure performance, then scale up only where needed.
|
||||||
|
|
||||||
|
## Low-resource setups
|
||||||
|
|
||||||
|
If you don’t have a local GPU or builds/searches are too slow, use one or more of the options below.
|
||||||
|
|
||||||
|
### 1) Use OpenAI embeddings (no local compute)
|
||||||
|
|
||||||
|
Fastest path with zero local GPU requirements. Set your API key and use OpenAI embeddings during build and search:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# Build with OpenAI embeddings
|
||||||
|
leann build my-index \
|
||||||
|
--embedding-mode openai \
|
||||||
|
--embedding-model text-embedding-3-small
|
||||||
|
|
||||||
|
# Search with OpenAI embeddings (recompute at query time)
|
||||||
|
leann search my-index "your query" \
|
||||||
|
--recompute
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2) Run remote builds with SkyPilot (cloud GPU)
|
||||||
|
|
||||||
|
Offload embedding generation and index building to a GPU VM using [SkyPilot](https://skypilot.readthedocs.io/en/latest/). A template is provided at `sky/leann-build.yaml`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# One-time: install and configure SkyPilot
|
||||||
|
pip install skypilot
|
||||||
|
|
||||||
|
# Launch with defaults (L4:1) and mount ./data to ~/leann-data; the build runs automatically
|
||||||
|
sky launch -c leann-gpu sky/leann-build.yaml
|
||||||
|
|
||||||
|
# Override parameters via -e key=value (optional)
|
||||||
|
sky launch -c leann-gpu sky/leann-build.yaml \
|
||||||
|
-e index_name=my-index \
|
||||||
|
-e backend=hnsw \
|
||||||
|
-e embedding_mode=sentence-transformers \
|
||||||
|
-e embedding_model=Qwen/Qwen3-Embedding-0.6B
|
||||||
|
|
||||||
|
# Copy the built index back to your local .leann (use rsync)
|
||||||
|
rsync -Pavz leann-gpu:~/.leann/indexes/my-index ./.leann/indexes/
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3) Disable recomputation to trade storage for speed
|
||||||
|
|
||||||
|
If you need lower latency and have more storage/memory, disable recomputation. This stores full embeddings and avoids recomputing at search time.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build without recomputation (HNSW requires non-compact in this mode)
|
||||||
|
leann build my-index --no-recompute --no-compact
|
||||||
|
|
||||||
|
# Search without recomputation
|
||||||
|
leann search my-index "your query" --no-recompute
|
||||||
|
```
|
||||||
|
|
||||||
|
When to use:
|
||||||
|
- Extreme low latency requirements (high QPS, interactive assistants)
|
||||||
|
- Read-heavy workloads where storage is cheaper than latency
|
||||||
|
- No always-available GPU
|
||||||
|
|
||||||
|
Constraints:
|
||||||
|
- HNSW: when `--no-recompute` is set, LEANN automatically disables compact mode during build
|
||||||
|
- DiskANN: supported; `--no-recompute` skips selective recompute during search
|
||||||
|
|
||||||
|
Storage impact (see the short calculation below):
|
||||||
|
- Storing N embeddings of dimension D with float32 requires approximately N × D × 4 bytes
|
||||||
|
- Example: 1,000,000 chunks × 768 dims × 4 bytes ≈ 2.86 GB (plus graph/metadata)
|
||||||
|
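The same estimate in code (raw float32 embeddings only; the graph and metadata add further overhead):

```python
def embedding_storage_gib(num_chunks: int, dim: int, bytes_per_float: int = 4) -> float:
    """Approximate raw storage for num_chunks float32 embeddings of dimension dim."""
    return num_chunks * dim * bytes_per_float / (1024 ** 3)

print(f"{embedding_storage_gib(1_000_000, 768):.2f} GiB")  # ≈ 2.86, matching the example above
```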
|
||||||
|
Converting an existing index (rebuild required):
|
||||||
|
```bash
|
||||||
|
# Rebuild in-place (ensure you still have original docs or can regenerate chunks)
|
||||||
|
leann build my-index --force --no-recompute --no-compact
|
||||||
|
```
|
||||||
|
|
||||||
|
Python API usage:
|
||||||
|
```python
|
||||||
|
from leann import LeannSearcher
|
||||||
|
|
||||||
|
searcher = LeannSearcher("/path/to/my-index.leann")
|
||||||
|
results = searcher.search("your query", top_k=10, recompute_embeddings=False)
|
||||||
|
```
|
||||||
|
|
||||||
|
Trade-offs:
|
||||||
|
- Lower latency and fewer network hops at query time
|
||||||
|
- Significantly higher storage (10–100× vs selective recomputation)
|
||||||
|
- Slightly larger memory footprint during build and search
|
||||||
|
|
||||||
|
Quick benchmark results (`benchmarks/benchmark_no_recompute.py` with 5k texts, complexity=32):
|
||||||
|
|
||||||
|
- HNSW
|
||||||
|
|
||||||
|
```text
|
||||||
|
recompute=True: search_time=0.818s, size=1.1MB
|
||||||
|
recompute=False: search_time=0.012s, size=16.6MB
|
||||||
|
```
|
||||||
|
|
||||||
|
- DiskANN
|
||||||
|
|
||||||
|
```text
|
||||||
|
recompute=True: search_time=0.041s, size=5.9MB
|
||||||
|
recompute=False: search_time=0.013s, size=24.6MB
|
||||||
|
```
|
||||||
|
|
||||||
|
Conclusion:
|
||||||
|
- **HNSW**: `no-recompute` is significantly faster (no embedding recomputation) but requires much more storage (stores all embeddings)
|
||||||
|
- **DiskANN**: `no-recompute` uses PQ + partial real distances during traversal (slower but higher accuracy), while `recompute=True` uses pure PQ traversal + final reranking (faster traversal, enables build-time partitioning for smaller storage)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Further Reading
|
||||||
|
|
||||||
|
- [Lessons Learned Developing LEANN](https://yichuan-w.github.io/blog/lessons_learned_in_dev_leann/)
|
||||||
|
- [LEANN Technical Paper](https://arxiv.org/abs/2506.08276)
|
||||||
|
- [DiskANN Original Paper](https://papers.nips.cc/paper/2019/file/09853c7fb1d3f8ee67a61b6bf4a7f8e6-Paper.pdf)
|
||||||
|
- [SSD-based Graph Partitioning](https://github.com/SonglinLife/SSD_BASED_PLAN)
|
||||||
10
docs/faq.md
Normal file
10
docs/faq.md
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
# FAQ
|
||||||
|
|
||||||
|
## 1. My index build time seems long
|
||||||
|
|
||||||
|
You can speed up the process by using a lightweight embedding model. Add this to your arguments:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
--embedding-model sentence-transformers/all-MiniLM-L6-v2
|
||||||
|
```
|
||||||
|
**Model sizes:** `all-MiniLM-L6-v2` (30M parameters), `facebook/contriever` (~100M parameters), `Qwen3-0.6B` (600M parameters)
|
||||||
23
docs/features.md
Normal file
23
docs/features.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# ✨ Detailed Features
|
||||||
|
|
||||||
|
## 🔥 Core Features
|
||||||
|
|
||||||
|
- **🔄 Real-time Embeddings** - Eliminate heavy embedding storage with dynamic computation, using optimized ZMQ servers, an overlapped and batched search paradigm, and a highly optimized embedding engine
|
||||||
|
- **🧠 AST-Aware Code Chunking** - Intelligent code chunking that preserves semantic boundaries (functions, classes, methods) for Python, Java, C#, and TypeScript files
|
||||||
|
- **📈 Scalable Architecture** - Handles millions of documents on consumer hardware; the larger your dataset, the more LEANN can save
|
||||||
|
- **🎯 Graph Pruning** - Advanced techniques to minimize the storage overhead of vector search to a limited footprint
|
||||||
|
- **🏗️ Pluggable Backends** - HNSW/FAISS (default), with optional DiskANN for large-scale deployments
|
||||||
|
|
||||||
|
## 🛠️ Technical Highlights
|
||||||
|
- **🔄 Recompute Mode** - Highest accuracy scenarios while eliminating vector storage overhead
|
||||||
|
- **⚡ Zero-copy Operations** - Minimize IPC overhead by transferring distances instead of embeddings
|
||||||
|
- **🚀 High-throughput Embedding Pipeline** - Optimized batched processing for maximum efficiency
|
||||||
|
- **🎯 Two-level Search** - Novel coarse-to-fine search overlap for accelerated query processing (optional)
|
||||||
|
- **💾 Memory-mapped Indices** - Fast startup with raw text mapping to reduce memory overhead
|
||||||
|
- **🚀 MLX Support** - Ultra-fast recompute/build with quantized embedding models, accelerating building and search ([minimal example](../examples/mlx_demo.py))
|
||||||
|
|
||||||
|
## 🎨 Developer Experience
|
||||||
|
|
||||||
|
- **Simple Python API** - Get started in minutes
|
||||||
|
- **Extensible backend system** - Easy to add new algorithms
|
||||||
|
- **Comprehensive examples** - From basic usage to production deployment
|
||||||
75
docs/normalized_embeddings.md
Normal file
75
docs/normalized_embeddings.md
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
# Normalized Embeddings Support in LEANN
|
||||||
|
|
||||||
|
LEANN now automatically detects normalized embedding models and sets the appropriate distance metric for optimal performance.
|
||||||
|
|
||||||
|
## What are Normalized Embeddings?
|
||||||
|
|
||||||
|
Normalized embeddings are vectors with L2 norm = 1 (unit vectors). These embeddings are optimized for cosine similarity rather than Maximum Inner Product Search (MIPS).
|
||||||
|
|
||||||
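A quick way to check whether a model produces normalized embeddings (an illustrative NumPy snippet; this is not LEANN's internal detection code):

```python
import numpy as np

def is_normalized(embeddings: np.ndarray, tol: float = 1e-3) -> bool:
    """Return True if every row has approximately unit L2 norm."""
    norms = np.linalg.norm(embeddings, axis=1)
    return bool(np.allclose(norms, 1.0, atol=tol))

vecs = np.random.randn(4, 8)
print(is_normalized(vecs))  # False for raw random vectors
print(is_normalized(vecs / np.linalg.norm(vecs, axis=1, keepdims=True)))  # True after L2 normalization
```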
|
## Automatic Detection
|
||||||
|
|
||||||
|
When you create a `LeannBuilder` instance with a normalized embedding model, LEANN will:
|
||||||
|
|
||||||
|
1. **Automatically set `distance_metric="cosine"`** if not specified
|
||||||
|
2. **Show a warning** if you manually specify a different distance metric
|
||||||
|
3. **Provide optimal search performance** with the correct metric
|
||||||
|
|
||||||
|
## Supported Normalized Embedding Models
|
||||||
|
|
||||||
|
### OpenAI
|
||||||
|
All OpenAI text embedding models are normalized:
|
||||||
|
- `text-embedding-ada-002`
|
||||||
|
- `text-embedding-3-small`
|
||||||
|
- `text-embedding-3-large`
|
||||||
|
|
||||||
|
### Voyage AI
|
||||||
|
All Voyage AI embedding models are normalized:
|
||||||
|
- `voyage-2`
|
||||||
|
- `voyage-3`
|
||||||
|
- `voyage-large-2`
|
||||||
|
- `voyage-multilingual-2`
|
||||||
|
- `voyage-code-2`
|
||||||
|
|
||||||
|
### Cohere
|
||||||
|
All Cohere embedding models are normalized:
|
||||||
|
- `embed-english-v3.0`
|
||||||
|
- `embed-multilingual-v3.0`
|
||||||
|
- `embed-english-light-v3.0`
|
||||||
|
- `embed-multilingual-light-v3.0`
|
||||||
|
|
||||||
|
## Example Usage
|
||||||
|
|
||||||
|
```python
|
||||||
|
from leann.api import LeannBuilder
|
||||||
|
|
||||||
|
# Automatic detection - will use cosine distance
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name="hnsw",
|
||||||
|
embedding_model="text-embedding-3-small",
|
||||||
|
embedding_mode="openai"
|
||||||
|
)
|
||||||
|
# Warning: Detected normalized embeddings model 'text-embedding-3-small'...
|
||||||
|
# Automatically setting distance_metric='cosine'
|
||||||
|
|
||||||
|
# Manual override (not recommended)
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name="hnsw",
|
||||||
|
embedding_model="text-embedding-3-small",
|
||||||
|
embedding_mode="openai",
|
||||||
|
distance_metric="mips" # Will show warning
|
||||||
|
)
|
||||||
|
# Warning: Using 'mips' distance metric with normalized embeddings...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Non-Normalized Embeddings
|
||||||
|
|
||||||
|
Models like `facebook/contriever` and other sentence-transformers models that are not normalized will continue to use MIPS by default, which is optimal for them.
|
||||||
|
|
||||||
|
## Why This Matters
|
||||||
|
|
||||||
|
Using the wrong distance metric with normalized embeddings can lead to:
|
||||||
|
- **Poor search quality** due to HNSW's early termination with narrow score ranges
|
||||||
|
- **Incorrect ranking** of search results
|
||||||
|
- **Suboptimal performance** compared to using the correct metric
|
||||||
|
|
||||||
|
For more details on why this happens, see our analysis in the [embedding detection code](../packages/leann-core/src/leann/api.py) which automatically handles normalized embeddings and MIPS distance metric issues.
|
||||||
21
docs/roadmap.md
Normal file
21
docs/roadmap.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# 📈 Roadmap
|
||||||
|
|
||||||
|
## 🎯 Q2 2025
|
||||||
|
|
||||||
|
- [X] HNSW backend integration
|
||||||
|
- [X] DiskANN backend with MIPS/L2/Cosine support
|
||||||
|
- [X] Real-time embedding pipeline
|
||||||
|
- [X] Memory-efficient graph pruning
|
||||||
|
|
||||||
|
## 🚀 Q3 2025
|
||||||
|
|
||||||
|
- [ ] Advanced caching strategies
|
||||||
|
- [ ] Add contextual retrieval (https://www.anthropic.com/news/contextual-retrieval)
|
||||||
|
- [ ] Add sleep-time compute and a summarization agent to summarize files on your computer
|
||||||
|
- [ ] Add OpenAI recompute API
|
||||||
|
|
||||||
|
## 🌟 Q4 2025
|
||||||
|
|
||||||
|
- [ ] Integration with LangChain/LlamaIndex
|
||||||
|
- [ ] Visual similarity search
|
||||||
|
- [ ] Query rewriting, reranking, and expansion
|
||||||
@@ -1,16 +1,23 @@
|
|||||||
"""
|
"""
|
||||||
Simple demo showing basic leann usage
|
Simple demo showing basic leann usage
|
||||||
Run: uv run python examples/simple_demo.py
|
Run: uv run python examples/basic_demo.py
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
from leann import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
|
from leann import LeannBuilder, LeannChat, LeannSearcher
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(description="Simple demo of Leann with selectable embedding models.")
|
parser = argparse.ArgumentParser(
|
||||||
parser.add_argument("--embedding_model", type=str, default="sentence-transformers/all-mpnet-base-v2",
|
description="Simple demo of Leann with selectable embedding models."
|
||||||
help="The embedding model to use, e.g., 'sentence-transformers/all-mpnet-base-v2' or 'text-embedding-ada-002'.")
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--embedding_model",
|
||||||
|
type=str,
|
||||||
|
default="sentence-transformers/all-mpnet-base-v2",
|
||||||
|
help="The embedding model to use, e.g., 'sentence-transformers/all-mpnet-base-v2' or 'text-embedding-ada-002'.",
|
||||||
|
)
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
print(f"=== Leann Simple Demo with {args.embedding_model} ===")
|
print(f"=== Leann Simple Demo with {args.embedding_model} ===")
|
||||||
@@ -74,7 +81,7 @@ def main():
|
|||||||
print()
|
print()
|
||||||
|
|
||||||
print("Demo completed! Try running:")
|
print("Demo completed! Try running:")
|
||||||
print(" uv run python examples/document_search.py")
|
print(" uv run python apps/document_rag.py")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
@@ -1,146 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""
|
|
||||||
Document search demo with recompute mode
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
import shutil
|
|
||||||
import time
|
|
||||||
|
|
||||||
# Import backend packages to trigger plugin registration
|
|
||||||
try:
|
|
||||||
import leann_backend_diskann
|
|
||||||
import leann_backend_hnsw
|
|
||||||
print("INFO: Backend packages imported successfully.")
|
|
||||||
except ImportError as e:
|
|
||||||
print(f"WARNING: Could not import backend packages. Error: {e}")
|
|
||||||
|
|
||||||
# Import upper-level API from leann-core
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
|
|
||||||
|
|
||||||
def load_sample_documents():
|
|
||||||
"""Create sample documents for demonstration"""
|
|
||||||
docs = [
|
|
||||||
{"title": "Intro to Python", "content": "Python is a high-level, interpreted language known for simplicity."},
|
|
||||||
{"title": "ML Basics", "content": "Machine learning builds systems that learn from data."},
|
|
||||||
{"title": "Data Structures", "content": "Data structures like arrays, lists, and graphs organize data."},
|
|
||||||
]
|
|
||||||
return docs
|
|
||||||
|
|
||||||
def main():
|
|
||||||
print("==========================================================")
|
|
||||||
print("=== Leann Document Search Demo (DiskANN + Recompute) ===")
|
|
||||||
print("==========================================================")
|
|
||||||
|
|
||||||
INDEX_DIR = Path("./test_indices")
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "documents.diskann")
|
|
||||||
BACKEND_TO_TEST = "diskann"
|
|
||||||
|
|
||||||
if INDEX_DIR.exists():
|
|
||||||
print(f"--- Cleaning up old index directory: {INDEX_DIR} ---")
|
|
||||||
shutil.rmtree(INDEX_DIR)
|
|
||||||
|
|
||||||
# --- 1. Build index ---
|
|
||||||
print(f"\n[PHASE 1] Building index using '{BACKEND_TO_TEST}' backend...")
|
|
||||||
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name=BACKEND_TO_TEST,
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64
|
|
||||||
)
|
|
||||||
|
|
||||||
documents = load_sample_documents()
|
|
||||||
print(f"Loaded {len(documents)} sample documents.")
|
|
||||||
for doc in documents:
|
|
||||||
builder.add_text(doc["content"], metadata={"title": doc["title"]})
|
|
||||||
|
|
||||||
builder.build_index(INDEX_PATH)
|
|
||||||
print(f"\nIndex built!")
|
|
||||||
|
|
||||||
# --- 2. Basic search demo ---
|
|
||||||
print(f"\n[PHASE 2] Basic search using '{BACKEND_TO_TEST}' backend...")
|
|
||||||
searcher = LeannSearcher(index_path=INDEX_PATH)
|
|
||||||
|
|
||||||
query = "What is machine learning?"
|
|
||||||
print(f"\nQuery: '{query}'")
|
|
||||||
|
|
||||||
print("\n--- Basic search mode (PQ computation) ---")
|
|
||||||
start_time = time.time()
|
|
||||||
results = searcher.search(query, top_k=2)
|
|
||||||
basic_time = time.time() - start_time
|
|
||||||
|
|
||||||
print(f"⏱️ Basic search time: {basic_time:.3f} seconds")
|
|
||||||
print(">>> Basic search results <<<")
|
|
||||||
for i, res in enumerate(results, 1):
|
|
||||||
print(f" {i}. ID: {res.id}, Score: {res.score:.4f}, Text: '{res.text}', Metadata: {res.metadata}")
|
|
||||||
|
|
||||||
# --- 3. Recompute search demo ---
|
|
||||||
print(f"\n[PHASE 3] Recompute search using embedding server...")
|
|
||||||
|
|
||||||
print("\n--- Recompute search mode (get real embeddings via network) ---")
|
|
||||||
|
|
||||||
# Configure recompute parameters
|
|
||||||
recompute_params = {
|
|
||||||
"recompute_beighbor_embeddings": True, # Enable network recomputation
|
|
||||||
"USE_DEFERRED_FETCH": False, # Don't use deferred fetch
|
|
||||||
"skip_search_reorder": True, # Skip search reordering
|
|
||||||
"dedup_node_dis": True, # Enable node distance deduplication
|
|
||||||
"prune_ratio": 0.1, # Pruning ratio 10%
|
|
||||||
"batch_recompute": False, # Don't use batch recomputation
|
|
||||||
"global_pruning": False, # Don't use global pruning
|
|
||||||
"zmq_port": 5555, # ZMQ port
|
|
||||||
"embedding_model": "sentence-transformers/all-mpnet-base-v2"
|
|
||||||
}
|
|
||||||
|
|
||||||
print("Recompute parameter configuration:")
|
|
||||||
for key, value in recompute_params.items():
|
|
||||||
print(f" {key}: {value}")
|
|
||||||
|
|
||||||
print(f"\n🔄 Executing Recompute search...")
|
|
||||||
try:
|
|
||||||
start_time = time.time()
|
|
||||||
recompute_results = searcher.search(query, top_k=2, **recompute_params)
|
|
||||||
recompute_time = time.time() - start_time
|
|
||||||
|
|
||||||
print(f"⏱️ Recompute search time: {recompute_time:.3f} seconds")
|
|
||||||
print(">>> Recompute search results <<<")
|
|
||||||
for i, res in enumerate(recompute_results, 1):
|
|
||||||
print(f" {i}. ID: {res.id}, Score: {res.score:.4f}, Text: '{res.text}', Metadata: {res.metadata}")
|
|
||||||
|
|
||||||
# Compare results
|
|
||||||
print(f"\n--- Result comparison ---")
|
|
||||||
print(f"Basic search time: {basic_time:.3f} seconds")
|
|
||||||
print(f"Recompute time: {recompute_time:.3f} seconds")
|
|
||||||
|
|
||||||
print("\nBasic search vs Recompute results:")
|
|
||||||
for i in range(min(len(results), len(recompute_results))):
|
|
||||||
basic_score = results[i].score
|
|
||||||
recompute_score = recompute_results[i].score
|
|
||||||
score_diff = abs(basic_score - recompute_score)
|
|
||||||
print(f" Position {i+1}: PQ={basic_score:.4f}, Recompute={recompute_score:.4f}, Difference={score_diff:.4f}")
|
|
||||||
|
|
||||||
if recompute_time > basic_time:
|
|
||||||
print(f"✅ Recompute mode working correctly (more accurate but slower)")
|
|
||||||
else:
|
|
||||||
print(f"ℹ️ Recompute time is unusually fast, network recomputation may not be enabled")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"❌ Recompute search failed: {e}")
|
|
||||||
print("This usually indicates an embedding server connection issue")
|
|
||||||
|
|
||||||
# --- 4. Chat demo ---
|
|
||||||
print(f"\n[PHASE 4] Starting chat session...")
|
|
||||||
chat = LeannChat(index_path=INDEX_PATH)
|
|
||||||
chat_response = chat.ask(query)
|
|
||||||
print(f"You: {query}")
|
|
||||||
print(f"Leann: {chat_response}")
|
|
||||||
|
|
||||||
print("\n==========================================================")
|
|
||||||
print("✅ Demo finished successfully!")
|
|
||||||
print("==========================================================")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -1,122 +0,0 @@
|
|||||||
import os
|
|
||||||
import email
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any
|
|
||||||
from llama_index.core import Document
|
|
||||||
from llama_index.core.readers.base import BaseReader
|
|
||||||
|
|
||||||
def find_all_messages_directories(root: str = None) -> List[Path]:
|
|
||||||
"""
|
|
||||||
Recursively find all 'Messages' directories under the given root.
|
|
||||||
Returns a list of Path objects.
|
|
||||||
"""
|
|
||||||
if root is None:
|
|
||||||
# Auto-detect user's mail path
|
|
||||||
home_dir = os.path.expanduser("~")
|
|
||||||
root = os.path.join(home_dir, "Library", "Mail")
|
|
||||||
|
|
||||||
messages_dirs = []
|
|
||||||
for dirpath, dirnames, filenames in os.walk(root):
|
|
||||||
if os.path.basename(dirpath) == "Messages":
|
|
||||||
messages_dirs.append(Path(dirpath))
|
|
||||||
return messages_dirs
|
|
||||||
|
|
||||||
class EmlxReader(BaseReader):
|
|
||||||
"""
|
|
||||||
Apple Mail .emlx file reader with embedded metadata.
|
|
||||||
|
|
||||||
Reads individual .emlx files from Apple Mail's storage format.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, include_html: bool = False) -> None:
|
|
||||||
"""
|
|
||||||
Initialize.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
include_html: Whether to include HTML content in the email body (default: False)
|
|
||||||
"""
|
|
||||||
self.include_html = include_html
|
|
||||||
|
|
||||||
def load_data(self, input_dir: str, **load_kwargs: Any) -> List[Document]:
|
|
||||||
"""
|
|
||||||
Load data from the input directory containing .emlx files.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
input_dir: Directory containing .emlx files
|
|
||||||
**load_kwargs:
|
|
||||||
max_count (int): Maximum amount of messages to read.
|
|
||||||
"""
|
|
||||||
docs: List[Document] = []
|
|
||||||
max_count = load_kwargs.get('max_count', 1000)
|
|
||||||
count = 0
|
|
||||||
|
|
||||||
# Walk through the directory recursively
|
|
||||||
for dirpath, dirnames, filenames in os.walk(input_dir):
|
|
||||||
# Skip hidden directories
|
|
||||||
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
|
|
||||||
|
|
||||||
for filename in filenames:
|
|
||||||
if count >= max_count:
|
|
||||||
break
|
|
||||||
|
|
||||||
if filename.endswith(".emlx"):
|
|
||||||
filepath = os.path.join(dirpath, filename)
|
|
||||||
try:
|
|
||||||
# Read the .emlx file
|
|
||||||
with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
|
|
||||||
content = f.read()
|
|
||||||
|
|
||||||
# .emlx files have a length prefix followed by the email content
|
|
||||||
# The first line contains the length, followed by the email
|
|
||||||
lines = content.split('\n', 1)
|
|
||||||
if len(lines) >= 2:
|
|
||||||
email_content = lines[1]
|
|
||||||
|
|
||||||
# Parse the email using Python's email module
|
|
||||||
try:
|
|
||||||
msg = email.message_from_string(email_content)
|
|
||||||
|
|
||||||
# Extract email metadata
|
|
||||||
subject = msg.get('Subject', 'No Subject')
|
|
||||||
from_addr = msg.get('From', 'Unknown')
|
|
||||||
to_addr = msg.get('To', 'Unknown')
|
|
||||||
date = msg.get('Date', 'Unknown')
|
|
||||||
|
|
||||||
# Extract email body
|
|
||||||
body = ""
|
|
||||||
if msg.is_multipart():
|
|
||||||
for part in msg.walk():
|
|
||||||
if part.get_content_type() == "text/plain" or part.get_content_type() == "text/html":
|
|
||||||
if part.get_content_type() == "text/html" and not self.include_html:
|
|
||||||
continue
|
|
||||||
body += part.get_payload(decode=True).decode('utf-8', errors='ignore')
|
|
||||||
# break
|
|
||||||
else:
|
|
||||||
body = msg.get_payload(decode=True).decode('utf-8', errors='ignore')
|
|
||||||
|
|
||||||
# Create document content with metadata embedded in text
|
|
||||||
doc_content = f"""
|
|
||||||
[File]: {filename}
|
|
||||||
[From]: {from_addr}
|
|
||||||
[To]: {to_addr}
|
|
||||||
[Subject]: {subject}
|
|
||||||
[Date]: {date}
|
|
||||||
[EMAIL BODY Start]:
|
|
||||||
{body}
|
|
||||||
"""
|
|
||||||
|
|
||||||
# No separate metadata - everything is in the text
|
|
||||||
doc = Document(text=doc_content, metadata={})
|
|
||||||
docs.append(doc)
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error parsing email from {filepath}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error reading file {filepath}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
print(f"Loaded {len(docs)} email documents")
|
|
||||||
return docs
|
|
||||||
@@ -1,285 +0,0 @@
|
|||||||
import os
|
|
||||||
import asyncio
|
|
||||||
import argparse
|
|
||||||
try:
|
|
||||||
import dotenv
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
except ModuleNotFoundError:
|
|
||||||
# python-dotenv is not installed; skip loading environment variables
|
|
||||||
dotenv = None
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
|
|
||||||
# dotenv.load_dotenv() # handled above if python-dotenv is available
|
|
||||||
|
|
||||||
# Default Chrome profile path
|
|
||||||
DEFAULT_CHROME_PROFILE = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")
|
|
||||||
|
|
||||||
def create_leann_index_from_multiple_chrome_profiles(profile_dirs: List[Path], index_path: str = "chrome_history_index.leann", max_count: int = -1):
|
|
||||||
"""
|
|
||||||
Create LEANN index from multiple Chrome profile data sources.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
profile_dirs: List of Path objects pointing to Chrome profile directories
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of history entries to process per profile
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from multiple Chrome profile data sources...")
|
|
||||||
|
|
||||||
# Load documents using ChromeHistoryReader from history_data
|
|
||||||
from history_data.history import ChromeHistoryReader
|
|
||||||
reader = ChromeHistoryReader()
|
|
||||||
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
all_documents = []
|
|
||||||
total_processed = 0
|
|
||||||
|
|
||||||
# Process each Chrome profile directory
|
|
||||||
for i, profile_dir in enumerate(profile_dirs):
|
|
||||||
print(f"\nProcessing Chrome profile {i+1}/{len(profile_dirs)}: {profile_dir}")
|
|
||||||
|
|
||||||
try:
|
|
||||||
documents = reader.load_data(
|
|
||||||
chrome_profile_path=str(profile_dir),
|
|
||||||
max_count=max_count
|
|
||||||
)
|
|
||||||
if documents:
|
|
||||||
print(f"Loaded {len(documents)} history documents from {profile_dir}")
|
|
||||||
all_documents.extend(documents)
|
|
||||||
total_processed += len(documents)
|
|
||||||
|
|
||||||
# Check if we've reached the max count
|
|
||||||
if max_count > 0 and total_processed >= max_count:
|
|
||||||
print(f"Reached max count of {max_count} documents")
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
print(f"No documents loaded from {profile_dir}")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error processing {profile_dir}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
if not all_documents:
|
|
||||||
print("No documents loaded from any source. Exiting.")
|
|
||||||
# highlight info that you need to close all chrome browser before running this script and high light the instruction!!
|
|
||||||
print("\033[91mYou need to close or quit all chrome browser before running this script\033[0m")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(f"\nTotal loaded {len(all_documents)} history documents from {len(profile_dirs)} profiles")
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in all_documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
text = node.get_content()
|
|
||||||
# text = '[Title] ' + doc.metadata["title"] + '\n' + text
|
|
||||||
all_texts.append(text)
|
|
||||||
|
|
||||||
print(f"Created {len(all_texts)} text chunks from {len(all_documents)} documents")
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="facebook/contriever",
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1 # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} history chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
def create_leann_index(profile_path: str = None, index_path: str = "chrome_history_index.leann", max_count: int = 1000):
|
|
||||||
"""
|
|
||||||
Create LEANN index from Chrome history data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
profile_path: Path to the Chrome profile directory (optional, uses default if None)
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of history entries to process
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from Chrome history data...")
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Load documents using ChromeHistoryReader from history_data
|
|
||||||
from history_data.history import ChromeHistoryReader
|
|
||||||
reader = ChromeHistoryReader()
|
|
||||||
|
|
||||||
documents = reader.load_data(
|
|
||||||
chrome_profile_path=profile_path,
|
|
||||||
max_count=max_count
|
|
||||||
)
|
|
||||||
|
|
||||||
if not documents:
|
|
||||||
print("No documents loaded. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(f"Loaded {len(documents)} history documents")
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
all_texts.append(node.get_content())
|
|
||||||
|
|
||||||
print(f"Created {len(all_texts)} text chunks from {len(documents)} documents")
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="facebook/contriever",
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1 # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} history chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
async def query_leann_index(index_path: str, query: str):
|
|
||||||
"""
|
|
||||||
Query the LEANN index.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
index_path: Path to the LEANN index
|
|
||||||
query: The query string
|
|
||||||
"""
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
chat = LeannChat(index_path=index_path)
|
|
||||||
|
|
||||||
print(f"You: {query}")
|
|
||||||
chat_response = chat.ask(
|
|
||||||
query,
|
|
||||||
top_k=10,
|
|
||||||
recompute_beighbor_embeddings=True,
|
|
||||||
complexity=32,
|
|
||||||
beam_width=1,
|
|
||||||
llm_config={
|
|
||||||
"type": "openai",
|
|
||||||
"model": "gpt-4o",
|
|
||||||
"api_key": os.getenv("OPENAI_API_KEY"),
|
|
||||||
},
|
|
||||||
llm_kwargs={
|
|
||||||
"temperature": 0.0,
|
|
||||||
"max_tokens": 1000
|
|
||||||
}
|
|
||||||
)
|
|
||||||
print(f"Leann: {chat_response}")
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
# Parse command line arguments
|
|
||||||
parser = argparse.ArgumentParser(description='LEANN Chrome History Reader - Create and query browser history index')
|
|
||||||
parser.add_argument('--chrome-profile', type=str, default=DEFAULT_CHROME_PROFILE,
|
|
||||||
help=f'Path to Chrome profile directory (default: {DEFAULT_CHROME_PROFILE}); you usually do not need to change this')
|
|
||||||
parser.add_argument('--index-dir', type=str, default="./all_google_new",
|
|
||||||
help='Directory to store the LEANN index (default: ./all_google_new)')
|
|
||||||
parser.add_argument('--max-entries', type=int, default=1000,
|
|
||||||
help='Maximum number of history entries to process (default: 1000)')
|
|
||||||
parser.add_argument('--query', type=str, default=None,
|
|
||||||
help='Single query to run (default: runs example queries)')
|
|
||||||
parser.add_argument('--auto-find-profiles', action=argparse.BooleanOptionalAction, default=True,
help='Automatically find all Chrome profiles (default: True; pass --no-auto-find-profiles to use only --chrome-profile)')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
INDEX_DIR = Path(args.index_dir)
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "chrome_history.leann")
|
|
||||||
|
|
||||||
print(f"Using Chrome profile: {args.chrome_profile}")
|
|
||||||
print(f"Index directory: {INDEX_DIR}")
|
|
||||||
print(f"Max entries: {args.max_entries}")
|
|
||||||
|
|
||||||
# Find Chrome profile directories
|
|
||||||
from history_data.history import ChromeHistoryReader
|
|
||||||
|
|
||||||
if args.auto_find_profiles:
|
|
||||||
profile_dirs = ChromeHistoryReader.find_chrome_profiles()
|
|
||||||
if not profile_dirs:
|
|
||||||
print("No Chrome profiles found automatically. Exiting.")
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
# Use single specified profile
|
|
||||||
profile_path = Path(args.chrome_profile)
|
|
||||||
if not profile_path.exists():
|
|
||||||
print(f"Chrome profile not found: {profile_path}")
|
|
||||||
return
|
|
||||||
profile_dirs = [profile_path]
|
|
||||||
|
|
||||||
# Create or load the LEANN index from all sources
|
|
||||||
index_path = create_leann_index_from_multiple_chrome_profiles(profile_dirs, INDEX_PATH, args.max_entries)
|
|
||||||
|
|
||||||
if index_path:
|
|
||||||
if args.query:
|
|
||||||
# Run single query
|
|
||||||
await query_leann_index(index_path, args.query)
|
|
||||||
else:
|
|
||||||
# Example queries
|
|
||||||
queries = [
|
|
||||||
"What websites did I visit about machine learning?",
|
|
||||||
"Find my search history about programming"
|
|
||||||
]
|
|
||||||
|
|
||||||
for query in queries:
|
|
||||||
print("\n" + "="*60)
|
|
||||||
await query_leann_index(index_path, query)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
asyncio.run(main())
|
|
||||||
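For quick experimentation outside the CLI above, the same build-then-search flow can be driven directly; the sketch below reuses the LeannBuilder settings from this script and the LeannSearcher calls shown in the OpenAI example later in this diff. The index path and the two history snippets are made up for illustration.

from pathlib import Path
from leann.api import LeannBuilder, LeannSearcher

INDEX_PATH = "./demo_chrome_index/chrome_history.leann"  # hypothetical location
Path(INDEX_PATH).parent.mkdir(exist_ok=True)

builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="facebook/contriever",
    graph_degree=32,
    complexity=64,
    is_compact=True,
    is_recompute=True,
    num_threads=1,
)
# Two made-up history chunks stand in for the output of ChromeHistoryReader.
builder.add_text("Visited https://pytorch.org - tutorials on training neural networks")
builder.add_text("Visited https://docs.python.org - argparse command-line parsing")
builder.build_index(INDEX_PATH)

searcher = LeannSearcher(INDEX_PATH)
for result in searcher.search("machine learning tutorials", top_k=2):
    print(f"{result.score:.4f} {result.text}")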
@@ -1,288 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import asyncio
|
|
||||||
import dotenv
|
|
||||||
import argparse
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any
|
|
||||||
|
|
||||||
# Add the project root to Python path so we can import from examples
|
|
||||||
project_root = Path(__file__).parent.parent
|
|
||||||
sys.path.insert(0, str(project_root))
|
|
||||||
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
|
|
||||||
# Auto-detect user's mail path
|
|
||||||
def get_mail_path():
|
|
||||||
"""Get the mail path for the current user"""
|
|
||||||
home_dir = os.path.expanduser("~")
|
|
||||||
return os.path.join(home_dir, "Library", "Mail")
|
|
||||||
|
|
||||||
# Default mail path for macOS
|
|
||||||
# DEFAULT_MAIL_PATH = "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data"
|
|
||||||
|
|
||||||
def create_leann_index_from_multiple_sources(messages_dirs: List[Path], index_path: str = "mail_index.leann", max_count: int = -1, include_html: bool = False, embedding_model: str = "facebook/contriever"):
|
|
||||||
"""
|
|
||||||
Create LEANN index from multiple mail data sources.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
messages_dirs: List of Path objects pointing to Messages directories
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of emails to process per directory
|
|
||||||
include_html: Whether to include HTML content in email processing
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from multiple mail data sources...")
|
|
||||||
|
|
||||||
# Load documents using EmlxReader from LEANN_email_reader
|
|
||||||
from examples.email_data.LEANN_email_reader import EmlxReader
|
|
||||||
reader = EmlxReader(include_html=include_html)
|
|
||||||
# from email_data.email import EmlxMboxReader
|
|
||||||
# from pathlib import Path
|
|
||||||
# reader = EmlxMboxReader()
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
all_documents = []
|
|
||||||
total_processed = 0
|
|
||||||
|
|
||||||
# Process each Messages directory
|
|
||||||
for i, messages_dir in enumerate(messages_dirs):
|
|
||||||
print(f"\nProcessing Messages directory {i+1}/{len(messages_dirs)}: {messages_dir}")
|
|
||||||
|
|
||||||
try:
|
|
||||||
documents = reader.load_data(messages_dir)
|
|
||||||
if documents:
|
|
||||||
print(f"Loaded {len(documents)} email documents from {messages_dir}")
|
|
||||||
all_documents.extend(documents)
|
|
||||||
total_processed += len(documents)
|
|
||||||
|
|
||||||
# Check if we've reached the max count
|
|
||||||
if max_count > 0 and total_processed >= max_count:
|
|
||||||
print(f"Reached max count of {max_count} documents")
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
print(f"No documents loaded from {messages_dir}")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error processing {messages_dir}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
if not all_documents:
|
|
||||||
print("No documents loaded from any source. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(f"\nTotal loaded {len(all_documents)} email documents from {len(messages_dirs)} directories and starting to split them into chunks")
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in all_documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
text = node.get_content()
|
|
||||||
# text = '[subject] ' + doc.metadata["subject"] + '\n' + text
|
|
||||||
all_texts.append(text)
|
|
||||||
|
|
||||||
print(f"Finished splitting {len(all_documents)} documents into {len(all_texts)} text chunks")
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model=embedding_model,
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1 # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} email chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
def create_leann_index(mail_path: str, index_path: str = "mail_index.leann", max_count: int = 1000, include_html: bool = False, embedding_model: str = "facebook/contriever"):
|
|
||||||
"""
|
|
||||||
Create LEANN index from mail data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
mail_path: Path to the mail directory
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of emails to process
|
|
||||||
include_html: Whether to include HTML content in email processing
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from mail data...")
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Load documents using EmlxReader from LEANN_email_reader
|
|
||||||
from examples.email_data.LEANN_email_reader import EmlxReader
|
|
||||||
reader = EmlxReader(include_html=include_html)
|
|
||||||
# from email_data.email import EmlxMboxReader
|
|
||||||
# from pathlib import Path
|
|
||||||
# reader = EmlxMboxReader()
|
|
||||||
documents = reader.load_data(Path(mail_path))
|
|
||||||
|
|
||||||
if not documents:
|
|
||||||
print("No documents loaded. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(f"Loaded {len(documents)} email documents")
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
all_texts.append(node.get_content())
|
|
||||||
|
|
||||||
print(f"Created {len(all_texts)} text chunks from {len(documents)} documents")
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model=embedding_model,
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1 # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} email chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
async def query_leann_index(index_path: str, query: str):
|
|
||||||
"""
|
|
||||||
Query the LEANN index.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
index_path: Path to the LEANN index
|
|
||||||
query: The query string
|
|
||||||
"""
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
chat = LeannChat(index_path=index_path,
|
|
||||||
llm_config={"type": "openai", "model": "gpt-4o"})
|
|
||||||
|
|
||||||
print(f"You: {query}")
|
|
||||||
import time
|
|
||||||
start_time = time.time()
|
|
||||||
chat_response = chat.ask(
|
|
||||||
query,
|
|
||||||
top_k=10,
|
|
||||||
recompute_beighbor_embeddings=True,
|
|
||||||
complexity=12,
|
|
||||||
beam_width=1,
|
|
||||||
|
|
||||||
)
|
|
||||||
end_time = time.time()
|
|
||||||
print(f"Time taken: {end_time - start_time} seconds")
|
|
||||||
print(f"Leann: {chat_response}")
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
# Parse command line arguments
|
|
||||||
parser = argparse.ArgumentParser(description='LEANN Mail Reader - Create and query email index')
|
|
||||||
# --mail-path and DEFAULT_MAIL_PATH have been removed; all Messages directories are auto-detected below
|
|
||||||
parser.add_argument('--index-dir', type=str, default="./mail_index_leann_debug",
|
|
||||||
help='Directory to store the LEANN index (default: ./mail_index_leann_debug)')
|
|
||||||
parser.add_argument('--max-emails', type=int, default=1000,
|
|
||||||
help='Maximum number of emails to process (-1 means all)')
|
|
||||||
parser.add_argument('--query', type=str, default="Give me some funny advertisement about apple or other companies",
|
|
||||||
help='Single query to run (default: a sample advertisement query)')
|
|
||||||
parser.add_argument('--include-html', action='store_true', default=False,
|
|
||||||
help='Include HTML content in email processing (default: False)')
|
|
||||||
parser.add_argument('--embedding-model', type=str, default="facebook/contriever",
|
|
||||||
help='Embedding model to use (default: facebook/contriever)')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
print(f"args: {args}")
|
|
||||||
|
|
||||||
# Automatically find all Messages directories under the current user's Mail directory
|
|
||||||
from examples.email_data.LEANN_email_reader import find_all_messages_directories
|
|
||||||
mail_path = get_mail_path()
|
|
||||||
print(f"Searching for email data in: {mail_path}")
|
|
||||||
messages_dirs = find_all_messages_directories(mail_path)
|
|
||||||
|
|
||||||
print('len(messages_dirs): ', len(messages_dirs))
|
|
||||||
|
|
||||||
|
|
||||||
if not messages_dirs:
|
|
||||||
print("No Messages directories found. Exiting.")
|
|
||||||
return
|
|
||||||
|
|
||||||
INDEX_DIR = Path(args.index_dir)
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "mail_documents.leann")
|
|
||||||
print(f"Index directory: {INDEX_DIR}")
|
|
||||||
print(f"Found {len(messages_dirs)} Messages directories.")
|
|
||||||
|
|
||||||
# Create or load the LEANN index from all sources
|
|
||||||
index_path = create_leann_index_from_multiple_sources(messages_dirs, INDEX_PATH, args.max_emails, args.include_html, args.embedding_model)
|
|
||||||
|
|
||||||
if index_path:
|
|
||||||
if args.query:
|
|
||||||
# Run single query
|
|
||||||
await query_leann_index(index_path, args.query)
|
|
||||||
else:
|
|
||||||
# Example queries
|
|
||||||
queries = [
|
|
||||||
"Hows Berkeley Graduate Student Instructor",
|
|
||||||
"how's the icloud related advertisement saying",
|
|
||||||
"Whats the number of class recommend to take per semester for incoming EECS students"
|
|
||||||
]
|
|
||||||
for query in queries:
|
|
||||||
print("\n" + "="*60)
|
|
||||||
await query_leann_index(index_path, query)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
asyncio.run(main())
|
|
||||||
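Once the script above has built the index, it can also be queried interactively. The loop below is a minimal sketch: the index path matches the script's defaults (--index-dir ./mail_index_leann_debug), the chat.ask arguments mirror the ones used above, and an OPENAI_API_KEY is assumed for the gpt-4o backend.

from leann.api import LeannChat

chat = LeannChat(index_path="./mail_index_leann_debug/mail_documents.leann",
                 llm_config={"type": "openai", "model": "gpt-4o"})

while True:
    question = input("You: ").strip()
    if question.lower() in {"", "quit", "exit"}:
        break
    answer = chat.ask(question, top_k=10, recompute_beighbor_embeddings=True,
                      complexity=12, beam_width=1)
    print(f"Leann: {answer}")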
@@ -1,108 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import argparse
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any
|
|
||||||
|
|
||||||
# Add the project root to Python path so we can import from examples
|
|
||||||
project_root = Path(__file__).parent.parent
|
|
||||||
sys.path.insert(0, str(project_root))
|
|
||||||
|
|
||||||
from llama_index.core import VectorStoreIndex, StorageContext
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
|
|
||||||
# --- EMBEDDING MODEL ---
|
|
||||||
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
|
||||||
import torch
|
|
||||||
|
|
||||||
# --- END EMBEDDING MODEL ---
|
|
||||||
|
|
||||||
# Import EmlxReader from the new module
|
|
||||||
from examples.email_data.LEANN_email_reader import EmlxReader
|
|
||||||
|
|
||||||
def create_and_save_index(mail_path: str, save_dir: str = "mail_index_embedded", max_count: int = 1000, include_html: bool = False):
|
|
||||||
print("Creating index from mail data with embedded metadata...")
|
|
||||||
documents = EmlxReader(include_html=include_html).load_data(mail_path, max_count=max_count)
|
|
||||||
if not documents:
|
|
||||||
print("No documents loaded. Exiting.")
|
|
||||||
return None
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
|
|
||||||
# Use facebook/contriever as the embedder
|
|
||||||
embed_model = HuggingFaceEmbedding(model_name="facebook/contriever")
|
|
||||||
# Move the embedding model to the best available device (CUDA, MPS, or CPU)
|
|
||||||
import torch
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
embed_model._model.to("cuda")
|
|
||||||
# set mps
|
|
||||||
elif torch.backends.mps.is_available():
|
|
||||||
embed_model._model.to("mps")
|
|
||||||
else:
|
|
||||||
embed_model._model.to("cpu")
|
|
||||||
index = VectorStoreIndex.from_documents(
|
|
||||||
documents,
|
|
||||||
transformations=[text_splitter],
|
|
||||||
embed_model=embed_model
|
|
||||||
)
|
|
||||||
os.makedirs(save_dir, exist_ok=True)
|
|
||||||
index.storage_context.persist(persist_dir=save_dir)
|
|
||||||
print(f"Index saved to {save_dir}")
|
|
||||||
return index
|
|
||||||
|
|
||||||
def load_index(save_dir: str = "mail_index_embedded"):
|
|
||||||
try:
|
|
||||||
storage_context = StorageContext.from_defaults(persist_dir=save_dir)
|
|
||||||
index = VectorStoreIndex.from_vector_store(
|
|
||||||
storage_context.vector_store,
|
|
||||||
storage_context=storage_context
|
|
||||||
)
|
|
||||||
print(f"Index loaded from {save_dir}")
|
|
||||||
return index
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error loading index: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
def query_index(index, query: str):
|
|
||||||
if index is None:
|
|
||||||
print("No index available for querying.")
|
|
||||||
return
|
|
||||||
query_engine = index.as_query_engine()
|
|
||||||
response = query_engine.query(query)
|
|
||||||
print(f"Query: {query}")
|
|
||||||
print(f"Response: {response}")
|
|
||||||
|
|
||||||
def main():
|
|
||||||
# Parse command line arguments
|
|
||||||
parser = argparse.ArgumentParser(description='LlamaIndex Mail Reader - Create and query email index')
|
|
||||||
parser.add_argument('--mail-path', type=str,
|
|
||||||
default="/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data/9/Messages",
|
|
||||||
help='Path to mail data directory')
|
|
||||||
parser.add_argument('--save-dir', type=str, default="mail_index_embedded",
|
|
||||||
help='Directory to store the index (default: mail_index_embedded)')
|
|
||||||
parser.add_argument('--max-emails', type=int, default=10000,
|
|
||||||
help='Maximum number of emails to process')
|
|
||||||
parser.add_argument('--include-html', action='store_true', default=False,
|
|
||||||
help='Include HTML content in email processing (default: False)')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
mail_path = args.mail_path
|
|
||||||
save_dir = args.save_dir
|
|
||||||
|
|
||||||
if os.path.exists(save_dir) and os.path.exists(os.path.join(save_dir, "vector_store.json")):
|
|
||||||
print("Loading existing index...")
|
|
||||||
index = load_index(save_dir)
|
|
||||||
else:
|
|
||||||
print("Creating new index...")
|
|
||||||
index = create_and_save_index(mail_path, save_dir, max_count=args.max_emails, include_html=args.include_html)
|
|
||||||
if index:
|
|
||||||
queries = [
|
|
||||||
"Hows Berkeley Graduate Student Instructor",
|
|
||||||
"how's the icloud related advertisement saying",
|
|
||||||
"Whats the number of class recommend to take per semester for incoming EECS students"
|
|
||||||
]
|
|
||||||
for query in queries:
|
|
||||||
print("\n" + "="*50)
|
|
||||||
query_index(index, query)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
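One caveat with the persisted index above: it is built with the facebook/contriever embedder, so the same embedding model should be active when the index is reloaded, otherwise query vectors will not match the stored ones. A hedged sketch of a reload that pins the embedder, assuming a recent llama-index release that exposes Settings and load_index_from_storage (and an OpenAI key for the default response LLM):

from llama_index.core import Settings, StorageContext, load_index_from_storage
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Reuse the same embedder the index was built with.
Settings.embed_model = HuggingFaceEmbedding(model_name="facebook/contriever")

storage_context = StorageContext.from_defaults(persist_dir="mail_index_embedded")
index = load_index_from_storage(storage_context)
response = index.as_query_engine().query("How's the Berkeley Graduate Student Instructor position?")
print(response)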
@@ -1,115 +0,0 @@
|
|||||||
import argparse
|
|
||||||
from llama_index.core import SimpleDirectoryReader
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
import asyncio
|
|
||||||
import dotenv
|
|
||||||
from leann.api import LeannBuilder, LeannChat
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
|
|
||||||
|
|
||||||
async def main(args):
|
|
||||||
INDEX_DIR = Path(args.index_dir)
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "pdf_documents.leann")
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
node_parser = SentenceSplitter(
|
|
||||||
chunk_size=256, chunk_overlap=128, separator=" ", paragraph_separator="\n\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
print("Loading documents...")
|
|
||||||
documents = SimpleDirectoryReader(
|
|
||||||
args.data_dir,
|
|
||||||
recursive=True,
|
|
||||||
encoding="utf-8",
|
|
||||||
required_exts=[".pdf", ".txt", ".md"],
|
|
||||||
).load_data(show_progress=True)
|
|
||||||
print("Documents loaded.")
|
|
||||||
all_texts = []
|
|
||||||
for doc in documents:
|
|
||||||
nodes = node_parser.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
all_texts.append(node.get_content())
|
|
||||||
|
|
||||||
print("--- Index directory not found, building new index ---")
|
|
||||||
|
|
||||||
print("\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="facebook/contriever",
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1, # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Loaded {len(all_texts)} text chunks from documents.")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(INDEX_PATH)
|
|
||||||
print(f"\nLeann index built at {INDEX_PATH}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
|
|
||||||
llm_config = {"type": "hf", "model": "Qwen/Qwen3-4B"}
|
|
||||||
llm_config = {"type": "ollama", "model": "qwen3:8b"}
|
|
||||||
llm_config = {"type": "openai", "model": "gpt-4o"}
|
|
||||||
|
|
||||||
chat = LeannChat(index_path=INDEX_PATH, llm_config=llm_config)
|
|
||||||
|
|
||||||
query = "Based on the paper, what are the main techniques LEANN explores to reduce the storage overhead and DLPM explore to achieve Fairness and Efiiciency trade-off?"
|
|
||||||
|
|
||||||
# query = (
|
|
||||||
# "什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发"
|
|
||||||
# )
|
|
||||||
|
|
||||||
print(f"You: {query}")
|
|
||||||
chat_response = chat.ask(query, top_k=20, recompute_embeddings=True, complexity=32)
|
|
||||||
print(f"Leann: {chat_response}")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description="Run Leann Chat with various LLM backends."
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--llm",
|
|
||||||
type=str,
|
|
||||||
default="hf",
|
|
||||||
choices=["simulated", "ollama", "hf", "openai"],
|
|
||||||
help="The LLM backend to use.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--model",
|
|
||||||
type=str,
|
|
||||||
default="Qwen/Qwen3-0.6B",
|
|
||||||
help="The model name to use (e.g., 'llama3:8b' for ollama, 'deepseek-ai/deepseek-llm-7b-chat' for hf, 'gpt-4o' for openai).",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--host",
|
|
||||||
type=str,
|
|
||||||
default="http://localhost:11434",
|
|
||||||
help="The host for the Ollama API.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--index-dir",
|
|
||||||
type=str,
|
|
||||||
default="./test_doc_files",
|
|
||||||
help="Directory where the Leann index will be stored.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--data-dir",
|
|
||||||
type=str,
|
|
||||||
default="examples/data",
|
|
||||||
help="Directory containing documents to index (PDF, TXT, MD files).",
|
|
||||||
)
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
asyncio.run(main(args))
|
|
||||||
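The chunking settings above (chunk_size=256, chunk_overlap=128, token-based) determine how many Leann entries each document contributes. A small standalone illustration with a made-up document:

from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter

node_parser = SentenceSplitter(
    chunk_size=256, chunk_overlap=128, separator=" ", paragraph_separator="\n\n"
)
doc = Document(text="LEANN keeps the index compact and recomputes embeddings at query time. " * 40)
nodes = node_parser.get_nodes_from_documents([doc])
print(f"{len(nodes)} chunks; first chunk starts with: {nodes[0].get_content()[:60]}...")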
@@ -1,5 +1,6 @@
 import os
-from leann.api import LeannBuilder, LeannSearcher, LeannChat
+
+from leann.api import LeannBuilder, LeannChat
 
 # Define the path for our new MLX-based index
 INDEX_PATH = "./mlx_diskann_index/leann"
@@ -38,7 +39,5 @@ chat = LeannChat(index_path=INDEX_PATH)
 # add query
 query = "MLX is an array framework for machine learning on Apple silicon."
 print(f"Query: {query}")
-response = chat.ask(
-    query, top_k=3, recompute_beighbor_embeddings=True, complexity=3, beam_width=1
-)
+response = chat.ask(query, top_k=3, recompute_beighbor_embeddings=True, complexity=3, beam_width=1)
 print(f"Response: {response}")
|
||||||
@@ -1,319 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""
|
|
||||||
Multi-Vector Aggregator for Fat Embeddings
|
|
||||||
==========================================
|
|
||||||
|
|
||||||
This module implements aggregation strategies for multi-vector embeddings,
|
|
||||||
similar to ColPali's approach where multiple patch vectors represent a single document.
|
|
||||||
|
|
||||||
Key features:
|
|
||||||
- MaxSim aggregation (take maximum similarity across patches)
|
|
||||||
- Voting-based aggregation (count patch matches)
|
|
||||||
- Weighted aggregation (attention-score weighted)
|
|
||||||
- Spatial clustering of matching patches
|
|
||||||
- Document-level result consolidation
|
|
||||||
"""
|
|
||||||
|
|
||||||
import numpy as np
|
|
||||||
from typing import List, Dict, Any, Tuple, Optional
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from collections import defaultdict
|
|
||||||
import json
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class PatchResult:
|
|
||||||
"""Represents a single patch search result."""
|
|
||||||
patch_id: int
|
|
||||||
image_name: str
|
|
||||||
image_path: str
|
|
||||||
coordinates: Tuple[int, int, int, int] # (x1, y1, x2, y2)
|
|
||||||
score: float
|
|
||||||
attention_score: float
|
|
||||||
scale: float
|
|
||||||
metadata: Dict[str, Any]
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class AggregatedResult:
|
|
||||||
"""Represents an aggregated document-level result."""
|
|
||||||
image_name: str
|
|
||||||
image_path: str
|
|
||||||
doc_score: float
|
|
||||||
patch_count: int
|
|
||||||
best_patch: PatchResult
|
|
||||||
all_patches: List[PatchResult]
|
|
||||||
aggregation_method: str
|
|
||||||
spatial_clusters: Optional[List[List[PatchResult]]] = None
|
|
||||||
|
|
||||||
class MultiVectorAggregator:
|
|
||||||
"""
|
|
||||||
Aggregates multiple patch-level results into document-level results.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self,
|
|
||||||
aggregation_method: str = "maxsim",
|
|
||||||
spatial_clustering: bool = True,
|
|
||||||
cluster_distance_threshold: float = 100.0):
|
|
||||||
"""
|
|
||||||
Initialize the aggregator.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
aggregation_method: "maxsim", "voting", "weighted", or "mean"
|
|
||||||
spatial_clustering: Whether to cluster spatially close patches
|
|
||||||
cluster_distance_threshold: Distance threshold for spatial clustering
|
|
||||||
"""
|
|
||||||
self.aggregation_method = aggregation_method
|
|
||||||
self.spatial_clustering = spatial_clustering
|
|
||||||
self.cluster_distance_threshold = cluster_distance_threshold
|
|
||||||
|
|
||||||
def aggregate_results(self,
|
|
||||||
search_results: List[Dict[str, Any]],
|
|
||||||
top_k: int = 10) -> List[AggregatedResult]:
|
|
||||||
"""
|
|
||||||
Aggregate patch-level search results into document-level results.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
search_results: List of search results from LeannSearcher
|
|
||||||
top_k: Number of top documents to return
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of aggregated document results
|
|
||||||
"""
|
|
||||||
# Group results by image
|
|
||||||
image_groups = defaultdict(list)
|
|
||||||
|
|
||||||
for result in search_results:
|
|
||||||
metadata = result.metadata
|
|
||||||
if "image_name" in metadata and "patch_id" in metadata:
|
|
||||||
patch_result = PatchResult(
|
|
||||||
patch_id=metadata["patch_id"],
|
|
||||||
image_name=metadata["image_name"],
|
|
||||||
image_path=metadata["image_path"],
|
|
||||||
coordinates=tuple(metadata["coordinates"]),
|
|
||||||
score=result.score,
|
|
||||||
attention_score=metadata.get("attention_score", 0.0),
|
|
||||||
scale=metadata.get("scale", 1.0),
|
|
||||||
metadata=metadata
|
|
||||||
)
|
|
||||||
image_groups[metadata["image_name"]].append(patch_result)
|
|
||||||
|
|
||||||
# Aggregate each image group
|
|
||||||
aggregated_results = []
|
|
||||||
for image_name, patches in image_groups.items():
|
|
||||||
if len(patches) == 0:
|
|
||||||
continue
|
|
||||||
|
|
||||||
agg_result = self._aggregate_image_patches(image_name, patches)
|
|
||||||
aggregated_results.append(agg_result)
|
|
||||||
|
|
||||||
# Sort by aggregated score and return top-k
|
|
||||||
aggregated_results.sort(key=lambda x: x.doc_score, reverse=True)
|
|
||||||
return aggregated_results[:top_k]
|
|
||||||
|
|
||||||
def _aggregate_image_patches(self, image_name: str, patches: List[PatchResult]) -> AggregatedResult:
|
|
||||||
"""Aggregate patches for a single image."""
|
|
||||||
|
|
||||||
if self.aggregation_method == "maxsim":
|
|
||||||
doc_score = max(patch.score for patch in patches)
|
|
||||||
best_patch = max(patches, key=lambda p: p.score)
|
|
||||||
|
|
||||||
elif self.aggregation_method == "voting":
|
|
||||||
# Count patches above threshold
|
|
||||||
threshold = np.percentile([p.score for p in patches], 75)
|
|
||||||
doc_score = sum(1 for patch in patches if patch.score >= threshold)
|
|
||||||
best_patch = max(patches, key=lambda p: p.score)
|
|
||||||
|
|
||||||
elif self.aggregation_method == "weighted":
|
|
||||||
# Weight by attention scores
|
|
||||||
total_weighted_score = sum(p.score * p.attention_score for p in patches)
|
|
||||||
total_weights = sum(p.attention_score for p in patches)
|
|
||||||
doc_score = total_weighted_score / max(total_weights, 1e-8)
|
|
||||||
best_patch = max(patches, key=lambda p: p.score * p.attention_score)
|
|
||||||
|
|
||||||
elif self.aggregation_method == "mean":
|
|
||||||
doc_score = np.mean([patch.score for patch in patches])
|
|
||||||
best_patch = max(patches, key=lambda p: p.score)
|
|
||||||
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Unknown aggregation method: {self.aggregation_method}")
|
|
||||||
|
|
||||||
# Spatial clustering if enabled
|
|
||||||
spatial_clusters = None
|
|
||||||
if self.spatial_clustering:
|
|
||||||
spatial_clusters = self._cluster_patches_spatially(patches)
|
|
||||||
|
|
||||||
return AggregatedResult(
|
|
||||||
image_name=image_name,
|
|
||||||
image_path=patches[0].image_path,
|
|
||||||
doc_score=float(doc_score),
|
|
||||||
patch_count=len(patches),
|
|
||||||
best_patch=best_patch,
|
|
||||||
all_patches=sorted(patches, key=lambda p: p.score, reverse=True),
|
|
||||||
aggregation_method=self.aggregation_method,
|
|
||||||
spatial_clusters=spatial_clusters
|
|
||||||
)
|
|
||||||
|
|
||||||
def _cluster_patches_spatially(self, patches: List[PatchResult]) -> List[List[PatchResult]]:
|
|
||||||
"""Cluster patches that are spatially close to each other."""
|
|
||||||
if len(patches) <= 1:
|
|
||||||
return [patches]
|
|
||||||
|
|
||||||
clusters = []
|
|
||||||
remaining_patches = patches.copy()
|
|
||||||
|
|
||||||
while remaining_patches:
|
|
||||||
# Start new cluster with highest scoring remaining patch
|
|
||||||
seed_patch = max(remaining_patches, key=lambda p: p.score)
|
|
||||||
current_cluster = [seed_patch]
|
|
||||||
remaining_patches.remove(seed_patch)
|
|
||||||
|
|
||||||
# Add nearby patches to cluster
|
|
||||||
added_to_cluster = True
|
|
||||||
while added_to_cluster:
|
|
||||||
added_to_cluster = False
|
|
||||||
for patch in remaining_patches.copy():
|
|
||||||
if self._is_patch_nearby(patch, current_cluster):
|
|
||||||
current_cluster.append(patch)
|
|
||||||
remaining_patches.remove(patch)
|
|
||||||
added_to_cluster = True
|
|
||||||
|
|
||||||
clusters.append(current_cluster)
|
|
||||||
|
|
||||||
return sorted(clusters, key=lambda cluster: max(p.score for p in cluster), reverse=True)
|
|
||||||
|
|
||||||
def _is_patch_nearby(self, patch: PatchResult, cluster: List[PatchResult]) -> bool:
|
|
||||||
"""Check if a patch is spatially close to any patch in the cluster."""
|
|
||||||
patch_center = self._get_patch_center(patch.coordinates)
|
|
||||||
|
|
||||||
for cluster_patch in cluster:
|
|
||||||
cluster_center = self._get_patch_center(cluster_patch.coordinates)
|
|
||||||
distance = np.sqrt((patch_center[0] - cluster_center[0])**2 +
|
|
||||||
(patch_center[1] - cluster_center[1])**2)
|
|
||||||
|
|
||||||
if distance <= self.cluster_distance_threshold:
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _get_patch_center(self, coordinates: Tuple[int, int, int, int]) -> Tuple[float, float]:
|
|
||||||
"""Get center point of a patch."""
|
|
||||||
x1, y1, x2, y2 = coordinates
|
|
||||||
return ((x1 + x2) / 2, (y1 + y2) / 2)
|
|
||||||
|
|
||||||
def print_aggregated_results(self, results: List[AggregatedResult], max_patches_per_doc: int = 3):
|
|
||||||
"""Pretty print aggregated results."""
|
|
||||||
print(f"\n🔍 Aggregated Results (method: {self.aggregation_method})")
|
|
||||||
print("=" * 80)
|
|
||||||
|
|
||||||
for i, result in enumerate(results):
|
|
||||||
print(f"\n{i+1}. {result.image_name}")
|
|
||||||
print(f" Doc Score: {result.doc_score:.4f} | Patches: {result.patch_count}")
|
|
||||||
print(f" Path: {result.image_path}")
|
|
||||||
|
|
||||||
# Show best patch
|
|
||||||
best = result.best_patch
|
|
||||||
print(f" 🌟 Best Patch: #{best.patch_id} at {best.coordinates} (score: {best.score:.4f})")
|
|
||||||
|
|
||||||
# Show top patches
|
|
||||||
print(f" 📍 Top Patches:")
|
|
||||||
for j, patch in enumerate(result.all_patches[:max_patches_per_doc]):
|
|
||||||
print(f" {j+1}. Patch #{patch.patch_id}: {patch.score:.4f} at {patch.coordinates}")
|
|
||||||
|
|
||||||
# Show spatial clusters if available
|
|
||||||
if result.spatial_clusters and len(result.spatial_clusters) > 1:
|
|
||||||
print(f" 🗂️ Spatial Clusters: {len(result.spatial_clusters)}")
|
|
||||||
for j, cluster in enumerate(result.spatial_clusters[:2]): # Show top 2 clusters
|
|
||||||
cluster_score = max(p.score for p in cluster)
|
|
||||||
print(f" Cluster {j+1}: {len(cluster)} patches (best: {cluster_score:.4f})")
|
|
||||||
|
|
||||||
def demo_aggregation():
|
|
||||||
"""Demonstrate the multi-vector aggregation functionality."""
|
|
||||||
print("=== Multi-Vector Aggregation Demo ===")
|
|
||||||
|
|
||||||
# Simulate some patch-level search results
|
|
||||||
# In real usage, these would come from LeannSearcher.search()
|
|
||||||
|
|
||||||
class MockResult:
|
|
||||||
def __init__(self, score, metadata):
|
|
||||||
self.score = score
|
|
||||||
self.metadata = metadata
|
|
||||||
|
|
||||||
# Simulate results for 2 images with multiple patches each
|
|
||||||
mock_results = [
|
|
||||||
# Image 1: cats_and_kitchen.jpg - 4 patches
|
|
||||||
MockResult(0.85, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 3,
|
|
||||||
"coordinates": [100, 50, 224, 174], # Kitchen area
|
|
||||||
"attention_score": 0.92,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.78, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 7,
|
|
||||||
"coordinates": [200, 300, 324, 424], # Cat area
|
|
||||||
"attention_score": 0.88,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.72, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 12,
|
|
||||||
"coordinates": [150, 100, 274, 224], # Appliances
|
|
||||||
"attention_score": 0.75,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.65, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 15,
|
|
||||||
"coordinates": [50, 250, 174, 374], # Furniture
|
|
||||||
"attention_score": 0.70,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
|
|
||||||
# Image 2: city_street.jpg - 3 patches
|
|
||||||
MockResult(0.68, {
|
|
||||||
"image_name": "city_street.jpg",
|
|
||||||
"image_path": "/path/to/city_street.jpg",
|
|
||||||
"patch_id": 2,
|
|
||||||
"coordinates": [300, 100, 424, 224], # Buildings
|
|
||||||
"attention_score": 0.80,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.62, {
|
|
||||||
"image_name": "city_street.jpg",
|
|
||||||
"image_path": "/path/to/city_street.jpg",
|
|
||||||
"patch_id": 8,
|
|
||||||
"coordinates": [100, 350, 224, 474], # Street level
|
|
||||||
"attention_score": 0.75,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.55, {
|
|
||||||
"image_name": "city_street.jpg",
|
|
||||||
"image_path": "/path/to/city_street.jpg",
|
|
||||||
"patch_id": 11,
|
|
||||||
"coordinates": [400, 200, 524, 324], # Sky area
|
|
||||||
"attention_score": 0.60,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
]
|
|
||||||
|
|
||||||
# Test different aggregation methods
|
|
||||||
methods = ["maxsim", "voting", "weighted", "mean"]
|
|
||||||
|
|
||||||
for method in methods:
|
|
||||||
print(f"\n{'='*20} {method.upper()} AGGREGATION {'='*20}")
|
|
||||||
|
|
||||||
aggregator = MultiVectorAggregator(
|
|
||||||
aggregation_method=method,
|
|
||||||
spatial_clustering=True,
|
|
||||||
cluster_distance_threshold=100.0
|
|
||||||
)
|
|
||||||
|
|
||||||
aggregated = aggregator.aggregate_results(mock_results, top_k=5)
|
|
||||||
aggregator.print_aggregated_results(aggregated)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
demo_aggregation()
|
|
||||||
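As a quick sanity check of the "weighted" rule in _aggregate_image_patches, here is the arithmetic for the three mocked city_street.jpg patches above (scores 0.68/0.62/0.55, attention 0.80/0.75/0.60):

scores = [0.68, 0.62, 0.55]
attention = [0.80, 0.75, 0.60]
doc_score = sum(s * a for s, a in zip(scores, attention)) / max(sum(attention), 1e-8)
print(round(doc_score, 4))  # (0.544 + 0.465 + 0.330) / 2.15 ≈ 0.6228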
@@ -1,108 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""
|
|
||||||
OpenAI Embedding Example
|
|
||||||
|
|
||||||
Complete example showing how to build and search with OpenAI embeddings using HNSW backend.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import dotenv
|
|
||||||
from pathlib import Path
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher
|
|
||||||
|
|
||||||
# Load environment variables
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
|
|
||||||
def main():
|
|
||||||
# Check if OpenAI API key is available
|
|
||||||
api_key = os.getenv("OPENAI_API_KEY")
|
|
||||||
if not api_key:
|
|
||||||
print("ERROR: OPENAI_API_KEY environment variable not set")
|
|
||||||
return False
|
|
||||||
|
|
||||||
print(f"✅ OpenAI API key found: {api_key[:10]}...")
|
|
||||||
|
|
||||||
# Sample texts
|
|
||||||
sample_texts = [
|
|
||||||
"Machine learning is a powerful technology that enables computers to learn from data.",
|
|
||||||
"Natural language processing helps computers understand and generate human language.",
|
|
||||||
"Deep learning uses neural networks with multiple layers to solve complex problems.",
|
|
||||||
"Computer vision allows machines to interpret and understand visual information.",
|
|
||||||
"Reinforcement learning trains agents to make decisions through trial and error.",
|
|
||||||
"Data science combines statistics, math, and programming to extract insights from data.",
|
|
||||||
"Artificial intelligence aims to create machines that can perform human-like tasks.",
|
|
||||||
"Python is a popular programming language used extensively in data science and AI.",
|
|
||||||
"Neural networks are inspired by the structure and function of the human brain.",
|
|
||||||
"Big data refers to extremely large datasets that require special tools to process."
|
|
||||||
]
|
|
||||||
|
|
||||||
INDEX_DIR = Path("./simple_openai_test_index")
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "simple_test.leann")
|
|
||||||
|
|
||||||
print(f"\n=== Building Index with OpenAI Embeddings ===")
|
|
||||||
print(f"Index path: {INDEX_PATH}")
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Use proper configuration for OpenAI embeddings
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="text-embedding-3-small",
|
|
||||||
embedding_mode="openai",
|
|
||||||
# HNSW settings for OpenAI embeddings
|
|
||||||
M=16, # Smaller graph degree
|
|
||||||
efConstruction=64, # Smaller construction complexity
|
|
||||||
is_compact=True, # Enable compact storage for recompute
|
|
||||||
is_recompute=True, # MUST enable for OpenAI embeddings
|
|
||||||
num_threads=1,
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(sample_texts)} texts to the index...")
|
|
||||||
for i, text in enumerate(sample_texts):
|
|
||||||
metadata = {"id": f"doc_{i}", "topic": "AI"}
|
|
||||||
builder.add_text(text, metadata)
|
|
||||||
|
|
||||||
print("Building index...")
|
|
||||||
builder.build_index(INDEX_PATH)
|
|
||||||
print(f"✅ Index built successfully!")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"❌ Error building index: {e}")
|
|
||||||
import traceback
|
|
||||||
traceback.print_exc()
|
|
||||||
return False
|
|
||||||
|
|
||||||
print(f"\n=== Testing Search ===")
|
|
||||||
|
|
||||||
try:
|
|
||||||
searcher = LeannSearcher(INDEX_PATH)
|
|
||||||
|
|
||||||
test_queries = [
|
|
||||||
"What is machine learning?",
|
|
||||||
"How do neural networks work?",
|
|
||||||
"Programming languages for data science"
|
|
||||||
]
|
|
||||||
|
|
||||||
for query in test_queries:
|
|
||||||
print(f"\n🔍 Query: '{query}'")
|
|
||||||
results = searcher.search(query, top_k=3)
|
|
||||||
|
|
||||||
print(f" Found {len(results)} results:")
|
|
||||||
for i, result in enumerate(results):
|
|
||||||
print(f" {i+1}. Score: {result.score:.4f}")
|
|
||||||
print(f" Text: {result.text[:80]}...")
|
|
||||||
|
|
||||||
print(f"\n✅ Search test completed successfully!")
|
|
||||||
return True
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"❌ Error during search: {e}")
|
|
||||||
import traceback
|
|
||||||
traceback.print_exc()
|
|
||||||
return False
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
success = main()
|
|
||||||
if success:
|
|
||||||
print(f"\n🎉 Simple OpenAI index test completed successfully!")
|
|
||||||
else:
|
|
||||||
print(f"\n💥 Simple OpenAI index test failed!")
|
|
||||||
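The metadata dict passed to builder.add_text travels with each search hit, so results can be traced back to their source document. A short follow-up sketch that reuses the index path from the script above (OPENAI_API_KEY is still assumed, since queries are embedded with the OpenAI model):

from leann.api import LeannSearcher

searcher = LeannSearcher("./simple_openai_test_index/simple_test.leann")
for result in searcher.search("neural networks", top_k=2):
    # Each hit carries the metadata given to add_text, e.g. {"id": "doc_2", "topic": "AI"}.
    print(f"{result.score:.4f} [{result.metadata.get('id')}] {result.text[:60]}...")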
@@ -1,18 +0,0 @@
|
|||||||
import asyncio
|
|
||||||
from leann.api import LeannChat
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
INDEX_DIR = Path("./test_pdf_index_huawei")
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "pdf_documents.leann")
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
chat = LeannChat(index_path=INDEX_PATH)
|
|
||||||
query = "What is the main idea of RL and give me 5 exapmle of classic RL algorithms?"
|
|
||||||
query = "Based on the paper, what are the main techniques LEANN explores to reduce the storage overhead and DLPM explore to achieve Fairness and Efiiciency trade-off?"
|
|
||||||
# query = "什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发"
|
|
||||||
response = chat.ask(query, top_k=20, recompute_beighbor_embeddings=True, complexity=32, beam_width=1)
|
|
||||||
print(f"\n[PHASE 2] Response: {response}")
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
asyncio.run(main())
|
|
||||||
@@ -1,319 +0,0 @@
|
|||||||
import os
|
|
||||||
import asyncio
|
|
||||||
import dotenv
|
|
||||||
import argparse
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any, Optional
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
import requests
|
|
||||||
import time
|
|
||||||
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
|
|
||||||
# Default WeChat export directory
|
|
||||||
DEFAULT_WECHAT_EXPORT_DIR = "./wechat_export_direct"
|
|
||||||
|
|
||||||
|
|
||||||
def create_leann_index_from_multiple_wechat_exports(
|
|
||||||
export_dirs: List[Path],
|
|
||||||
index_path: str = "wechat_history_index.leann",
|
|
||||||
max_count: int = -1,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Create LEANN index from multiple WeChat export data sources.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
export_dirs: List of Path objects pointing to WeChat export directories
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of chat entries to process per export
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from multiple WeChat export data sources...")
|
|
||||||
|
|
||||||
# Load documents using WeChatHistoryReader from history_data
|
|
||||||
from history_data.wechat_history import WeChatHistoryReader
|
|
||||||
|
|
||||||
reader = WeChatHistoryReader()
|
|
||||||
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
all_documents = []
|
|
||||||
total_processed = 0
|
|
||||||
|
|
||||||
# Process each WeChat export directory
|
|
||||||
for i, export_dir in enumerate(export_dirs):
|
|
||||||
print(
|
|
||||||
f"\nProcessing WeChat export {i + 1}/{len(export_dirs)}: {export_dir}"
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
documents = reader.load_data(
|
|
||||||
wechat_export_dir=str(export_dir),
|
|
||||||
max_count=max_count,
|
|
||||||
concatenate_messages=True,  # Concatenate messages into combined documents (set to False for one message per document)
|
|
||||||
)
|
|
||||||
if documents:
|
|
||||||
print(f"Loaded {len(documents)} chat documents from {export_dir}")
|
|
||||||
all_documents.extend(documents)
|
|
||||||
total_processed += len(documents)
|
|
||||||
|
|
||||||
# Check if we've reached the max count
|
|
||||||
if max_count > 0 and total_processed >= max_count:
|
|
||||||
print(f"Reached max count of {max_count} documents")
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
print(f"No documents loaded from {export_dir}")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error processing {export_dir}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
if not all_documents:
|
|
||||||
print("No documents loaded from any source. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(
|
|
||||||
f"\nTotal loaded {len(all_documents)} chat documents from {len(export_dirs)} exports and starting to split them into chunks"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in all_documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
text = '[Contact] means the message is from: ' + doc.metadata["contact_name"] + '\n' + node.get_content()
|
|
||||||
all_texts.append(text)
|
|
||||||
|
|
||||||
print(
|
|
||||||
f"Finished splitting {len(all_documents)} documents into {len(all_texts)} text chunks"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="Qwen/Qwen3-Embedding-0.6B",
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1, # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} chat chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
|
|
||||||
def create_leann_index(
|
|
||||||
export_dir: str = None,
|
|
||||||
index_path: str = "wechat_history_index.leann",
|
|
||||||
max_count: int = 1000,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Create LEANN index from WeChat chat history data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
export_dir: Path to the WeChat export directory (optional, uses default if None)
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of chat entries to process
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from WeChat chat history data...")
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Load documents using WeChatHistoryReader from history_data
|
|
||||||
from history_data.wechat_history import WeChatHistoryReader
|
|
||||||
|
|
||||||
reader = WeChatHistoryReader()
|
|
||||||
|
|
||||||
documents = reader.load_data(
|
|
||||||
wechat_export_dir=export_dir,
|
|
||||||
max_count=max_count,
|
|
||||||
concatenate_messages=False, # Disable concatenation - one message per document
|
|
||||||
)
|
|
||||||
|
|
||||||
if not documents:
|
|
||||||
print("No documents loaded. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(f"Loaded {len(documents)} chat documents")
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
all_texts.append(node.get_content())
|
|
||||||
|
|
||||||
print(f"Created {len(all_texts)} text chunks from {len(documents)} documents")
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="mlx-community/Qwen3-Embedding-0.6B-4bit-DWQ", # MLX-optimized model
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1, # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} chat chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
|
|
||||||
async def query_leann_index(index_path: str, query: str):
|
|
||||||
"""
|
|
||||||
Query the LEANN index.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
index_path: Path to the LEANN index
|
|
||||||
query: The query string
|
|
||||||
"""
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
chat = LeannChat(index_path=index_path)
|
|
||||||
|
|
||||||
print(f"You: {query}")
|
|
||||||
chat_response = chat.ask(
|
|
||||||
query,
|
|
||||||
top_k=20,
|
|
||||||
recompute_beighbor_embeddings=True,
|
|
||||||
complexity=16,
|
|
||||||
beam_width=1,
|
|
||||||
llm_config={
|
|
||||||
"type": "openai",
|
|
||||||
"model": "gpt-4o",
|
|
||||||
"api_key": os.getenv("OPENAI_API_KEY"),
|
|
||||||
},
|
|
||||||
llm_kwargs={"temperature": 0.0, "max_tokens": 1000},
|
|
||||||
)
|
|
||||||
print(f"Leann: {chat_response}")
|
|
||||||
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
"""Main function with integrated WeChat export functionality."""
|
|
||||||
|
|
||||||
# Parse command line arguments
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description="LEANN WeChat History Reader - Create and query WeChat chat history index"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--export-dir",
|
|
||||||
type=str,
|
|
||||||
default=DEFAULT_WECHAT_EXPORT_DIR,
|
|
||||||
help=f"Directory to store WeChat exports (default: {DEFAULT_WECHAT_EXPORT_DIR})",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--index-dir",
|
|
||||||
type=str,
|
|
||||||
default="./wechat_history_magic_test_11Debug_new",
|
|
||||||
help="Directory to store the LEANN index (default: ./wechat_history_index_leann_test)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--max-entries",
|
|
||||||
type=int,
|
|
||||||
default=50,
|
|
||||||
help="Maximum number of chat entries to process (default: 5000)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--query",
|
|
||||||
type=str,
|
|
||||||
default=None,
|
|
||||||
help="Single query to run (default: runs example queries)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--force-export",
|
|
||||||
action="store_true",
|
|
||||||
default=False,
|
|
||||||
help="Force re-export of WeChat data even if exports exist",
|
|
||||||
)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
INDEX_DIR = Path(args.index_dir)
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "wechat_history.leann")
|
|
||||||
|
|
||||||
print(f"Using WeChat export directory: {args.export_dir}")
|
|
||||||
print(f"Index directory: {INDEX_DIR}")
|
|
||||||
print(f"Max entries: {args.max_entries}")
|
|
||||||
|
|
||||||
# Initialize WeChat reader with export capabilities
|
|
||||||
from history_data.wechat_history import WeChatHistoryReader
|
|
||||||
|
|
||||||
reader = WeChatHistoryReader()
|
|
||||||
|
|
||||||
# Find existing exports or create new ones using the centralized method
|
|
||||||
export_dirs = reader.find_or_export_wechat_data(args.export_dir)
|
|
||||||
if not export_dirs:
|
|
||||||
print("Failed to find or export WeChat data. Exiting.")
|
|
||||||
return
|
|
||||||
|
|
||||||
# Create or load the LEANN index from all sources
|
|
||||||
index_path = create_leann_index_from_multiple_wechat_exports(
|
|
||||||
export_dirs, INDEX_PATH, max_count=args.max_entries
|
|
||||||
)
|
|
||||||
|
|
||||||
if index_path:
|
|
||||||
if args.query:
|
|
||||||
# Run single query
|
|
||||||
await query_leann_index(index_path, args.query)
|
|
||||||
else:
|
|
||||||
# Example queries
|
|
||||||
queries = [
|
|
||||||
"我想买魔术师约翰逊的球衣,给我一些对应聊天记录?",
|
|
||||||
]
|
|
||||||
|
|
||||||
for query in queries:
|
|
||||||
print("\n" + "=" * 60)
|
|
||||||
await query_leann_index(index_path, query)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
asyncio.run(main())
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
|
|
||||||
|
|||||||
@@ -1,8 +0,0 @@
|
|||||||
# packages/leann-backend-diskann/CMakeLists.txt (simplified version)
|
|
||||||
|
|
||||||
cmake_minimum_required(VERSION 3.20)
|
|
||||||
project(leann_backend_diskann_wrapper)
|
|
||||||
|
|
||||||
# Tell CMake to directly enter the DiskANN submodule and execute its own CMakeLists.txt
|
|
||||||
# DiskANN will handle everything itself, including compiling Python bindings
|
|
||||||
add_subdirectory(src/third_party/DiskANN)
|
|
||||||
@@ -1 +1,7 @@
|
|||||||
from . import diskann_backend
|
from . import diskann_backend as diskann_backend
|
||||||
|
from . import graph_partition
|
||||||
|
|
||||||
|
# Export main classes and functions
|
||||||
|
from .graph_partition import GraphPartitioner, partition_graph
|
||||||
|
|
||||||
|
__all__ = ["GraphPartitioner", "diskann_backend", "graph_partition", "partition_graph"]
|
||||||
|
|||||||
@@ -1,20 +1,20 @@
|
|||||||
import numpy as np
|
import contextlib
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
import sys
|
import sys
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Dict, Any, List, Literal, Optional
|
from typing import Any, Literal, Optional
|
||||||
import contextlib
|
|
||||||
|
|
||||||
import logging
|
import numpy as np
|
||||||
|
import psutil
|
||||||
from leann.searcher_base import BaseSearcher
|
|
||||||
from leann.registry import register_backend
|
|
||||||
from leann.interface import (
|
from leann.interface import (
|
||||||
LeannBackendFactoryInterface,
|
|
||||||
LeannBackendBuilderInterface,
|
LeannBackendBuilderInterface,
|
||||||
|
LeannBackendFactoryInterface,
|
||||||
LeannBackendSearcherInterface,
|
LeannBackendSearcherInterface,
|
||||||
)
|
)
|
||||||
|
from leann.registry import register_backend
|
||||||
|
from leann.searcher_base import BaseSearcher
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -22,6 +22,11 @@ logger = logging.getLogger(__name__)
|
|||||||
@contextlib.contextmanager
|
@contextlib.contextmanager
|
||||||
def suppress_cpp_output_if_needed():
|
def suppress_cpp_output_if_needed():
|
||||||
"""Suppress C++ stdout/stderr based on LEANN_LOG_LEVEL"""
|
"""Suppress C++ stdout/stderr based on LEANN_LOG_LEVEL"""
|
||||||
|
# In CI we avoid fiddling with low-level file descriptors to prevent aborts
|
||||||
|
if os.getenv("CI") == "true":
|
||||||
|
yield
|
||||||
|
return
|
||||||
|
|
||||||
log_level = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
|
log_level = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
|
||||||
|
|
||||||
# Only suppress if log level is WARNING or higher (ERROR, CRITICAL)
|
# Only suppress if log level is WARNING or higher (ERROR, CRITICAL)
|
||||||
@@ -85,6 +90,43 @@ def _write_vectors_to_bin(data: np.ndarray, file_path: Path):
|
|||||||
f.write(data.tobytes())
|
f.write(data.tobytes())
|
||||||
|
|
||||||
|
|
||||||
|
def _calculate_smart_memory_config(data: np.ndarray) -> tuple[float, float]:
|
||||||
|
"""
|
||||||
|
Calculate smart memory configuration for DiskANN based on data size and system specs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: The embedding data array
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple: (search_memory_maximum, build_memory_maximum) in GB
|
||||||
|
"""
|
||||||
|
num_vectors, dim = data.shape
|
||||||
|
|
||||||
|
# Calculate embedding storage size
|
||||||
|
embedding_size_bytes = num_vectors * dim * 4 # float32 = 4 bytes
|
||||||
|
embedding_size_gb = embedding_size_bytes / (1024**3)
|
||||||
|
|
||||||
|
# search_memory_maximum: 1/10 of embedding size for optimal PQ compression
|
||||||
|
# This controls Product Quantization size - smaller means more compression
|
||||||
|
search_memory_gb = max(0.1, embedding_size_gb / 10) # At least 100MB
|
||||||
|
|
||||||
|
# build_memory_maximum: Based on available system RAM for sharding control
|
||||||
|
# This controls how much memory DiskANN uses during index construction
|
||||||
|
available_memory_gb = psutil.virtual_memory().available / (1024**3)
|
||||||
|
total_memory_gb = psutil.virtual_memory().total / (1024**3)
|
||||||
|
|
||||||
|
# Use 50% of available memory, but at least 2GB and at most 75% of total
|
||||||
|
build_memory_gb = max(2.0, min(available_memory_gb * 0.5, total_memory_gb * 0.75))
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Smart memory config - Data: {embedding_size_gb:.2f}GB, "
|
||||||
|
f"Search mem: {search_memory_gb:.2f}GB (PQ control), "
|
||||||
|
f"Build mem: {build_memory_gb:.2f}GB (sharding control)"
|
||||||
|
)
|
||||||
|
|
||||||
|
return search_memory_gb, build_memory_gb
|
||||||
|
|
||||||
|
|
||||||
@register_backend("diskann")
|
@register_backend("diskann")
|
||||||
class DiskannBackend(LeannBackendFactoryInterface):
|
class DiskannBackend(LeannBackendFactoryInterface):
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -100,7 +142,72 @@ class DiskannBuilder(LeannBackendBuilderInterface):
|
|||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
self.build_params = kwargs
|
self.build_params = kwargs
|
||||||
|
|
||||||
def build(self, data: np.ndarray, ids: List[str], index_path: str, **kwargs):
|
def _safe_cleanup_after_partition(self, index_dir: Path, index_prefix: str):
|
||||||
|
"""
|
||||||
|
Safely cleanup files after partition.
|
||||||
|
In partition mode, C++ doesn't read _disk.index content,
|
||||||
|
so we can delete it if all derived files exist.
|
||||||
|
"""
|
||||||
|
disk_index_file = index_dir / f"{index_prefix}_disk.index"
|
||||||
|
beam_search_file = index_dir / f"{index_prefix}_disk_beam_search.index"
|
||||||
|
|
||||||
|
# Required files that C++ partition mode needs
|
||||||
|
# Note: C++ generates these with _disk.index suffix
|
||||||
|
disk_suffix = "_disk.index"
|
||||||
|
required_files = [
|
||||||
|
f"{index_prefix}{disk_suffix}_medoids.bin", # Critical: assert fails if missing
|
||||||
|
# Note: _centroids.bin is not created in single-shot build - C++ handles this automatically
|
||||||
|
f"{index_prefix}_pq_pivots.bin", # PQ table
|
||||||
|
f"{index_prefix}_pq_compressed.bin", # PQ compressed vectors
|
||||||
|
]
|
||||||
|
|
||||||
|
# Check if all required files exist
|
||||||
|
missing_files = []
|
||||||
|
for filename in required_files:
|
||||||
|
file_path = index_dir / filename
|
||||||
|
if not file_path.exists():
|
||||||
|
missing_files.append(filename)
|
||||||
|
|
||||||
|
if missing_files:
|
||||||
|
logger.warning(
|
||||||
|
f"Cannot safely delete _disk.index - missing required files: {missing_files}"
|
||||||
|
)
|
||||||
|
logger.info("Keeping all original files for safety")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Calculate space savings
|
||||||
|
space_saved = 0
|
||||||
|
files_to_delete = []
|
||||||
|
|
||||||
|
if disk_index_file.exists():
|
||||||
|
space_saved += disk_index_file.stat().st_size
|
||||||
|
files_to_delete.append(disk_index_file)
|
||||||
|
|
||||||
|
if beam_search_file.exists():
|
||||||
|
space_saved += beam_search_file.stat().st_size
|
||||||
|
files_to_delete.append(beam_search_file)
|
||||||
|
|
||||||
|
# Safe to delete!
|
||||||
|
for file_to_delete in files_to_delete:
|
||||||
|
try:
|
||||||
|
os.remove(file_to_delete)
|
||||||
|
logger.info(f"✅ Safely deleted: {file_to_delete.name}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to delete {file_to_delete.name}: {e}")
|
||||||
|
|
||||||
|
if space_saved > 0:
|
||||||
|
space_saved_mb = space_saved / (1024 * 1024)
|
||||||
|
logger.info(f"💾 Space saved: {space_saved_mb:.1f} MB")
|
||||||
|
|
||||||
|
# Show what files are kept
|
||||||
|
logger.info("📁 Kept essential files for partition mode:")
|
||||||
|
for filename in required_files:
|
||||||
|
file_path = index_dir / filename
|
||||||
|
if file_path.exists():
|
||||||
|
size_mb = file_path.stat().st_size / (1024 * 1024)
|
||||||
|
logger.info(f" - {filename} ({size_mb:.1f} MB)")
|
||||||
|
|
||||||
|
def build(self, data: np.ndarray, ids: list[str], index_path: str, **kwargs):
|
||||||
path = Path(index_path)
|
path = Path(index_path)
|
||||||
index_dir = path.parent
|
index_dir = path.parent
|
||||||
index_prefix = path.stem
|
index_prefix = path.stem
|
||||||
@@ -114,6 +221,17 @@ class DiskannBuilder(LeannBackendBuilderInterface):
|
|||||||
_write_vectors_to_bin(data, index_dir / data_filename)
|
_write_vectors_to_bin(data, index_dir / data_filename)
|
||||||
|
|
||||||
build_kwargs = {**self.build_params, **kwargs}
|
build_kwargs = {**self.build_params, **kwargs}
|
||||||
|
|
||||||
|
# Extract is_recompute from nested backend_kwargs if needed
|
||||||
|
is_recompute = build_kwargs.get("is_recompute", False)
|
||||||
|
if not is_recompute and "backend_kwargs" in build_kwargs:
|
||||||
|
is_recompute = build_kwargs["backend_kwargs"].get("is_recompute", False)
|
||||||
|
|
||||||
|
# Flatten all backend_kwargs parameters to top level for compatibility
|
||||||
|
if "backend_kwargs" in build_kwargs:
|
||||||
|
nested_params = build_kwargs.pop("backend_kwargs")
|
||||||
|
build_kwargs.update(nested_params)
|
||||||
|
|
||||||
metric_enum = _get_diskann_metrics().get(
|
metric_enum = _get_diskann_metrics().get(
|
||||||
build_kwargs.get("distance_metric", "mips").lower()
|
build_kwargs.get("distance_metric", "mips").lower()
|
||||||
)
|
)
|
||||||
@@ -122,6 +240,16 @@ class DiskannBuilder(LeannBackendBuilderInterface):
|
|||||||
f"Unsupported distance_metric '{build_kwargs.get('distance_metric', 'unknown')}'."
|
f"Unsupported distance_metric '{build_kwargs.get('distance_metric', 'unknown')}'."
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Calculate smart memory configuration if not explicitly provided
|
||||||
|
if (
|
||||||
|
"search_memory_maximum" not in build_kwargs
|
||||||
|
or "build_memory_maximum" not in build_kwargs
|
||||||
|
):
|
||||||
|
smart_search_mem, smart_build_mem = _calculate_smart_memory_config(data)
|
||||||
|
else:
|
||||||
|
smart_search_mem = build_kwargs.get("search_memory_maximum", 4.0)
|
||||||
|
smart_build_mem = build_kwargs.get("build_memory_maximum", 8.0)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from . import _diskannpy as diskannpy # type: ignore
|
from . import _diskannpy as diskannpy # type: ignore
|
||||||
|
|
||||||
@@ -132,12 +260,36 @@ class DiskannBuilder(LeannBackendBuilderInterface):
|
|||||||
index_prefix,
|
index_prefix,
|
||||||
build_kwargs.get("complexity", 64),
|
build_kwargs.get("complexity", 64),
|
||||||
build_kwargs.get("graph_degree", 32),
|
build_kwargs.get("graph_degree", 32),
|
||||||
build_kwargs.get("search_memory_maximum", 4.0),
|
build_kwargs.get("search_memory_maximum", smart_search_mem),
|
||||||
build_kwargs.get("build_memory_maximum", 8.0),
|
build_kwargs.get("build_memory_maximum", smart_build_mem),
|
||||||
build_kwargs.get("num_threads", 8),
|
build_kwargs.get("num_threads", 8),
|
||||||
build_kwargs.get("pq_disk_bytes", 0),
|
build_kwargs.get("pq_disk_bytes", 0),
|
||||||
"",
|
"",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Auto-partition if is_recompute is enabled
|
||||||
|
if build_kwargs.get("is_recompute", False):
|
||||||
|
logger.info("is_recompute=True, starting automatic graph partitioning...")
|
||||||
|
from .graph_partition import partition_graph
|
||||||
|
|
||||||
|
# Partition the index using absolute paths
|
||||||
|
# Convert to absolute paths to avoid issues with working directory changes
|
||||||
|
absolute_index_dir = Path(index_dir).resolve()
|
||||||
|
absolute_index_prefix_path = str(absolute_index_dir / index_prefix)
|
||||||
|
disk_graph_path, partition_bin_path = partition_graph(
|
||||||
|
index_prefix_path=absolute_index_prefix_path,
|
||||||
|
output_dir=str(absolute_index_dir),
|
||||||
|
partition_prefix=index_prefix,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Safe cleanup: In partition mode, C++ doesn't read _disk.index content
|
||||||
|
# but still needs the derived files (_medoids.bin, _centroids.bin, etc.)
|
||||||
|
self._safe_cleanup_after_partition(index_dir, index_prefix)
|
||||||
|
|
||||||
|
logger.info("✅ Graph partitioning completed successfully!")
|
||||||
|
logger.info(f" - Disk graph: {disk_graph_path}")
|
||||||
|
logger.info(f" - Partition file: {partition_bin_path}")
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
temp_data_file = index_dir / data_filename
|
temp_data_file = index_dir / data_filename
|
||||||
if temp_data_file.exists():
|
if temp_data_file.exists():
|
||||||
@@ -164,18 +316,69 @@ class DiskannSearcher(BaseSearcher):
|
|||||||
|
|
||||||
self.num_threads = kwargs.get("num_threads", 8)
|
self.num_threads = kwargs.get("num_threads", 8)
|
||||||
|
|
||||||
fake_zmq_port = 6666
|
# For DiskANN, we need to reinitialize the index when zmq_port changes
|
||||||
full_index_prefix = str(self.index_dir / self.index_path.stem)
|
# Store the initialization parameters for later use
|
||||||
self._index = diskannpy.StaticDiskFloatIndex(
|
# Note: C++ load method expects the BASE path (without _disk.index suffix)
|
||||||
metric_enum,
|
# C++ internally constructs: index_prefix + "_disk.index"
|
||||||
full_index_prefix,
|
index_name = self.index_path.stem # "simple_test.leann" -> "simple_test"
|
||||||
self.num_threads,
|
diskann_index_prefix = str(self.index_dir / index_name) # /path/to/simple_test
|
||||||
kwargs.get("num_nodes_to_cache", 0),
|
full_index_prefix = diskann_index_prefix # /path/to/simple_test (base path)
|
||||||
1,
|
|
||||||
fake_zmq_port, # Initial port, can be updated at runtime
|
# Auto-detect partition files and set partition_prefix
|
||||||
"",
|
partition_graph_file = self.index_dir / f"{index_name}_disk_graph.index"
|
||||||
"",
|
partition_bin_file = self.index_dir / f"{index_name}_partition.bin"
|
||||||
)
|
|
||||||
|
partition_prefix = ""
|
||||||
|
if partition_graph_file.exists() and partition_bin_file.exists():
|
||||||
|
# C++ expects full path prefix, not just filename
|
||||||
|
partition_prefix = str(self.index_dir / index_name) # /path/to/simple_test
|
||||||
|
logger.info(
|
||||||
|
f"✅ Detected partition files, using partition_prefix='{partition_prefix}'"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.debug("No partition files detected, using standard index files")
|
||||||
|
|
||||||
|
self._init_params = {
|
||||||
|
"metric_enum": metric_enum,
|
||||||
|
"full_index_prefix": full_index_prefix,
|
||||||
|
"num_threads": self.num_threads,
|
||||||
|
"num_nodes_to_cache": kwargs.get("num_nodes_to_cache", 0),
|
||||||
|
"cache_mechanism": 1,
|
||||||
|
"pq_prefix": "",
|
||||||
|
"partition_prefix": partition_prefix,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Log partition configuration for debugging
|
||||||
|
if partition_prefix:
|
||||||
|
logger.info(
|
||||||
|
f"✅ Detected partition files, using partition_prefix='{partition_prefix}'"
|
||||||
|
)
|
||||||
|
self._diskannpy = diskannpy
|
||||||
|
self._current_zmq_port = None
|
||||||
|
self._index = None
|
||||||
|
logger.debug("DiskANN searcher initialized (index will be loaded on first search)")
|
||||||
|
|
||||||
|
def _ensure_index_loaded(self, zmq_port: int):
|
||||||
|
"""Ensure the index is loaded with the correct zmq_port."""
|
||||||
|
if self._index is None or self._current_zmq_port != zmq_port:
|
||||||
|
# Need to (re)load the index with the correct zmq_port
|
||||||
|
with suppress_cpp_output_if_needed():
|
||||||
|
if self._index is not None:
|
||||||
|
logger.debug(f"Reloading DiskANN index with new zmq_port: {zmq_port}")
|
||||||
|
else:
|
||||||
|
logger.debug(f"Loading DiskANN index with zmq_port: {zmq_port}")
|
||||||
|
|
||||||
|
self._index = self._diskannpy.StaticDiskFloatIndex(
|
||||||
|
self._init_params["metric_enum"],
|
||||||
|
self._init_params["full_index_prefix"],
|
||||||
|
self._init_params["num_threads"],
|
||||||
|
self._init_params["num_nodes_to_cache"],
|
||||||
|
self._init_params["cache_mechanism"],
|
||||||
|
zmq_port,
|
||||||
|
self._init_params["pq_prefix"],
|
||||||
|
self._init_params["partition_prefix"],
|
||||||
|
)
|
||||||
|
self._current_zmq_port = zmq_port
|
||||||
|
|
||||||
def search(
|
def search(
|
||||||
self,
|
self,
|
||||||
@@ -190,7 +393,7 @@ class DiskannSearcher(BaseSearcher):
|
|||||||
batch_recompute: bool = False,
|
batch_recompute: bool = False,
|
||||||
dedup_node_dis: bool = False,
|
dedup_node_dis: bool = False,
|
||||||
**kwargs,
|
**kwargs,
|
||||||
) -> Dict[str, Any]:
|
) -> dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Search for nearest neighbors using DiskANN index.
|
Search for nearest neighbors using DiskANN index.
|
||||||
|
|
||||||
@@ -213,18 +416,15 @@ class DiskannSearcher(BaseSearcher):
|
|||||||
Returns:
|
Returns:
|
||||||
Dict with 'labels' (list of lists) and 'distances' (ndarray)
|
Dict with 'labels' (list of lists) and 'distances' (ndarray)
|
||||||
"""
|
"""
|
||||||
# Handle zmq_port compatibility: DiskANN can now update port at runtime
|
# Handle zmq_port compatibility: Ensure index is loaded with correct port
|
||||||
if recompute_embeddings:
|
if recompute_embeddings:
|
||||||
if zmq_port is None:
|
if zmq_port is None:
|
||||||
raise ValueError(
|
raise ValueError("zmq_port must be provided if recompute_embeddings is True")
|
||||||
"zmq_port must be provided if recompute_embeddings is True"
|
self._ensure_index_loaded(zmq_port)
|
||||||
)
|
else:
|
||||||
current_port = self._index.get_zmq_port()
|
# If not recomputing, we still need an index, use a default port
|
||||||
if zmq_port != current_port:
|
if self._index is None:
|
||||||
logger.debug(
|
self._ensure_index_loaded(6666) # Default port when not recomputing
|
||||||
f"Updating DiskANN zmq_port from {current_port} to {zmq_port}"
|
|
||||||
)
|
|
||||||
self._index.set_zmq_port(zmq_port)
|
|
||||||
|
|
||||||
# DiskANN doesn't support "proportional" strategy
|
# DiskANN doesn't support "proportional" strategy
|
||||||
if pruning_strategy == "proportional":
|
if pruning_strategy == "proportional":
|
||||||
@@ -241,7 +441,14 @@ class DiskannSearcher(BaseSearcher):
|
|||||||
else: # "global"
|
else: # "global"
|
||||||
use_global_pruning = True
|
use_global_pruning = True
|
||||||
|
|
||||||
# Perform search with suppressed C++ output based on log level
|
# Strategy:
|
||||||
|
# - Traversal always uses PQ distances
|
||||||
|
# - If recompute_embeddings=True, do a single final rerank via deferred fetch
|
||||||
|
# (fetch embeddings for the final candidate set only)
|
||||||
|
# - Do not recompute neighbor distances along the path
|
||||||
|
use_deferred_fetch = True if recompute_embeddings else False
|
||||||
|
recompute_neighors = False # Expected typo. For backward compatibility.
|
||||||
|
|
||||||
with suppress_cpp_output_if_needed():
|
with suppress_cpp_output_if_needed():
|
||||||
labels, distances = self._index.batch_search(
|
labels, distances = self._index.batch_search(
|
||||||
query,
|
query,
|
||||||
@@ -250,17 +457,15 @@ class DiskannSearcher(BaseSearcher):
|
|||||||
complexity,
|
complexity,
|
||||||
beam_width,
|
beam_width,
|
||||||
self.num_threads,
|
self.num_threads,
|
||||||
kwargs.get("USE_DEFERRED_FETCH", False),
|
use_deferred_fetch,
|
||||||
kwargs.get("skip_search_reorder", False),
|
kwargs.get("skip_search_reorder", False),
|
||||||
recompute_embeddings,
|
recompute_neighors,
|
||||||
dedup_node_dis,
|
dedup_node_dis,
|
||||||
prune_ratio,
|
prune_ratio,
|
||||||
batch_recompute,
|
batch_recompute,
|
||||||
use_global_pruning,
|
use_global_pruning,
|
||||||
)
|
)
|
||||||
|
|
||||||
string_labels = [
|
string_labels = [[str(int_label) for int_label in batch_labels] for batch_labels in labels]
|
||||||
[str(int_label) for int_label in batch_labels] for batch_labels in labels
|
|
||||||
]
|
|
||||||
|
|
||||||
return {"labels": string_labels, "distances": distances}
|
return {"labels": string_labels, "distances": distances}
|
||||||
|
|||||||
@@ -3,16 +3,17 @@ DiskANN-specific embedding server
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
import os
|
|
||||||
import zmq
|
|
||||||
import numpy as np
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
import sys
|
|
||||||
import logging
|
import numpy as np
|
||||||
|
import zmq
|
||||||
|
|
||||||
# Set up logging based on environment variable
|
# Set up logging based on environment variable
|
||||||
LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
|
LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
|
||||||
@@ -36,6 +37,7 @@ def create_diskann_embedding_server(
|
|||||||
zmq_port: int = 5555,
|
zmq_port: int = 5555,
|
||||||
model_name: str = "sentence-transformers/all-mpnet-base-v2",
|
model_name: str = "sentence-transformers/all-mpnet-base-v2",
|
||||||
embedding_mode: str = "sentence-transformers",
|
embedding_mode: str = "sentence-transformers",
|
||||||
|
distance_metric: str = "l2",
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Create and start a ZMQ-based embedding server for DiskANN backend.
|
Create and start a ZMQ-based embedding server for DiskANN backend.
|
||||||
@@ -50,8 +52,8 @@ def create_diskann_embedding_server(
|
|||||||
sys.path.insert(0, str(leann_core_path))
|
sys.path.insert(0, str(leann_core_path))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from leann.embedding_compute import compute_embeddings
|
|
||||||
from leann.api import PassageManager
|
from leann.api import PassageManager
|
||||||
|
from leann.embedding_compute import compute_embeddings
|
||||||
|
|
||||||
logger.info("Successfully imported unified embedding computation module")
|
logger.info("Successfully imported unified embedding computation module")
|
||||||
except ImportError as e:
|
except ImportError as e:
|
||||||
@@ -76,13 +78,12 @@ def create_diskann_embedding_server(
|
|||||||
raise ValueError("Only metadata files (.meta.json) are supported")
|
raise ValueError("Only metadata files (.meta.json) are supported")
|
||||||
|
|
||||||
# Load metadata to get passage sources
|
# Load metadata to get passage sources
|
||||||
with open(passages_file, "r") as f:
|
with open(passages_file) as f:
|
||||||
meta = json.load(f)
|
meta = json.load(f)
|
||||||
|
|
||||||
passages = PassageManager(meta["passage_sources"])
|
logger.info(f"Loading PassageManager with metadata_file_path: {passages_file}")
|
||||||
logger.info(
|
passages = PassageManager(meta["passage_sources"], metadata_file_path=passages_file)
|
||||||
f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
|
logger.info(f"Loaded PassageManager with {len(passages)} passages from metadata")
|
||||||
)
|
|
||||||
|
|
||||||
# Import protobuf after ensuring the path is correct
|
# Import protobuf after ensuring the path is correct
|
||||||
try:
|
try:
|
||||||
@@ -100,8 +101,9 @@ def create_diskann_embedding_server(
|
|||||||
socket.bind(f"tcp://*:{zmq_port}")
|
socket.bind(f"tcp://*:{zmq_port}")
|
||||||
logger.info(f"DiskANN ZMQ REP server listening on port {zmq_port}")
|
logger.info(f"DiskANN ZMQ REP server listening on port {zmq_port}")
|
||||||
|
|
||||||
socket.setsockopt(zmq.RCVTIMEO, 300000)
|
socket.setsockopt(zmq.RCVTIMEO, 1000)
|
||||||
socket.setsockopt(zmq.SNDTIMEO, 300000)
|
socket.setsockopt(zmq.SNDTIMEO, 1000)
|
||||||
|
socket.setsockopt(zmq.LINGER, 0)
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
@@ -150,9 +152,7 @@ def create_diskann_embedding_server(
|
|||||||
):
|
):
|
||||||
texts = request
|
texts = request
|
||||||
is_text_request = True
|
is_text_request = True
|
||||||
logger.info(
|
logger.info(f"✅ MSGPACK: Direct text request for {len(texts)} texts")
|
||||||
f"✅ MSGPACK: Direct text request for {len(texts)} texts"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
raise ValueError("Not a valid msgpack text request")
|
raise ValueError("Not a valid msgpack text request")
|
||||||
except Exception as msgpack_error:
|
except Exception as msgpack_error:
|
||||||
@@ -167,9 +167,7 @@ def create_diskann_embedding_server(
|
|||||||
passage_data = passages.get_passage(str(nid))
|
passage_data = passages.get_passage(str(nid))
|
||||||
txt = passage_data["text"]
|
txt = passage_data["text"]
|
||||||
if not txt:
|
if not txt:
|
||||||
raise RuntimeError(
|
raise RuntimeError(f"FATAL: Empty text for passage ID {nid}")
|
||||||
f"FATAL: Empty text for passage ID {nid}"
|
|
||||||
)
|
|
||||||
texts.append(txt)
|
texts.append(txt)
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
logger.error(f"Passage ID {nid} not found: {e}")
|
logger.error(f"Passage ID {nid} not found: {e}")
|
||||||
@@ -180,9 +178,7 @@ def create_diskann_embedding_server(
|
|||||||
|
|
||||||
# Debug logging
|
# Debug logging
|
||||||
logger.debug(f"Processing {len(texts)} texts")
|
logger.debug(f"Processing {len(texts)} texts")
|
||||||
logger.debug(
|
logger.debug(f"Text lengths: {[len(t) for t in texts[:5]]}") # Show first 5
|
||||||
f"Text lengths: {[len(t) for t in texts[:5]]}"
|
|
||||||
) # Show first 5
|
|
||||||
|
|
||||||
# Process embeddings using unified computation
|
# Process embeddings using unified computation
|
||||||
embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
|
embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
|
||||||
@@ -199,9 +195,7 @@ def create_diskann_embedding_server(
|
|||||||
else:
|
else:
|
||||||
# For DiskANN C++ compatibility: return protobuf format
|
# For DiskANN C++ compatibility: return protobuf format
|
||||||
resp_proto = embedding_pb2.NodeEmbeddingResponse()
|
resp_proto = embedding_pb2.NodeEmbeddingResponse()
|
||||||
hidden_contiguous = np.ascontiguousarray(
|
hidden_contiguous = np.ascontiguousarray(embeddings, dtype=np.float32)
|
||||||
embeddings, dtype=np.float32
|
|
||||||
)
|
|
||||||
|
|
||||||
# Serialize embeddings data
|
# Serialize embeddings data
|
||||||
resp_proto.embeddings_data = hidden_contiguous.tobytes()
|
resp_proto.embeddings_data = hidden_contiguous.tobytes()
|
||||||
@@ -226,30 +220,217 @@ def create_diskann_embedding_server(
|
|||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
raise
|
raise
|
||||||
|
|
||||||
zmq_thread = threading.Thread(target=zmq_server_thread, daemon=True)
|
def zmq_server_thread_with_shutdown(shutdown_event):
|
||||||
|
"""ZMQ server thread that respects shutdown signal.
|
||||||
|
|
||||||
|
This creates its own REP socket, binds to zmq_port, and periodically
|
||||||
|
checks shutdown_event using recv timeouts to exit cleanly.
|
||||||
|
"""
|
||||||
|
logger.info("DiskANN ZMQ server thread started with shutdown support")
|
||||||
|
|
||||||
|
context = zmq.Context()
|
||||||
|
rep_socket = context.socket(zmq.REP)
|
||||||
|
rep_socket.bind(f"tcp://*:{zmq_port}")
|
||||||
|
logger.info(f"DiskANN ZMQ REP server listening on port {zmq_port}")
|
||||||
|
|
||||||
|
# Set receive timeout so we can check shutdown_event periodically
|
||||||
|
rep_socket.setsockopt(zmq.RCVTIMEO, 1000) # 1 second timeout
|
||||||
|
rep_socket.setsockopt(zmq.SNDTIMEO, 1000)
|
||||||
|
rep_socket.setsockopt(zmq.LINGER, 0)
|
||||||
|
|
||||||
|
try:
|
||||||
|
while not shutdown_event.is_set():
|
||||||
|
try:
|
||||||
|
e2e_start = time.time()
|
||||||
|
# REP socket receives single-part messages
|
||||||
|
message = rep_socket.recv()
|
||||||
|
|
||||||
|
# Check for empty messages - REP socket requires response to every request
|
||||||
|
if not message:
|
||||||
|
logger.warning("Received empty message, sending empty response")
|
||||||
|
rep_socket.send(b"")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Try protobuf first (same logic as original)
|
||||||
|
texts = []
|
||||||
|
is_text_request = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
req_proto = embedding_pb2.NodeEmbeddingRequest()
|
||||||
|
req_proto.ParseFromString(message)
|
||||||
|
node_ids = list(req_proto.node_ids)
|
||||||
|
|
||||||
|
# Look up texts by node IDs
|
||||||
|
for nid in node_ids:
|
||||||
|
try:
|
||||||
|
passage_data = passages.get_passage(str(nid))
|
||||||
|
txt = passage_data["text"]
|
||||||
|
if not txt:
|
||||||
|
raise RuntimeError(f"FATAL: Empty text for passage ID {nid}")
|
||||||
|
texts.append(txt)
|
||||||
|
except KeyError:
|
||||||
|
raise RuntimeError(f"FATAL: Passage with ID {nid} not found")
|
||||||
|
|
||||||
|
logger.info(f"ZMQ received protobuf request for {len(node_ids)} node IDs")
|
||||||
|
except Exception:
|
||||||
|
# Fallback to msgpack for text requests
|
||||||
|
try:
|
||||||
|
import msgpack
|
||||||
|
|
||||||
|
request = msgpack.unpackb(message)
|
||||||
|
if isinstance(request, list) and all(
|
||||||
|
isinstance(item, str) for item in request
|
||||||
|
):
|
||||||
|
texts = request
|
||||||
|
is_text_request = True
|
||||||
|
logger.info(
|
||||||
|
f"ZMQ received msgpack text request for {len(texts)} texts"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise ValueError("Not a valid msgpack text request")
|
||||||
|
except Exception:
|
||||||
|
logger.error("Both protobuf and msgpack parsing failed!")
|
||||||
|
# Send error response
|
||||||
|
resp_proto = embedding_pb2.NodeEmbeddingResponse()
|
||||||
|
rep_socket.send(resp_proto.SerializeToString())
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Process the request
|
||||||
|
embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
|
||||||
|
logger.info(f"Computed embeddings shape: {embeddings.shape}")
|
||||||
|
|
||||||
|
# Validation
|
||||||
|
if np.isnan(embeddings).any() or np.isinf(embeddings).any():
|
||||||
|
logger.error("NaN or Inf detected in embeddings!")
|
||||||
|
# Send error response
|
||||||
|
if is_text_request:
|
||||||
|
import msgpack
|
||||||
|
|
||||||
|
response_data = msgpack.packb([])
|
||||||
|
else:
|
||||||
|
resp_proto = embedding_pb2.NodeEmbeddingResponse()
|
||||||
|
response_data = resp_proto.SerializeToString()
|
||||||
|
rep_socket.send(response_data)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Prepare response based on request type
|
||||||
|
if is_text_request:
|
||||||
|
# For direct text requests, return msgpack
|
||||||
|
import msgpack
|
||||||
|
|
||||||
|
response_data = msgpack.packb(embeddings.tolist())
|
||||||
|
else:
|
||||||
|
# For protobuf requests, return protobuf
|
||||||
|
resp_proto = embedding_pb2.NodeEmbeddingResponse()
|
||||||
|
hidden_contiguous = np.ascontiguousarray(embeddings, dtype=np.float32)
|
||||||
|
|
||||||
|
resp_proto.embeddings_data = hidden_contiguous.tobytes()
|
||||||
|
resp_proto.dimensions.append(hidden_contiguous.shape[0])
|
||||||
|
resp_proto.dimensions.append(hidden_contiguous.shape[1])
|
||||||
|
|
||||||
|
response_data = resp_proto.SerializeToString()
|
||||||
|
|
||||||
|
# Send response back to the client
|
||||||
|
rep_socket.send(response_data)
|
||||||
|
|
||||||
|
e2e_end = time.time()
|
||||||
|
logger.info(f"⏱️ ZMQ E2E time: {e2e_end - e2e_start:.6f}s")
|
||||||
|
|
||||||
|
except zmq.Again:
|
||||||
|
# Timeout - check shutdown_event and continue
|
||||||
|
continue
|
||||||
|
except Exception as e:
|
||||||
|
if not shutdown_event.is_set():
|
||||||
|
logger.error(f"Error in ZMQ server loop: {e}")
|
||||||
|
try:
|
||||||
|
# Send error response for REP socket
|
||||||
|
resp_proto = embedding_pb2.NodeEmbeddingResponse()
|
||||||
|
rep_socket.send(resp_proto.SerializeToString())
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
logger.info("Shutdown in progress, ignoring ZMQ error")
|
||||||
|
break
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
rep_socket.close(0)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
try:
|
||||||
|
context.term()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
logger.info("DiskANN ZMQ server thread exiting gracefully")
|
||||||
|
|
||||||
|
# Add shutdown coordination
|
||||||
|
shutdown_event = threading.Event()
|
||||||
|
|
||||||
|
def shutdown_zmq_server():
|
||||||
|
"""Gracefully shutdown ZMQ server."""
|
||||||
|
logger.info("Initiating graceful shutdown...")
|
||||||
|
shutdown_event.set()
|
||||||
|
|
||||||
|
if zmq_thread.is_alive():
|
||||||
|
logger.info("Waiting for ZMQ thread to finish...")
|
||||||
|
zmq_thread.join(timeout=5)
|
||||||
|
if zmq_thread.is_alive():
|
||||||
|
logger.warning("ZMQ thread did not finish in time")
|
||||||
|
|
||||||
|
# Clean up ZMQ resources
|
||||||
|
try:
|
||||||
|
# Note: socket and context are cleaned up by thread exit
|
||||||
|
logger.info("ZMQ resources cleaned up")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error cleaning ZMQ resources: {e}")
|
||||||
|
|
||||||
|
# Clean up other resources
|
||||||
|
try:
|
||||||
|
import gc
|
||||||
|
|
||||||
|
gc.collect()
|
||||||
|
logger.info("Additional resources cleaned up")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error cleaning additional resources: {e}")
|
||||||
|
|
||||||
|
logger.info("Graceful shutdown completed")
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
# Register signal handlers within this function scope
|
||||||
|
import signal
|
||||||
|
|
||||||
|
def signal_handler(sig, frame):
|
||||||
|
logger.info(f"Received signal {sig}, shutting down gracefully...")
|
||||||
|
shutdown_zmq_server()
|
||||||
|
|
||||||
|
signal.signal(signal.SIGTERM, signal_handler)
|
||||||
|
signal.signal(signal.SIGINT, signal_handler)
|
||||||
|
|
||||||
|
# Start ZMQ thread (NOT daemon!)
|
||||||
|
zmq_thread = threading.Thread(
|
||||||
|
target=lambda: zmq_server_thread_with_shutdown(shutdown_event),
|
||||||
|
daemon=False, # Not daemon - we want to wait for it
|
||||||
|
)
|
||||||
zmq_thread.start()
|
zmq_thread.start()
|
||||||
logger.info(f"Started DiskANN ZMQ server thread on port {zmq_port}")
|
logger.info(f"Started DiskANN ZMQ server thread on port {zmq_port}")
|
||||||
|
|
||||||
# Keep the main thread alive
|
# Keep the main thread alive
|
||||||
try:
|
try:
|
||||||
while True:
|
while not shutdown_event.is_set():
|
||||||
time.sleep(1)
|
time.sleep(0.1) # Check shutdown more frequently
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
logger.info("DiskANN Server shutting down...")
|
logger.info("DiskANN Server shutting down...")
|
||||||
|
shutdown_zmq_server()
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# If we reach here, shutdown was triggered by signal
|
||||||
|
logger.info("Main loop exited, process should be shutting down")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import signal
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
def signal_handler(sig, frame):
|
# Signal handlers are now registered within create_diskann_embedding_server
|
||||||
logger.info(f"Received signal {sig}, shutting down gracefully...")
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
# Register signal handlers for graceful shutdown
|
|
||||||
signal.signal(signal.SIGTERM, signal_handler)
|
|
||||||
signal.signal(signal.SIGINT, signal_handler)
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description="DiskANN Embedding service")
|
parser = argparse.ArgumentParser(description="DiskANN Embedding service")
|
||||||
parser.add_argument("--zmq-port", type=int, default=5555, help="ZMQ port to run on")
|
parser.add_argument("--zmq-port", type=int, default=5555, help="ZMQ port to run on")
|
||||||
@@ -268,9 +449,16 @@ if __name__ == "__main__":
|
|||||||
"--embedding-mode",
|
"--embedding-mode",
|
||||||
type=str,
|
type=str,
|
||||||
default="sentence-transformers",
|
default="sentence-transformers",
|
||||||
choices=["sentence-transformers", "openai", "mlx"],
|
choices=["sentence-transformers", "openai", "mlx", "ollama"],
|
||||||
help="Embedding backend mode",
|
help="Embedding backend mode",
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--distance-metric",
|
||||||
|
type=str,
|
||||||
|
default="l2",
|
||||||
|
choices=["l2", "mips", "cosine"],
|
||||||
|
help="Distance metric for similarity computation",
|
||||||
|
)
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
@@ -280,4 +468,5 @@ if __name__ == "__main__":
|
|||||||
zmq_port=args.zmq_port,
|
zmq_port=args.zmq_port,
|
||||||
model_name=args.model_name,
|
model_name=args.model_name,
|
||||||
embedding_mode=args.embedding_mode,
|
embedding_mode=args.embedding_mode,
|
||||||
|
distance_metric=args.distance_metric,
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,27 +1,28 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||||
# source: embedding.proto
|
# source: embedding.proto
|
||||||
|
# ruff: noqa
|
||||||
"""Generated protocol buffer code."""
|
"""Generated protocol buffer code."""
|
||||||
from google.protobuf.internal import builder as _builder
|
|
||||||
from google.protobuf import descriptor as _descriptor
|
from google.protobuf import descriptor as _descriptor
|
||||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||||
from google.protobuf import symbol_database as _symbol_database
|
from google.protobuf import symbol_database as _symbol_database
|
||||||
|
from google.protobuf.internal import builder as _builder
|
||||||
|
|
||||||
# @@protoc_insertion_point(imports)
|
# @@protoc_insertion_point(imports)
|
||||||
|
|
||||||
_sym_db = _symbol_database.Default()
|
_sym_db = _symbol_database.Default()
|
||||||
|
|
||||||
|
|
||||||
|
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
|
||||||
|
b'\n\x0f\x65mbedding.proto\x12\x0eprotoembedding"(\n\x14NodeEmbeddingRequest\x12\x10\n\x08node_ids\x18\x01 \x03(\r"Y\n\x15NodeEmbeddingResponse\x12\x17\n\x0f\x65mbeddings_data\x18\x01 \x01(\x0c\x12\x12\n\ndimensions\x18\x02 \x03(\x05\x12\x13\n\x0bmissing_ids\x18\x03 \x03(\rb\x06proto3'
|
||||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0f\x65mbedding.proto\x12\x0eprotoembedding\"(\n\x14NodeEmbeddingRequest\x12\x10\n\x08node_ids\x18\x01 \x03(\r\"Y\n\x15NodeEmbeddingResponse\x12\x17\n\x0f\x65mbeddings_data\x18\x01 \x01(\x0c\x12\x12\n\ndimensions\x18\x02 \x03(\x05\x12\x13\n\x0bmissing_ids\x18\x03 \x03(\rb\x06proto3')
|
)
|
||||||
|
|
||||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'embedding_pb2', globals())
|
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "embedding_pb2", globals())
|
||||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
if not _descriptor._USE_C_DESCRIPTORS:
|
||||||
|
DESCRIPTOR._options = None
|
||||||
DESCRIPTOR._options = None
|
_NODEEMBEDDINGREQUEST._serialized_start = 35
|
||||||
_NODEEMBEDDINGREQUEST._serialized_start=35
|
_NODEEMBEDDINGREQUEST._serialized_end = 75
|
||||||
_NODEEMBEDDINGREQUEST._serialized_end=75
|
_NODEEMBEDDINGRESPONSE._serialized_start = 77
|
||||||
_NODEEMBEDDINGRESPONSE._serialized_start=77
|
_NODEEMBEDDINGRESPONSE._serialized_end = 166
|
||||||
_NODEEMBEDDINGRESPONSE._serialized_end=166
|
|
||||||
# @@protoc_insertion_point(module_scope)
|
# @@protoc_insertion_point(module_scope)
|
||||||
|
|||||||
@@ -0,0 +1,299 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Graph Partition Module for LEANN DiskANN Backend
|
||||||
|
|
||||||
|
This module provides Python bindings for the graph partition functionality
|
||||||
|
of DiskANN, allowing users to partition disk-based indices for better
|
||||||
|
performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
class GraphPartitioner:
|
||||||
|
"""
|
||||||
|
A Python interface for DiskANN's graph partition functionality.
|
||||||
|
|
||||||
|
This class provides methods to partition disk-based indices for improved
|
||||||
|
search performance and memory efficiency.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, build_type: str = "release"):
|
||||||
|
"""
|
||||||
|
Initialize the GraphPartitioner.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
build_type: Build type for the executables ("debug" or "release")
|
||||||
|
"""
|
||||||
|
self.build_type = build_type
|
||||||
|
self._ensure_executables()
|
||||||
|
|
||||||
|
def _get_executable_path(self, name: str) -> str:
|
||||||
|
"""Get the path to a graph partition executable."""
|
||||||
|
# Get the directory where this Python module is located
|
||||||
|
module_dir = Path(__file__).parent
|
||||||
|
# Navigate to the graph_partition directory
|
||||||
|
graph_partition_dir = module_dir.parent / "third_party" / "DiskANN" / "graph_partition"
|
||||||
|
executable_path = graph_partition_dir / "build" / self.build_type / "graph_partition" / name
|
||||||
|
|
||||||
|
if not executable_path.exists():
|
||||||
|
raise FileNotFoundError(f"Executable {name} not found at {executable_path}")
|
||||||
|
|
||||||
|
return str(executable_path)
|
||||||
|
|
||||||
|
def _ensure_executables(self):
|
||||||
|
"""Ensure that the required executables are built."""
|
||||||
|
try:
|
||||||
|
self._get_executable_path("partitioner")
|
||||||
|
self._get_executable_path("index_relayout")
|
||||||
|
except FileNotFoundError:
|
||||||
|
# Try to build the executables automatically
|
||||||
|
print("Executables not found, attempting to build them...")
|
||||||
|
self._build_executables()
|
||||||
|
|
||||||
|
def _build_executables(self):
|
||||||
|
"""Build the required executables."""
|
||||||
|
graph_partition_dir = (
|
||||||
|
Path(__file__).parent.parent / "third_party" / "DiskANN" / "graph_partition"
|
||||||
|
)
|
||||||
|
original_dir = os.getcwd()
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.chdir(graph_partition_dir)
|
||||||
|
|
||||||
|
# Clean any existing build
|
||||||
|
if (graph_partition_dir / "build").exists():
|
||||||
|
shutil.rmtree(graph_partition_dir / "build")
|
||||||
|
|
||||||
|
# Run the build script
|
||||||
|
cmd = ["./build.sh", self.build_type, "split_graph", "/tmp/dummy"]
|
||||||
|
subprocess.run(cmd, capture_output=True, text=True, cwd=graph_partition_dir)
|
||||||
|
|
||||||
|
# Check if executables were created
|
||||||
|
partitioner_path = self._get_executable_path("partitioner")
|
||||||
|
relayout_path = self._get_executable_path("index_relayout")
|
||||||
|
|
||||||
|
print(f"✅ Built partitioner: {partitioner_path}")
|
||||||
|
print(f"✅ Built index_relayout: {relayout_path}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
raise RuntimeError(f"Failed to build executables: {e}")
|
||||||
|
finally:
|
||||||
|
os.chdir(original_dir)
|
||||||
|
|
||||||
|
def partition_graph(
|
||||||
|
self,
|
||||||
|
index_prefix_path: str,
|
||||||
|
output_dir: Optional[str] = None,
|
||||||
|
partition_prefix: Optional[str] = None,
|
||||||
|
**kwargs,
|
||||||
|
) -> tuple[str, str]:
|
||||||
|
"""
|
||||||
|
Partition a disk-based index for improved performance.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
index_prefix_path: Path to the index prefix (e.g., "/path/to/index")
|
||||||
|
output_dir: Output directory for results (defaults to parent of index_prefix_path)
|
||||||
|
partition_prefix: Prefix for output files (defaults to basename of index_prefix_path)
|
||||||
|
**kwargs: Additional parameters for graph partitioning:
|
||||||
|
- gp_times: Number of LDG partition iterations (default: 10)
|
||||||
|
- lock_nums: Number of lock nodes (default: 10)
|
||||||
|
- cut: Cut adjacency list degree (default: 100)
|
||||||
|
- scale_factor: Scale factor (default: 1)
|
||||||
|
- data_type: Data type (default: "float")
|
||||||
|
- thread_nums: Number of threads (default: 10)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (disk_graph_index_path, partition_bin_path)
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
RuntimeError: If the partitioning process fails
|
||||||
|
"""
|
||||||
|
# Set default parameters
|
||||||
|
params = {
|
||||||
|
"gp_times": 10,
|
||||||
|
"lock_nums": 10,
|
||||||
|
"cut": 100,
|
||||||
|
"scale_factor": 1,
|
||||||
|
"data_type": "float",
|
||||||
|
"thread_nums": 10,
|
||||||
|
**kwargs,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Determine output directory
|
||||||
|
if output_dir is None:
|
||||||
|
output_dir = str(Path(index_prefix_path).parent)
|
||||||
|
|
||||||
|
# Create output directory if it doesn't exist
|
||||||
|
Path(output_dir).mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Determine partition prefix
|
||||||
|
if partition_prefix is None:
|
||||||
|
partition_prefix = Path(index_prefix_path).name
|
||||||
|
|
||||||
|
# Get executable paths
|
||||||
|
partitioner_path = self._get_executable_path("partitioner")
|
||||||
|
relayout_path = self._get_executable_path("index_relayout")
|
||||||
|
|
||||||
|
# Create temporary directory for processing
|
||||||
|
with tempfile.TemporaryDirectory() as temp_dir:
|
||||||
|
# Change to the graph_partition directory for temporary files
|
||||||
|
graph_partition_dir = (
|
||||||
|
Path(__file__).parent.parent / "third_party" / "DiskANN" / "graph_partition"
|
||||||
|
)
|
||||||
|
original_dir = os.getcwd()
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.chdir(graph_partition_dir)
|
||||||
|
|
||||||
|
# Create temporary data directory
|
||||||
|
temp_data_dir = Path(temp_dir) / "data"
|
||||||
|
temp_data_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Set up paths for temporary files
|
||||||
|
graph_path = temp_data_dir / "starling" / "_M_R_L_B" / "GRAPH"
|
||||||
|
graph_gp_path = (
|
||||||
|
graph_path
|
||||||
|
/ f"GP_TIMES_{params['gp_times']}_LOCK_{params['lock_nums']}_GP_USE_FREQ0_CUT{params['cut']}_SCALE{params['scale_factor']}"
|
||||||
|
)
|
||||||
|
graph_gp_path.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Find input index file
|
||||||
|
old_index_file = f"{index_prefix_path}_disk_beam_search.index"
|
||||||
|
if not os.path.exists(old_index_file):
|
||||||
|
old_index_file = f"{index_prefix_path}_disk.index"
|
||||||
|
|
||||||
|
if not os.path.exists(old_index_file):
|
||||||
|
raise RuntimeError(f"Index file not found: {old_index_file}")
|
||||||
|
|
||||||
|
# Run partitioner
|
||||||
|
gp_file_path = graph_gp_path / "_part.bin"
|
||||||
|
partitioner_cmd = [
|
||||||
|
partitioner_path,
|
||||||
|
"--index_file",
|
||||||
|
old_index_file,
|
||||||
|
"--data_type",
|
||||||
|
params["data_type"],
|
||||||
|
"--gp_file",
|
||||||
|
str(gp_file_path),
|
||||||
|
"-T",
|
||||||
|
str(params["thread_nums"]),
|
||||||
|
"--ldg_times",
|
||||||
|
str(params["gp_times"]),
|
||||||
|
"--scale",
|
||||||
|
str(params["scale_factor"]),
|
||||||
|
"--mode",
|
||||||
|
"1",
|
||||||
|
]
|
||||||
|
|
||||||
|
print(f"Running partitioner: {' '.join(partitioner_cmd)}")
|
||||||
|
result = subprocess.run(
|
||||||
|
partitioner_cmd, capture_output=True, text=True, cwd=graph_partition_dir
|
||||||
|
)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
raise RuntimeError(
|
||||||
|
f"Partitioner failed with return code {result.returncode}.\n"
|
||||||
|
f"stdout: {result.stdout}\n"
|
||||||
|
f"stderr: {result.stderr}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run relayout
|
||||||
|
part_tmp_index = graph_gp_path / "_part_tmp.index"
|
||||||
|
relayout_cmd = [
|
||||||
|
relayout_path,
|
||||||
|
old_index_file,
|
||||||
|
str(gp_file_path),
|
||||||
|
params["data_type"],
|
||||||
|
"1",
|
||||||
|
]
|
||||||
|
|
||||||
|
print(f"Running relayout: {' '.join(relayout_cmd)}")
|
||||||
|
result = subprocess.run(
|
||||||
|
relayout_cmd, capture_output=True, text=True, cwd=graph_partition_dir
|
||||||
|
)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
raise RuntimeError(
|
||||||
|
f"Relayout failed with return code {result.returncode}.\n"
|
||||||
|
f"stdout: {result.stdout}\n"
|
||||||
|
f"stderr: {result.stderr}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Copy results to output directory
|
||||||
|
disk_graph_path = Path(output_dir) / f"{partition_prefix}_disk_graph.index"
|
||||||
|
partition_bin_path = Path(output_dir) / f"{partition_prefix}_partition.bin"
|
||||||
|
|
||||||
|
shutil.copy2(part_tmp_index, disk_graph_path)
|
||||||
|
shutil.copy2(gp_file_path, partition_bin_path)
|
||||||
|
|
||||||
|
print(f"Results copied to: {output_dir}")
|
||||||
|
return str(disk_graph_path), str(partition_bin_path)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
os.chdir(original_dir)
|
||||||
|
|
||||||
|
def get_partition_info(self, partition_bin_path: str) -> dict:
|
||||||
|
"""
|
||||||
|
Get information about a partition file.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
partition_bin_path: Path to the partition binary file
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary containing partition information
|
||||||
|
"""
|
||||||
|
if not os.path.exists(partition_bin_path):
|
||||||
|
raise FileNotFoundError(f"Partition file not found: {partition_bin_path}")
|
||||||
|
|
||||||
|
# For now, return basic file information
|
||||||
|
# In the future, this could parse the binary file for detailed info
|
||||||
|
stat = os.stat(partition_bin_path)
|
||||||
|
return {
|
||||||
|
"file_size": stat.st_size,
|
||||||
|
"file_path": partition_bin_path,
|
||||||
|
"modified_time": stat.st_mtime,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def partition_graph(
|
||||||
|
index_prefix_path: str,
|
||||||
|
output_dir: Optional[str] = None,
|
||||||
|
partition_prefix: Optional[str] = None,
|
||||||
|
build_type: str = "release",
|
||||||
|
**kwargs,
|
||||||
|
) -> tuple[str, str]:
|
||||||
|
"""
|
||||||
|
Convenience function to partition a graph index.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
index_prefix_path: Path to the index prefix
|
||||||
|
output_dir: Output directory (defaults to parent of index_prefix_path)
|
||||||
|
partition_prefix: Prefix for output files (defaults to basename of index_prefix_path)
|
||||||
|
build_type: Build type for executables ("debug" or "release")
|
||||||
|
**kwargs: Additional parameters for graph partitioning
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (disk_graph_index_path, partition_bin_path)
|
||||||
|
"""
|
||||||
|
partitioner = GraphPartitioner(build_type=build_type)
|
||||||
|
return partitioner.partition_graph(index_prefix_path, output_dir, partition_prefix, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
# Example usage:
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Example: partition an index
|
||||||
|
try:
|
||||||
|
disk_graph_path, partition_bin_path = partition_graph(
|
||||||
|
"/path/to/your/index_prefix", gp_times=10, lock_nums=10, cut=100
|
||||||
|
)
|
||||||
|
print("Partitioning completed successfully!")
|
||||||
|
print(f"Disk graph index: {disk_graph_path}")
|
||||||
|
print(f"Partition binary: {partition_bin_path}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Partitioning failed: {e}")
|
||||||
@@ -4,8 +4,8 @@ build-backend = "scikit_build_core.build"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "leann-backend-diskann"
|
name = "leann-backend-diskann"
|
||||||
version = "0.1.0"
|
version = "0.3.2"
|
||||||
dependencies = ["leann-core==0.1.0", "numpy"]
|
dependencies = ["leann-core==0.3.2", "numpy", "protobuf>=3.19.0"]
|
||||||
|
|
||||||
[tool.scikit-build]
|
[tool.scikit-build]
|
||||||
# Key: simplified CMake path
|
# Key: simplified CMake path
|
||||||
@@ -17,3 +17,5 @@ editable.mode = "redirect"
|
|||||||
cmake.build-type = "Release"
|
cmake.build-type = "Release"
|
||||||
build.verbose = true
|
build.verbose = true
|
||||||
build.tool-args = ["-j8"]
|
build.tool-args = ["-j8"]
|
||||||
|
# Let CMake find packages via Homebrew prefix
|
||||||
|
cmake.define = {CMAKE_PREFIX_PATH = {env = "CMAKE_PREFIX_PATH"}, OpenMP_ROOT = {env = "OpenMP_ROOT"}}
|
||||||
|
|||||||
Submodule packages/leann-backend-diskann/third_party/DiskANN updated: af2a26481e...c593831474
@@ -5,11 +5,28 @@ set(CMAKE_CXX_COMPILER_WORKS 1)

# Set OpenMP path for macOS
if(APPLE)
-set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I/opt/homebrew/opt/libomp/include")
-set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I/opt/homebrew/opt/libomp/include")
+# Detect Homebrew installation path (Apple Silicon vs Intel)
+if(EXISTS "/opt/homebrew/opt/libomp")
+set(HOMEBREW_PREFIX "/opt/homebrew")
+elseif(EXISTS "/usr/local/opt/libomp")
+set(HOMEBREW_PREFIX "/usr/local")
+else()
+message(FATAL_ERROR "Could not find libomp installation. Please install with: brew install libomp")
+endif()
+
+set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_PREFIX}/opt/libomp/include")
+set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_PREFIX}/opt/libomp/include")
set(OpenMP_C_LIB_NAMES "omp")
set(OpenMP_CXX_LIB_NAMES "omp")
-set(OpenMP_omp_LIBRARY "/opt/homebrew/opt/libomp/lib/libomp.dylib")
+set(OpenMP_omp_LIBRARY "${HOMEBREW_PREFIX}/opt/libomp/lib/libomp.dylib")
+
+# Force use of system libc++ to avoid version mismatch
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
+set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++")
+set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -stdlib=libc++")
+
+# Set minimum macOS version for better compatibility
+set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "Minimum macOS version")
endif()

# Use system ZeroMQ instead of building from source
@@ -1 +1 @@
-from . import hnsw_backend
+from . import hnsw_backend as hnsw_backend
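The next diff is the CSR converter module (imported elsewhere in this comparison as `convert_to_csr`). For orientation, here is a minimal usage sketch based only on the function signature and CLI visible in that diff; the file paths are placeholders and the flat import is an assumption made for the sketch (inside the package it is imported as `from .convert_to_csr import convert_hnsw_graph_to_csr`).

# Hypothetical invocation of the converter defined in the diff below.
# Paths are placeholders; prune_embeddings=True writes a NULL storage marker
# instead of copying the embedding storage into the output file.
from convert_to_csr import convert_hnsw_graph_to_csr  # assumed flat import for this sketch

ok = convert_hnsw_graph_to_csr(
    "index.index",      # input: Faiss IndexHNSWFlat file
    "index.csr.index",  # output: CSR-based HNSW graph file
    prune_embeddings=True,
)
if not ok:
    raise SystemExit("HNSW -> CSR conversion failed")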
@@ -1,87 +1,122 @@
+import argparse
+import gc # Import garbage collector interface
+import logging
+import os
import struct
import sys
-import numpy as np
-import os
-import argparse
-import gc # Import garbage collector interface
import time
+
+import numpy as np
+
+# Set up logging to avoid print buffer issues
+logger = logging.getLogger(__name__)
+LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
+log_level = getattr(logging, LOG_LEVEL, logging.WARNING)
+logger.setLevel(log_level)
+
# --- FourCCs (add more if needed) ---
-INDEX_HNSW_FLAT_FOURCC = int.from_bytes(b'IHNf', 'little')
+INDEX_HNSW_FLAT_FOURCC = int.from_bytes(b"IHNf", "little")
# Add other HNSW fourccs if you expect different storage types inside HNSW
# INDEX_HNSW_PQ_FOURCC = int.from_bytes(b'IHNp', 'little')
# INDEX_HNSW_SQ_FOURCC = int.from_bytes(b'IHNs', 'little')
# INDEX_HNSW_CAGRA_FOURCC = int.from_bytes(b'IHNc', 'little') # Example

EXPECTED_HNSW_FOURCCS = {INDEX_HNSW_FLAT_FOURCC} # Modify if needed
-NULL_INDEX_FOURCC = int.from_bytes(b'null', 'little')
+NULL_INDEX_FOURCC = int.from_bytes(b"null", "little")

# --- Helper functions for reading/writing binary data ---

+
def read_struct(f, fmt):
"""Reads data according to the struct format."""
size = struct.calcsize(fmt)
data = f.read(size)
if len(data) != size:
-raise EOFError(f"File ended unexpectedly reading struct fmt '{fmt}'. Expected {size} bytes, got {len(data)}.")
+raise EOFError(
+f"File ended unexpectedly reading struct fmt '{fmt}'. Expected {size} bytes, got {len(data)}."
+)
return struct.unpack(fmt, data)[0]

+
def read_vector_raw(f, element_fmt_char):
"""Reads a vector (size followed by data), returns count and raw bytes."""
count = -1 # Initialize count
total_bytes = -1 # Initialize total_bytes
try:
-count = read_struct(f, '<Q') # size_t usually 64-bit unsigned
+count = read_struct(f, "<Q") # size_t usually 64-bit unsigned
element_size = struct.calcsize(element_fmt_char)
# --- FIX for MemoryError: Check for unreasonably large count ---
max_reasonable_count = 10 * (10**9) # ~10 billion elements limit
if count > max_reasonable_count or count < 0:
-raise MemoryError(f"Vector count {count} seems unreasonably large, possibly due to file corruption or incorrect format read.")
+raise MemoryError(
+f"Vector count {count} seems unreasonably large, possibly due to file corruption or incorrect format read."
+)

total_bytes = count * element_size
# --- FIX for MemoryError: Check for huge byte size before allocation ---
max_reasonable_bytes = 50 * (1024**3) # ~50 GB limit
if total_bytes > max_reasonable_bytes or total_bytes < 0: # Check for overflow
-raise MemoryError(f"Attempting to read {total_bytes} bytes ({count} elements * {element_size} bytes/element), which exceeds the safety limit. File might be corrupted or format mismatch.")
+raise MemoryError(
+f"Attempting to read {total_bytes} bytes ({count} elements * {element_size} bytes/element), which exceeds the safety limit. File might be corrupted or format mismatch."
+)

data_bytes = f.read(total_bytes)

if len(data_bytes) != total_bytes:
-raise EOFError(f"File ended unexpectedly reading vector data. Expected {total_bytes} bytes, got {len(data_bytes)}.")
+raise EOFError(
+f"File ended unexpectedly reading vector data. Expected {total_bytes} bytes, got {len(data_bytes)}."
+)
return count, data_bytes
except (MemoryError, OverflowError) as e:
# Add context to the error message
-print(f"\nError during raw vector read (element_fmt='{element_fmt_char}', count={count}, total_bytes={total_bytes}): {e}", file=sys.stderr)
-raise e # Re-raise the original error type
+print(
+f"\nError during raw vector read (element_fmt='{element_fmt_char}', count={count}, total_bytes={total_bytes}): {e}",
+file=sys.stderr,
+)
+raise e # Re-raise the original error type

+
def read_numpy_vector(f, np_dtype, struct_fmt_char):
"""Reads a vector into a NumPy array."""
count = -1 # Initialize count for robust error handling
-print(f" Reading vector (dtype={np_dtype}, fmt='{struct_fmt_char}')... ", end='', flush=True)
+print(
+f" Reading vector (dtype={np_dtype}, fmt='{struct_fmt_char}')... ",
+end="",
+flush=True,
+)
try:
count, data_bytes = read_vector_raw(f, struct_fmt_char)
print(f"Count={count}, Bytes={len(data_bytes)}")
if count > 0 and len(data_bytes) > 0:
arr = np.frombuffer(data_bytes, dtype=np_dtype)
if arr.size != count:
-raise ValueError(f"Inconsistent array size after reading. Expected {count}, got {arr.size}")
+raise ValueError(
+f"Inconsistent array size after reading. Expected {count}, got {arr.size}"
+)
return arr
elif count == 0:
return np.array([], dtype=np_dtype)
else:
raise ValueError("Read zero bytes but count > 0.")
except MemoryError as e:
# Now count should be defined (or -1 if error was in read_struct)
-print(f"\nMemoryError creating NumPy array (dtype={np_dtype}, count={count}). {e}", file=sys.stderr)
+print(
+f"\nMemoryError creating NumPy array (dtype={np_dtype}, count={count}). {e}",
+file=sys.stderr,
+)
raise e
except Exception as e: # Catch other potential errors like ValueError
-print(f"\nError reading numpy vector (dtype={np_dtype}, fmt='{struct_fmt_char}', count={count}): {e}", file=sys.stderr)
+print(
+f"\nError reading numpy vector (dtype={np_dtype}, fmt='{struct_fmt_char}', count={count}): {e}",
+file=sys.stderr,
+)
raise e

+
def write_numpy_vector(f, arr, struct_fmt_char):
"""Writes a NumPy array as a vector (size followed by data)."""
count = arr.size
-f.write(struct.pack('<Q', count))
+f.write(struct.pack("<Q", count))
try:
expected_dtype = np.dtype(struct_fmt_char)
if arr.dtype != expected_dtype:
@@ -89,23 +124,30 @@ def write_numpy_vector(f, arr, struct_fmt_char):
else:
data_to_write = arr.tobytes()
f.write(data_to_write)
del data_to_write # Hint GC
except MemoryError as e:
-print(f"\nMemoryError converting NumPy array to bytes for writing (size={count}, dtype={arr.dtype}). {e}", file=sys.stderr)
-raise e
+print(
+f"\nMemoryError converting NumPy array to bytes for writing (size={count}, dtype={arr.dtype}). {e}",
+file=sys.stderr,
+)
+raise e

+
def write_list_vector(f, lst, struct_fmt_char):
"""Writes a Python list as a vector iteratively."""
count = len(lst)
-f.write(struct.pack('<Q', count))
+f.write(struct.pack("<Q", count))
-fmt = '<' + struct_fmt_char
+fmt = "<" + struct_fmt_char
chunk_size = 1024 * 1024
element_size = struct.calcsize(fmt)
# Allocate buffer outside the loop if possible, or handle MemoryError during allocation
try:
buffer = bytearray(chunk_size * element_size)
except MemoryError:
-print(f"MemoryError: Cannot allocate buffer for writing list vector chunk (size {chunk_size * element_size} bytes).", file=sys.stderr)
+print(
+f"MemoryError: Cannot allocate buffer for writing list vector chunk (size {chunk_size * element_size} bytes).",
+file=sys.stderr,
+)
raise
buffer_count = 0

@@ -116,65 +158,79 @@ def write_list_vector(f, lst, struct_fmt_char):
buffer_count += 1

if buffer_count == chunk_size or i == count - 1:
-f.write(buffer[:buffer_count * element_size])
+f.write(buffer[: buffer_count * element_size])
buffer_count = 0

except struct.error as e:
-print(f"\nStruct packing error for item {item} at index {i} with format '{fmt}'. {e}", file=sys.stderr)
+print(
+f"\nStruct packing error for item {item} at index {i} with format '{fmt}'. {e}",
+file=sys.stderr,
+)
raise e


def get_cum_neighbors(cum_nneighbor_per_level_np, level):
"""Helper to get cumulative neighbors count, matching C++ logic."""
-if level < 0: return 0
+if level < 0:
+return 0
if level < len(cum_nneighbor_per_level_np):
return cum_nneighbor_per_level_np[level]
else:
return cum_nneighbor_per_level_np[-1] if len(cum_nneighbor_per_level_np) > 0 else 0

-def write_compact_format(f_out, original_hnsw_data, assign_probas_np, cum_nneighbor_per_level_np,
-levels_np, compact_level_ptr, compact_node_offsets_np,
-compact_neighbors_data, storage_fourcc, storage_data):
+def write_compact_format(
+f_out,
+original_hnsw_data,
+assign_probas_np,
+cum_nneighbor_per_level_np,
+levels_np,
+compact_level_ptr,
+compact_node_offsets_np,
+compact_neighbors_data,
+storage_fourcc,
+storage_data,
+):
"""Write HNSW data in compact format following C++ read order exactly."""
# Write IndexHNSW Header
-f_out.write(struct.pack('<I', original_hnsw_data['index_fourcc']))
+f_out.write(struct.pack("<I", original_hnsw_data["index_fourcc"]))
-f_out.write(struct.pack('<i', original_hnsw_data['d']))
+f_out.write(struct.pack("<i", original_hnsw_data["d"]))
-f_out.write(struct.pack('<q', original_hnsw_data['ntotal']))
+f_out.write(struct.pack("<q", original_hnsw_data["ntotal"]))
-f_out.write(struct.pack('<q', original_hnsw_data['dummy1']))
+f_out.write(struct.pack("<q", original_hnsw_data["dummy1"]))
-f_out.write(struct.pack('<q', original_hnsw_data['dummy2']))
+f_out.write(struct.pack("<q", original_hnsw_data["dummy2"]))
-f_out.write(struct.pack('<?', original_hnsw_data['is_trained']))
+f_out.write(struct.pack("<?", original_hnsw_data["is_trained"]))
-f_out.write(struct.pack('<i', original_hnsw_data['metric_type']))
+f_out.write(struct.pack("<i", original_hnsw_data["metric_type"]))
-if original_hnsw_data['metric_type'] > 1:
+if original_hnsw_data["metric_type"] > 1:
-f_out.write(struct.pack('<f', original_hnsw_data['metric_arg']))
+f_out.write(struct.pack("<f", original_hnsw_data["metric_arg"]))

# Write HNSW struct parts (standard order)
-write_numpy_vector(f_out, assign_probas_np, 'd')
+write_numpy_vector(f_out, assign_probas_np, "d")
-write_numpy_vector(f_out, cum_nneighbor_per_level_np, 'i')
+write_numpy_vector(f_out, cum_nneighbor_per_level_np, "i")
-write_numpy_vector(f_out, levels_np, 'i')
+write_numpy_vector(f_out, levels_np, "i")

# Write compact format flag
-f_out.write(struct.pack('<?', True)) # storage_is_compact = True
+f_out.write(struct.pack("<?", True)) # storage_is_compact = True

# Write compact data in CORRECT C++ read order: level_ptr, node_offsets FIRST
if isinstance(compact_level_ptr, np.ndarray):
-write_numpy_vector(f_out, compact_level_ptr, 'Q')
+write_numpy_vector(f_out, compact_level_ptr, "Q")
else:
-write_list_vector(f_out, compact_level_ptr, 'Q')
+write_list_vector(f_out, compact_level_ptr, "Q")

-write_numpy_vector(f_out, compact_node_offsets_np, 'Q')
+write_numpy_vector(f_out, compact_node_offsets_np, "Q")

# Write HNSW scalar parameters
-f_out.write(struct.pack('<i', original_hnsw_data['entry_point']))
+f_out.write(struct.pack("<i", original_hnsw_data["entry_point"]))
-f_out.write(struct.pack('<i', original_hnsw_data['max_level']))
+f_out.write(struct.pack("<i", original_hnsw_data["max_level"]))
-f_out.write(struct.pack('<i', original_hnsw_data['efConstruction']))
+f_out.write(struct.pack("<i", original_hnsw_data["efConstruction"]))
-f_out.write(struct.pack('<i', original_hnsw_data['efSearch']))
+f_out.write(struct.pack("<i", original_hnsw_data["efSearch"]))
-f_out.write(struct.pack('<i', original_hnsw_data['dummy_upper_beam']))
+f_out.write(struct.pack("<i", original_hnsw_data["dummy_upper_beam"]))

# Write storage fourcc (this determines how to read what follows)
-f_out.write(struct.pack('<I', storage_fourcc))
+f_out.write(struct.pack("<I", storage_fourcc))

# Write compact neighbors data AFTER storage fourcc
-write_list_vector(f_out, compact_neighbors_data, 'i')
+write_list_vector(f_out, compact_neighbors_data, "i")

# Write storage data if not NULL (only after neighbors)
if storage_fourcc != NULL_INDEX_FOURCC and storage_data:
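For readers tracking the byte layout, the write order implemented by `write_compact_format` in the hunk above can be summarized as a sketch. This only restates the sequence of writes visible in the diff; it is not an additional API in the repository.

# Sketch: on-disk order produced by write_compact_format (taken from the
# sequence of f_out.write / write_numpy_vector / write_list_vector calls above).
COMPACT_WRITE_ORDER = [
    "index fourcc (uint32)",
    "d (int32), ntotal (int64), dummy1 (int64), dummy2 (int64)",
    "is_trained (bool), metric_type (int32), metric_arg (float32, only if metric_type > 1)",
    "assign_probas (vector of float64)",
    "cum_nneighbor_per_level (vector of int32)",
    "levels (vector of int32)",
    "storage_is_compact flag (bool, always True here)",
    "compact_level_ptr (vector of uint64)",
    "compact_node_offsets (vector of uint64)",
    "entry_point, max_level, efConstruction, efSearch, dummy_upper_beam (int32 each)",
    "storage fourcc (uint32)",
    "compact neighbors data (vector of int32)",
    "storage data (raw bytes, only when the storage fourcc is not 'null')",
]

if __name__ == "__main__":
    for step, field in enumerate(COMPACT_WRITE_ORDER, 1):
        print(f"{step:2d}. {field}")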
@@ -183,6 +239,7 @@ def write_compact_format(f_out, original_hnsw_data, assign_probas_np, cum_nneigh

# --- Main Conversion Logic ---

+
def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=True):
"""
Converts an HNSW graph file to the CSR format.
@@ -193,94 +250,120 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=
output_filename: Output CSR index file
prune_embeddings: Whether to prune embedding storage (write NULL storage marker)
"""
+# Keep prints simple; rely on CI runner to flush output as needed

print(f"Starting conversion: {input_filename} -> {output_filename}")
start_time = time.time()
original_hnsw_data = {}
neighbors_np = None # Initialize to allow check in finally block
try:
-with open(input_filename, 'rb') as f_in, open(output_filename, 'wb') as f_out:
+with open(input_filename, "rb") as f_in, open(output_filename, "wb") as f_out:

# --- Read IndexHNSW FourCC and Header ---
print(f"[{time.time() - start_time:.2f}s] Reading Index HNSW header...")
# ... (Keep the header reading logic as before) ...
-hnsw_index_fourcc = read_struct(f_in, '<I')
+hnsw_index_fourcc = read_struct(f_in, "<I")
if hnsw_index_fourcc not in EXPECTED_HNSW_FOURCCS:
-print(f"Error: Expected HNSW Index FourCC ({list(EXPECTED_HNSW_FOURCCS)}), got {hnsw_index_fourcc:08x}.", file=sys.stderr)
-return False
+print(
+f"Error: Expected HNSW Index FourCC ({list(EXPECTED_HNSW_FOURCCS)}), got {hnsw_index_fourcc:08x}.",
+file=sys.stderr,
+)
+return False
-original_hnsw_data['index_fourcc'] = hnsw_index_fourcc
+original_hnsw_data["index_fourcc"] = hnsw_index_fourcc
-original_hnsw_data['d'] = read_struct(f_in, '<i')
+original_hnsw_data["d"] = read_struct(f_in, "<i")
-original_hnsw_data['ntotal'] = read_struct(f_in, '<q')
+original_hnsw_data["ntotal"] = read_struct(f_in, "<q")
-original_hnsw_data['dummy1'] = read_struct(f_in, '<q')
+original_hnsw_data["dummy1"] = read_struct(f_in, "<q")
-original_hnsw_data['dummy2'] = read_struct(f_in, '<q')
+original_hnsw_data["dummy2"] = read_struct(f_in, "<q")
-original_hnsw_data['is_trained'] = read_struct(f_in, '?')
+original_hnsw_data["is_trained"] = read_struct(f_in, "?")
-original_hnsw_data['metric_type'] = read_struct(f_in, '<i')
+original_hnsw_data["metric_type"] = read_struct(f_in, "<i")
-original_hnsw_data['metric_arg'] = 0.0
+original_hnsw_data["metric_arg"] = 0.0
-if original_hnsw_data['metric_type'] > 1:
+if original_hnsw_data["metric_type"] > 1:
-original_hnsw_data['metric_arg'] = read_struct(f_in, '<f')
+original_hnsw_data["metric_arg"] = read_struct(f_in, "<f")
-print(f"[{time.time() - start_time:.2f}s] Header read: d={original_hnsw_data['d']}, ntotal={original_hnsw_data['ntotal']}")
+print(
+f"[{time.time() - start_time:.2f}s] Header read: d={original_hnsw_data['d']}, ntotal={original_hnsw_data['ntotal']}"
+)

# --- Read original HNSW struct data ---
print(f"[{time.time() - start_time:.2f}s] Reading HNSW struct vectors...")
-assign_probas_np = read_numpy_vector(f_in, np.float64, 'd')
+assign_probas_np = read_numpy_vector(f_in, np.float64, "d")
-print(f"[{time.time() - start_time:.2f}s] Read assign_probas ({assign_probas_np.size})")
+print(
+f"[{time.time() - start_time:.2f}s] Read assign_probas ({assign_probas_np.size})"
+)
gc.collect()

-cum_nneighbor_per_level_np = read_numpy_vector(f_in, np.int32, 'i')
+cum_nneighbor_per_level_np = read_numpy_vector(f_in, np.int32, "i")
-print(f"[{time.time() - start_time:.2f}s] Read cum_nneighbor_per_level ({cum_nneighbor_per_level_np.size})")
+print(
+f"[{time.time() - start_time:.2f}s] Read cum_nneighbor_per_level ({cum_nneighbor_per_level_np.size})"
+)
gc.collect()

-levels_np = read_numpy_vector(f_in, np.int32, 'i')
+levels_np = read_numpy_vector(f_in, np.int32, "i")
print(f"[{time.time() - start_time:.2f}s] Read levels ({levels_np.size})")
gc.collect()

ntotal = len(levels_np)
-if ntotal != original_hnsw_data['ntotal']:
+if ntotal != original_hnsw_data["ntotal"]:
-print(f"Warning: ntotal mismatch! Header says {original_hnsw_data['ntotal']}, levels vector size is {ntotal}. Using levels vector size.", file=sys.stderr)
-original_hnsw_data['ntotal'] = ntotal
+print(
+f"Warning: ntotal mismatch! Header says {original_hnsw_data['ntotal']}, levels vector size is {ntotal}. Using levels vector size.",
+file=sys.stderr,
+)
+original_hnsw_data["ntotal"] = ntotal

# --- Check for compact format flag ---
print(f"[{time.time() - start_time:.2f}s] Probing for compact storage flag...")
pos_before_compact = f_in.tell()
try:
-is_compact_flag = read_struct(f_in, '<?')
+is_compact_flag = read_struct(f_in, "<?")
print(f"[{time.time() - start_time:.2f}s] Found compact flag: {is_compact_flag}")

if is_compact_flag:
# Input is already in compact format - read compact data
-print(f"[{time.time() - start_time:.2f}s] Input is already in compact format, reading compact data...")
+print(
+f"[{time.time() - start_time:.2f}s] Input is already in compact format, reading compact data..."
+)

-compact_level_ptr = read_numpy_vector(f_in, np.uint64, 'Q')
+compact_level_ptr = read_numpy_vector(f_in, np.uint64, "Q")
-print(f"[{time.time() - start_time:.2f}s] Read compact_level_ptr ({compact_level_ptr.size})")
+print(
+f"[{time.time() - start_time:.2f}s] Read compact_level_ptr ({compact_level_ptr.size})"
+)

-compact_node_offsets_np = read_numpy_vector(f_in, np.uint64, 'Q')
+compact_node_offsets_np = read_numpy_vector(f_in, np.uint64, "Q")
-print(f"[{time.time() - start_time:.2f}s] Read compact_node_offsets ({compact_node_offsets_np.size})")
+print(
+f"[{time.time() - start_time:.2f}s] Read compact_node_offsets ({compact_node_offsets_np.size})"
+)

# Read scalar parameters
-original_hnsw_data['entry_point'] = read_struct(f_in, '<i')
+original_hnsw_data["entry_point"] = read_struct(f_in, "<i")
-original_hnsw_data['max_level'] = read_struct(f_in, '<i')
+original_hnsw_data["max_level"] = read_struct(f_in, "<i")
-original_hnsw_data['efConstruction'] = read_struct(f_in, '<i')
+original_hnsw_data["efConstruction"] = read_struct(f_in, "<i")
-original_hnsw_data['efSearch'] = read_struct(f_in, '<i')
+original_hnsw_data["efSearch"] = read_struct(f_in, "<i")
-original_hnsw_data['dummy_upper_beam'] = read_struct(f_in, '<i')
+original_hnsw_data["dummy_upper_beam"] = read_struct(f_in, "<i")
-print(f"[{time.time() - start_time:.2f}s] Read scalar params (ep={original_hnsw_data['entry_point']}, max_lvl={original_hnsw_data['max_level']})")
+print(
+f"[{time.time() - start_time:.2f}s] Read scalar params (ep={original_hnsw_data['entry_point']}, max_lvl={original_hnsw_data['max_level']})"
+)

# Read storage fourcc
-storage_fourcc = read_struct(f_in, '<I')
+storage_fourcc = read_struct(f_in, "<I")
-print(f"[{time.time() - start_time:.2f}s] Found storage fourcc: {storage_fourcc:08x}")
+print(
+f"[{time.time() - start_time:.2f}s] Found storage fourcc: {storage_fourcc:08x}"
+)

if prune_embeddings and storage_fourcc != NULL_INDEX_FOURCC:
# Read compact neighbors data
-compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, 'i')
+compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, "i")
-print(f"[{time.time() - start_time:.2f}s] Read compact neighbors data ({compact_neighbors_data_np.size})")
+print(
+f"[{time.time() - start_time:.2f}s] Read compact neighbors data ({compact_neighbors_data_np.size})"
+)
compact_neighbors_data = compact_neighbors_data_np.tolist()
del compact_neighbors_data_np

# Skip storage data and write with NULL marker
-print(f"[{time.time() - start_time:.2f}s] Pruning embeddings: Writing NULL storage marker.")
+print(
+f"[{time.time() - start_time:.2f}s] Pruning embeddings: Writing NULL storage marker."
+)
storage_fourcc = NULL_INDEX_FOURCC
elif not prune_embeddings:
# Read and preserve compact neighbors and storage
-compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, 'i')
+compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, "i")
compact_neighbors_data = compact_neighbors_data_np.tolist()
del compact_neighbors_data_np

@@ -288,16 +371,25 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=
storage_data = f_in.read()
else:
# Already pruned (NULL storage)
-compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, 'i')
+compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, "i")
compact_neighbors_data = compact_neighbors_data_np.tolist()
del compact_neighbors_data_np
-storage_data = b''
+storage_data = b""

# Write the updated compact format
print(f"[{time.time() - start_time:.2f}s] Writing updated compact format...")
-write_compact_format(f_out, original_hnsw_data, assign_probas_np, cum_nneighbor_per_level_np,
-levels_np, compact_level_ptr, compact_node_offsets_np,
-compact_neighbors_data, storage_fourcc, storage_data if not prune_embeddings else b'')
+write_compact_format(
+f_out,
+original_hnsw_data,
+assign_probas_np,
+cum_nneighbor_per_level_np,
+levels_np,
+compact_level_ptr,
+compact_node_offsets_np,
+compact_neighbors_data,
+storage_fourcc,
+storage_data if not prune_embeddings else b"",
+)

print(f"[{time.time() - start_time:.2f}s] Conversion complete.")
return True
@@ -305,63 +397,86 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=
else:
# is_compact=False, rewind and read original format
f_in.seek(pos_before_compact)
-print(f"[{time.time() - start_time:.2f}s] Compact flag is False, reading original format...")
+print(
+f"[{time.time() - start_time:.2f}s] Compact flag is False, reading original format..."
+)

except EOFError:
# No compact flag found, assume original format
f_in.seek(pos_before_compact)
-print(f"[{time.time() - start_time:.2f}s] No compact flag found, assuming original format...")
+print(
+f"[{time.time() - start_time:.2f}s] No compact flag found, assuming original format..."
+)

# --- Handle potential extra byte in original format (like C++ code) ---
-print(f"[{time.time() - start_time:.2f}s] Probing for potential extra byte before non-compact offsets...")
+print(
+f"[{time.time() - start_time:.2f}s] Probing for potential extra byte before non-compact offsets..."
+)
pos_before_probe = f_in.tell()
try:
-suspected_flag = read_struct(f_in, '<B') # Read 1 byte
+suspected_flag = read_struct(f_in, "<B") # Read 1 byte
if suspected_flag == 0x00:
-print(f"[{time.time() - start_time:.2f}s] Found and consumed an unexpected 0x00 byte.")
+print(
+f"[{time.time() - start_time:.2f}s] Found and consumed an unexpected 0x00 byte."
+)
elif suspected_flag == 0x01:
-print(f"[{time.time() - start_time:.2f}s] ERROR: Found 0x01 but is_compact should be False")
+print(
+f"[{time.time() - start_time:.2f}s] ERROR: Found 0x01 but is_compact should be False"
+)
raise ValueError("Inconsistent compact flag state")
else:
# Rewind - this byte is part of offsets data
f_in.seek(pos_before_probe)
-print(f"[{time.time() - start_time:.2f}s] Rewound to original position (byte was 0x{suspected_flag:02x})")
+print(
+f"[{time.time() - start_time:.2f}s] Rewound to original position (byte was 0x{suspected_flag:02x})"
+)
except EOFError:
f_in.seek(pos_before_probe)
-print(f"[{time.time() - start_time:.2f}s] No extra byte found (EOF), proceeding with offsets read")
+print(
+f"[{time.time() - start_time:.2f}s] No extra byte found (EOF), proceeding with offsets read"
+)

# --- Read original format data ---
-offsets_np = read_numpy_vector(f_in, np.uint64, 'Q')
+offsets_np = read_numpy_vector(f_in, np.uint64, "Q")
print(f"[{time.time() - start_time:.2f}s] Read offsets ({offsets_np.size})")
if len(offsets_np) != ntotal + 1:
-raise ValueError(f"Inconsistent offsets size: len(levels)={ntotal} but len(offsets)={len(offsets_np)}")
+raise ValueError(
+f"Inconsistent offsets size: len(levels)={ntotal} but len(offsets)={len(offsets_np)}"
+)
gc.collect()

print(f"[{time.time() - start_time:.2f}s] Attempting to read neighbors vector...")
-neighbors_np = read_numpy_vector(f_in, np.int32, 'i')
+neighbors_np = read_numpy_vector(f_in, np.int32, "i")
print(f"[{time.time() - start_time:.2f}s] Read neighbors ({neighbors_np.size})")
expected_neighbors_size = offsets_np[-1] if ntotal > 0 else 0
if neighbors_np.size != expected_neighbors_size:
-print(f"Warning: neighbors vector size mismatch. Expected {expected_neighbors_size} based on offsets, got {neighbors_np.size}.")
+print(
+f"Warning: neighbors vector size mismatch. Expected {expected_neighbors_size} based on offsets, got {neighbors_np.size}."
+)
gc.collect()

-original_hnsw_data['entry_point'] = read_struct(f_in, '<i')
+original_hnsw_data["entry_point"] = read_struct(f_in, "<i")
-original_hnsw_data['max_level'] = read_struct(f_in, '<i')
+original_hnsw_data["max_level"] = read_struct(f_in, "<i")
-original_hnsw_data['efConstruction'] = read_struct(f_in, '<i')
+original_hnsw_data["efConstruction"] = read_struct(f_in, "<i")
-original_hnsw_data['efSearch'] = read_struct(f_in, '<i')
+original_hnsw_data["efSearch"] = read_struct(f_in, "<i")
-original_hnsw_data['dummy_upper_beam'] = read_struct(f_in, '<i')
+original_hnsw_data["dummy_upper_beam"] = read_struct(f_in, "<i")
-print(f"[{time.time() - start_time:.2f}s] Read scalar params (ep={original_hnsw_data['entry_point']}, max_lvl={original_hnsw_data['max_level']})")
+print(
+f"[{time.time() - start_time:.2f}s] Read scalar params (ep={original_hnsw_data['entry_point']}, max_lvl={original_hnsw_data['max_level']})"
+)

print(f"[{time.time() - start_time:.2f}s] Checking for storage data...")
storage_fourcc = None
try:
-storage_fourcc = read_struct(f_in, '<I')
+storage_fourcc = read_struct(f_in, "<I")
-print(f"[{time.time() - start_time:.2f}s] Found storage fourcc: {storage_fourcc:08x}.")
+print(
+f"[{time.time() - start_time:.2f}s] Found storage fourcc: {storage_fourcc:08x}."
+)
except EOFError:
print(f"[{time.time() - start_time:.2f}s] No storage data found (EOF).")
except Exception as e:
-print(f"[{time.time() - start_time:.2f}s] Error reading potential storage data: {e}")
+print(
+f"[{time.time() - start_time:.2f}s] Error reading potential storage data: {e}"
+)

# --- Perform Conversion ---
print(f"[{time.time() - start_time:.2f}s] Converting to CSR format...")
@@ -373,17 +488,21 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=

current_level_ptr_idx = 0
current_data_idx = 0
total_valid_neighbors_counted = 0 # For validation

# Optimize calculation by getting slices once per node if possible
for i in range(ntotal):
if i > 0 and i % (ntotal // 100 or 1) == 0: # Log progress roughly every 1%
progress = (i / ntotal) * 100
elapsed = time.time() - start_time
-print(f"\r[{elapsed:.2f}s] Converting node {i}/{ntotal} ({progress:.1f}%)...", end="")
+print(
+f"\r[{elapsed:.2f}s] Converting node {i}/{ntotal} ({progress:.1f}%)...",
+end="",
+)

node_max_level = levels_np[i] - 1
-if node_max_level < -1: node_max_level = -1
+if node_max_level < -1:
+node_max_level = -1

node_ptr_start_index = current_level_ptr_idx
compact_node_offsets_np[i] = node_ptr_start_index
@@ -394,13 +513,17 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=
for level in range(node_max_level + 1):
compact_level_ptr.append(current_data_idx)

-begin_orig_np = original_offset_start + get_cum_neighbors(cum_nneighbor_per_level_np, level)
-end_orig_np = original_offset_start + get_cum_neighbors(cum_nneighbor_per_level_np, level + 1)
+begin_orig_np = original_offset_start + get_cum_neighbors(
+cum_nneighbor_per_level_np, level
+)
+end_orig_np = original_offset_start + get_cum_neighbors(
+cum_nneighbor_per_level_np, level + 1
+)

begin_orig = int(begin_orig_np)
end_orig = int(end_orig_np)

neighbors_len = len(neighbors_np) # Cache length
begin_orig = min(max(0, begin_orig), neighbors_len)
end_orig = min(max(begin_orig, end_orig), neighbors_len)

@@ -413,82 +536,116 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=

if num_valid > 0:
# Append valid neighbors
-compact_neighbors_data.extend(level_neighbors_slice[valid_neighbors_mask])
+compact_neighbors_data.extend(
+level_neighbors_slice[valid_neighbors_mask]
+)
current_data_idx += num_valid
total_valid_neighbors_counted += num_valid


compact_level_ptr.append(current_data_idx)
current_level_ptr_idx += num_pointers_expected

compact_node_offsets_np[ntotal] = current_level_ptr_idx
-print(f"\r[{time.time() - start_time:.2f}s] Conversion loop finished. ") # Clear progress line
+print(
+f"\r[{time.time() - start_time:.2f}s] Conversion loop finished. "
+) # Clear progress line

# --- Validation Checks ---
print(f"[{time.time() - start_time:.2f}s] Running validation checks...")
valid_check_passed = True
# Check 1: Total valid neighbors count
-print(f" Checking total valid neighbor count...")
+print(" Checking total valid neighbor count...")
expected_valid_count = np.sum(neighbors_np >= 0)
if total_valid_neighbors_counted != len(compact_neighbors_data):
-print(f"Error: Mismatch between counted valid neighbors ({total_valid_neighbors_counted}) and final compact_data size ({len(compact_neighbors_data)})!", file=sys.stderr)
-valid_check_passed = False
+print(
+f"Error: Mismatch between counted valid neighbors ({total_valid_neighbors_counted}) and final compact_data size ({len(compact_neighbors_data)})!",
+file=sys.stderr,
+)
+valid_check_passed = False
if expected_valid_count != len(compact_neighbors_data):
-print(f"Error: Mismatch between NumPy count of valid neighbors ({expected_valid_count}) and final compact_data size ({len(compact_neighbors_data)})!", file=sys.stderr)
-valid_check_passed = False
+print(
+f"Error: Mismatch between NumPy count of valid neighbors ({expected_valid_count}) and final compact_data size ({len(compact_neighbors_data)})!",
+file=sys.stderr,
+)
+valid_check_passed = False
else:
print(f" OK: Total valid neighbors = {len(compact_neighbors_data)}")

# Check 2: Final pointer indices consistency
-print(f" Checking final pointer indices...")
+print(" Checking final pointer indices...")
if compact_node_offsets_np[ntotal] != len(compact_level_ptr):
-print(f"Error: Final node offset ({compact_node_offsets_np[ntotal]}) doesn't match level_ptr size ({len(compact_level_ptr)})!", file=sys.stderr)
-valid_check_passed = False
-if (len(compact_level_ptr) > 0 and compact_level_ptr[-1] != len(compact_neighbors_data)) or \
-(len(compact_level_ptr) == 0 and len(compact_neighbors_data) != 0):
-last_ptr = compact_level_ptr[-1] if len(compact_level_ptr) > 0 else -1
-print(f"Error: Last level pointer ({last_ptr}) doesn't match compact_data size ({len(compact_neighbors_data)})!", file=sys.stderr)
-valid_check_passed = False
+print(
+f"Error: Final node offset ({compact_node_offsets_np[ntotal]}) doesn't match level_ptr size ({len(compact_level_ptr)})!",
+file=sys.stderr,
+)
+valid_check_passed = False
+if (
+len(compact_level_ptr) > 0 and compact_level_ptr[-1] != len(compact_neighbors_data)
+) or (len(compact_level_ptr) == 0 and len(compact_neighbors_data) != 0):
+last_ptr = compact_level_ptr[-1] if len(compact_level_ptr) > 0 else -1
+print(
+f"Error: Last level pointer ({last_ptr}) doesn't match compact_data size ({len(compact_neighbors_data)})!",
+file=sys.stderr,
+)
+valid_check_passed = False
else:
-print(f" OK: Final pointers match data size.")
+print(" OK: Final pointers match data size.")

if not valid_check_passed:
-print("Error: Validation checks failed. Output file might be incorrect.", file=sys.stderr)
+print(
+"Error: Validation checks failed. Output file might be incorrect.",
+file=sys.stderr,
+)
# Optional: Exit here if validation fails
# return False

# --- Explicitly delete large intermediate arrays ---
-print(f"[{time.time() - start_time:.2f}s] Deleting original neighbors and offsets arrays...")
+print(
+f"[{time.time() - start_time:.2f}s] Deleting original neighbors and offsets arrays..."
+)
del neighbors_np
del offsets_np
gc.collect()

-print(f" CSR Stats: |data|={len(compact_neighbors_data)}, |level_ptr|={len(compact_level_ptr)}")
+print(
+f" CSR Stats: |data|={len(compact_neighbors_data)}, |level_ptr|={len(compact_level_ptr)}"
+)

# --- Write CSR HNSW graph data using unified function ---
-print(f"[{time.time() - start_time:.2f}s] Writing CSR HNSW graph data in FAISS-compatible order...")
+print(
+f"[{time.time() - start_time:.2f}s] Writing CSR HNSW graph data in FAISS-compatible order..."
+)

# Determine storage fourcc and data based on prune_embeddings
if prune_embeddings:
-print(f" Pruning embeddings: Writing NULL storage marker.")
+print(" Pruning embeddings: Writing NULL storage marker.")
output_storage_fourcc = NULL_INDEX_FOURCC
-storage_data = b''
+storage_data = b""
else:
# Keep embeddings - read and preserve original storage data
if storage_fourcc and storage_fourcc != NULL_INDEX_FOURCC:
-print(f" Preserving embeddings: Reading original storage data...")
+print(" Preserving embeddings: Reading original storage data...")
storage_data = f_in.read() # Read remaining storage data
output_storage_fourcc = storage_fourcc
print(f" Read {len(storage_data)} bytes of storage data")
else:
-print(f" No embeddings found in original file (NULL storage)")
+print(" No embeddings found in original file (NULL storage)")
output_storage_fourcc = NULL_INDEX_FOURCC
-storage_data = b''
+storage_data = b""

# Use the unified write function
-write_compact_format(f_out, original_hnsw_data, assign_probas_np, cum_nneighbor_per_level_np,
-levels_np, compact_level_ptr, compact_node_offsets_np,
-compact_neighbors_data, output_storage_fourcc, storage_data)
+write_compact_format(
+f_out,
+original_hnsw_data,
+assign_probas_np,
+cum_nneighbor_per_level_np,
+levels_np,
+compact_level_ptr,
+compact_node_offsets_np,
+compact_neighbors_data,
+output_storage_fourcc,
+storage_data,
+)

# Clean up memory
del assign_probas_np, cum_nneighbor_per_level_np, levels_np
@@ -503,40 +660,66 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=
print(f"Error: Input file not found: {input_filename}", file=sys.stderr)
return False
except MemoryError as e:
-print(f"\nFatal MemoryError during conversion: {e}. Insufficient RAM.", file=sys.stderr)
+print(
+f"\nFatal MemoryError during conversion: {e}. Insufficient RAM.",
+file=sys.stderr,
+)
# Clean up potentially partially written output file?
-try: os.remove(output_filename)
-except OSError: pass
+try:
+os.remove(output_filename)
+except OSError:
+pass
return False
except EOFError as e:
-print(f"Error: Reached end of file unexpectedly reading {input_filename}. {e}", file=sys.stderr)
+print(
+f"Error: Reached end of file unexpectedly reading {input_filename}. {e}",
+file=sys.stderr,
+)
-try: os.remove(output_filename)
-except OSError: pass
+try:
+os.remove(output_filename)
+except OSError:
+pass
return False
except Exception as e:
print(f"An unexpected error occurred during conversion: {e}", file=sys.stderr)
import traceback
+
traceback.print_exc()
try:
os.remove(output_filename)
-except OSError: pass
+except OSError:
+pass
return False
# Ensure neighbors_np is deleted even if an error occurs after its allocation
finally:
-if 'neighbors_np' in locals() and neighbors_np is not None:
-del neighbors_np
-gc.collect()
+try:
+if "neighbors_np" in locals() and neighbors_np is not None:
+del neighbors_np
+gc.collect()
+except NameError:
+pass


# --- Script Execution ---
if __name__ == "__main__":
-parser = argparse.ArgumentParser(description="Convert a Faiss IndexHNSWFlat file to a CSR-based HNSW graph file.")
+parser = argparse.ArgumentParser(
+description="Convert a Faiss IndexHNSWFlat file to a CSR-based HNSW graph file."
+)
parser.add_argument("input_index_file", help="Path to the input IndexHNSWFlat file")
-parser.add_argument("output_csr_graph_file", help="Path to write the output CSR HNSW graph file")
-parser.add_argument("--prune-embeddings", action="store_true", default=True,
-help="Prune embedding storage (write NULL storage marker)")
-parser.add_argument("--keep-embeddings", action="store_true",
-help="Keep embedding storage (overrides --prune-embeddings)")
+parser.add_argument(
+"output_csr_graph_file", help="Path to write the output CSR HNSW graph file"
+)
+parser.add_argument(
+"--prune-embeddings",
+action="store_true",
+default=True,
+help="Prune embedding storage (write NULL storage marker)",
+)
+parser.add_argument(
+"--keep-embeddings",
+action="store_true",
+help="Keep embedding storage (overrides --prune-embeddings)",
+)

args = parser.parse_args()

@@ -545,10 +728,12 @@ if __name__ == "__main__":
sys.exit(1)

if os.path.abspath(args.input_index_file) == os.path.abspath(args.output_csr_graph_file):
-print(f"Error: Input and output filenames cannot be the same.", file=sys.stderr)
+print("Error: Input and output filenames cannot be the same.", file=sys.stderr)
sys.exit(1)

prune_embeddings = args.prune_embeddings and not args.keep_embeddings
-success = convert_hnsw_graph_to_csr(args.input_index_file, args.output_csr_graph_file, prune_embeddings)
+success = convert_hnsw_graph_to_csr(
+args.input_index_file, args.output_csr_graph_file, prune_embeddings
+)
if not success:
sys.exit(1)
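The remaining hunks cover the HNSW backend module (the file that imports `convert_to_csr` and registers the "hnsw" backend). One behavioral change visible there is that cosine-distance builds now normalize vectors with a local `normalize_l2` helper instead of `faiss.normalize_L2`. A small self-contained sketch of that helper follows; its body is copied from the hunk below, and the toy data is made up for illustration only.

import numpy as np


def normalize_l2(data: np.ndarray) -> np.ndarray:
    """Row-wise L2 normalization, as added to the HNSW backend below."""
    norms = np.linalg.norm(data, axis=1, keepdims=True)
    norms[norms == 0] = 1  # avoid division by zero for all-zero rows
    return data / norms


# Toy example (not from the repository): the first row becomes unit length,
# the zero row is returned unchanged instead of producing NaNs.
vecs = np.array([[3.0, 4.0], [0.0, 0.0]], dtype=np.float32)
print(normalize_l2(vecs))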
@@ -1,19 +1,19 @@
-import numpy as np
-import os
-from pathlib import Path
-from typing import Dict, Any, List, Literal, Optional
-import shutil
import logging
+import os
+import shutil
+from pathlib import Path
+from typing import Any, Literal, Optional
+
-from leann.searcher_base import BaseSearcher
+import numpy as np
-from .convert_to_csr import convert_hnsw_graph_to_csr

-from leann.registry import register_backend
from leann.interface import (
-LeannBackendFactoryInterface,
LeannBackendBuilderInterface,
+LeannBackendFactoryInterface,
LeannBackendSearcherInterface,
)
+from leann.registry import register_backend
+from leann.searcher_base import BaseSearcher
+
+from .convert_to_csr import convert_hnsw_graph_to_csr
+
logger = logging.getLogger(__name__)

@@ -28,6 +28,12 @@ def get_metric_map():
}


+def normalize_l2(data: np.ndarray) -> np.ndarray:
+norms = np.linalg.norm(data, axis=1, keepdims=True)
+norms[norms == 0] = 1 # Avoid division by zero
+return data / norms
+
+
@register_backend("hnsw")
class HNSWBackend(LeannBackendFactoryInterface):
@staticmethod
@@ -48,8 +54,15 @@ class HNSWBuilder(LeannBackendBuilderInterface):
|
|||||||
self.efConstruction = self.build_params.setdefault("efConstruction", 200)
|
self.efConstruction = self.build_params.setdefault("efConstruction", 200)
|
||||||
self.distance_metric = self.build_params.setdefault("distance_metric", "mips")
|
self.distance_metric = self.build_params.setdefault("distance_metric", "mips")
|
||||||
self.dimensions = self.build_params.get("dimensions")
|
self.dimensions = self.build_params.get("dimensions")
|
||||||
|
if not self.is_recompute and self.is_compact:
|
||||||
|
# Auto-correct: non-recompute requires non-compact storage for HNSW
|
||||||
|
logger.warning(
|
||||||
|
"is_recompute=False requires non-compact HNSW. Forcing is_compact=False."
|
||||||
|
)
|
||||||
|
self.is_compact = False
|
||||||
|
self.build_params["is_compact"] = False
|
||||||
|
|
||||||
def build(self, data: np.ndarray, ids: List[str], index_path: str, **kwargs):
|
def build(self, data: np.ndarray, ids: list[str], index_path: str, **kwargs):
|
||||||
from . import faiss # type: ignore
|
from . import faiss # type: ignore
|
||||||
|
|
||||||
path = Path(index_path)
|
path = Path(index_path)
|
||||||
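Unlike faiss.normalize_L2, which normalizes its argument in place, the normalize_l2 helper added above returns a new array, which is why the build and search paths now re-assign the result. A quick standalone check (assuming only numpy):

    import numpy as np

    def normalize_l2(data: np.ndarray) -> np.ndarray:
        norms = np.linalg.norm(data, axis=1, keepdims=True)
        norms[norms == 0] = 1  # avoid division by zero for all-zero rows
        return data / norms

    vecs = np.array([[3.0, 4.0], [0.0, 0.0]], dtype=np.float32)
    out = normalize_l2(vecs)
    print(np.linalg.norm(out, axis=1))  # [1. 0.]  zero rows stay zero instead of NaN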
@@ -70,7 +83,7 @@ class HNSWBuilder(LeannBackendBuilderInterface):
         index.hnsw.efConstruction = self.efConstruction
 
         if self.distance_metric.lower() == "cosine":
-            faiss.normalize_L2(data)
+            data = normalize_l2(data)
 
         index.add(data.shape[0], faiss.swig_ptr(data))
         index_file = index_dir / f"{index_prefix}.index"
@@ -92,19 +105,15 @@ class HNSWBuilder(LeannBackendBuilderInterface):
 
             if success:
                 logger.info("✅ CSR conversion successful.")
-                index_file_old = index_file.with_suffix(".old")
-                shutil.move(str(index_file), str(index_file_old))
+                # index_file_old = index_file.with_suffix(".old")
+                # shutil.move(str(index_file), str(index_file_old))
                 shutil.move(str(csr_temp_file), str(index_file))
-                logger.info(
-                    f"INFO: Replaced original index with {mode_str} version at '{index_file}'"
-                )
+                logger.info(f"INFO: Replaced original index with {mode_str} version at '{index_file}'")
             else:
                 # Clean up and fail fast
                 if csr_temp_file.exists():
                     os.remove(csr_temp_file)
-                raise RuntimeError(
-                    "CSR conversion failed - cannot proceed with compact format"
-                )
+                raise RuntimeError("CSR conversion failed - cannot proceed with compact format")
 
 
 class HNSWSearcher(BaseSearcher):
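The hunk above writes the CSR graph to a temporary file and only swaps it over the original .index on success; the old rename-to-.old backup step is now commented out. A condensed sketch of that write-then-replace pattern, with hypothetical paths and a hypothetical helper name:

    import shutil
    from pathlib import Path

    def replace_with_csr(index_file: Path, csr_temp_file: Path, success: bool) -> None:
        # Mirrors the flow above: leave the original index untouched until the
        # conversion has definitely succeeded, then move the temp file into place.
        if success:
            shutil.move(str(csr_temp_file), str(index_file))
        else:
            if csr_temp_file.exists():
                csr_temp_file.unlink()  # clean up and fail fast
            raise RuntimeError("CSR conversion failed - cannot proceed with compact format")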
@@ -116,7 +125,9 @@ class HNSWSearcher(BaseSearcher):
         )
         from . import faiss  # type: ignore
 
-        self.distance_metric = self.meta.get("distance_metric", "mips").lower()
+        self.distance_metric = (
+            self.meta.get("backend_kwargs", {}).get("distance_metric", "mips").lower()
+        )
         metric_enum = get_metric_map().get(self.distance_metric)
         if metric_enum is None:
             raise ValueError(f"Unsupported distance_metric '{self.distance_metric}'.")
@@ -150,7 +161,7 @@ class HNSWSearcher(BaseSearcher):
         pruning_strategy: Literal["global", "local", "proportional"] = "global",
         batch_size: int = 0,
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Search for nearest neighbors using HNSW index.
 
@@ -174,28 +185,36 @@ class HNSWSearcher(BaseSearcher):
         """
         from . import faiss  # type: ignore
 
-        if not recompute_embeddings:
-            if self.is_pruned:
-                raise RuntimeError("Recompute is required for pruned index.")
+        if not recompute_embeddings and self.is_pruned:
+            raise RuntimeError(
+                "Recompute is required for pruned/compact HNSW index. "
+                "Re-run search with --recompute, or rebuild with --no-recompute and --no-compact."
+            )
         if recompute_embeddings:
             if zmq_port is None:
-                raise ValueError(
-                    "zmq_port must be provided if recompute_embeddings is True"
-                )
+                raise ValueError("zmq_port must be provided if recompute_embeddings is True")
 
         if query.dtype != np.float32:
             query = query.astype(np.float32)
         if self.distance_metric == "cosine":
-            faiss.normalize_L2(query)
+            query = normalize_l2(query)
 
         params = faiss.SearchParametersHNSW()
         if zmq_port is not None:
-            params.zmq_port = (
-                zmq_port  # C++ code won't use this if recompute_embeddings is False
-            )
+            params.zmq_port = zmq_port  # C++ code won't use this if recompute_embeddings is False
         params.efSearch = complexity
         params.beam_size = beam_width
 
+        # For OpenAI embeddings with cosine distance, disable relative distance check
+        # This prevents early termination when all scores are in a narrow range
+        embedding_model = self.meta.get("embedding_model", "").lower()
+        if self.distance_metric == "cosine" and any(
+            openai_model in embedding_model for openai_model in ["text-embedding", "openai"]
+        ):
+            params.check_relative_distance = False
+        else:
+            params.check_relative_distance = True
+
         # PQ pruning: direct mapping to HNSW's pq_pruning_ratio
         params.pq_pruning_ratio = prune_ratio
 
@@ -205,9 +224,7 @@ class HNSWSearcher(BaseSearcher):
             params.send_neigh_times_ratio = 0.0
         elif pruning_strategy == "proportional":
             params.local_prune = False
-            params.send_neigh_times_ratio = (
-                1.0  # Any value > 1e-6 triggers proportional mode
-            )
+            params.send_neigh_times_ratio = 1.0  # Any value > 1e-6 triggers proportional mode
         else:  # "global"
             params.local_prune = False
             params.send_neigh_times_ratio = 0.0
@@ -228,8 +245,6 @@ class HNSWSearcher(BaseSearcher):
             params,
         )
 
-        string_labels = [
-            [str(int_label) for int_label in batch_labels] for batch_labels in labels
-        ]
+        string_labels = [[str(int_label) for int_label in batch_labels] for batch_labels in labels]
 
         return {"labels": string_labels, "distances": distances}
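The mapping from pruning_strategy to the SearchParametersHNSW fields shown above can be summarized as a small helper. The "proportional" and "global" branches are taken from this diff; the "local" branch sits above the hunk shown here and is assumed to set local_prune=True, so treat this as an illustration rather than library behaviour:

    def pruning_params(strategy: str) -> dict:
        # Condensed view of the branches in HNSWSearcher.search above.
        if strategy == "local":
            return {"local_prune": True, "send_neigh_times_ratio": 0.0}  # assumed
        if strategy == "proportional":
            # any value > 1e-6 triggers proportional mode on the C++ side
            return {"local_prune": False, "send_neigh_times_ratio": 1.0}
        return {"local_prune": False, "send_neigh_times_ratio": 0.0}  # "global"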
@@ -3,17 +3,18 @@ HNSW-specific embedding server
 """
 
 import argparse
+import json
+import logging
+import os
+import sys
 import threading
 import time
-import os
-import zmq
-import numpy as np
-import msgpack
-import json
 from pathlib import Path
 from typing import Optional
-import sys
-import logging
+
+import msgpack
+import numpy as np
+import zmq
 
 # Set up logging based on environment variable
 LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
@@ -52,8 +53,8 @@ def create_hnsw_embedding_server(
     sys.path.insert(0, str(leann_core_path))
 
     try:
-        from leann.embedding_compute import compute_embeddings
         from leann.api import PassageManager
+        from leann.embedding_compute import compute_embeddings
 
         logger.info("Successfully imported unified embedding computation module")
     except ImportError as e:
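The hunk that follows replaces the blocking ZMQ loop with a shutdown-capable REP server that polls with short timeouts. A condensed, echo-only sketch of that pattern (not the actual server, which also handles text, distance, and embedding-by-id requests):

    import threading

    import msgpack
    import zmq

    def rep_loop(port: int, shutdown_event: threading.Event) -> None:
        # Poll with a timeout so the thread can notice shutdown_event instead of
        # blocking forever in recv(); LINGER=0 drops unsent replies on close.
        ctx = zmq.Context()
        sock = ctx.socket(zmq.REP)
        sock.bind(f"tcp://*:{port}")
        sock.setsockopt(zmq.RCVTIMEO, 1000)
        sock.setsockopt(zmq.SNDTIMEO, 1000)
        sock.setsockopt(zmq.LINGER, 0)
        try:
            while not shutdown_event.is_set():
                try:
                    request = msgpack.unpackb(sock.recv())
                except zmq.Again:
                    continue  # timeout: re-check the shutdown flag
                sock.send(msgpack.packb(request))  # echo back (placeholder for real work)
        finally:
            sock.close(0)
            ctx.term()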
@@ -78,206 +79,318 @@ def create_hnsw_embedding_server(
|
|||||||
raise ValueError("Only metadata files (.meta.json) are supported")
|
raise ValueError("Only metadata files (.meta.json) are supported")
|
||||||
|
|
||||||
# Load metadata to get passage sources
|
# Load metadata to get passage sources
|
||||||
with open(passages_file, "r") as f:
|
with open(passages_file) as f:
|
||||||
meta = json.load(f)
|
meta = json.load(f)
|
||||||
|
|
||||||
passages = PassageManager(meta["passage_sources"])
|
# Let PassageManager handle path resolution uniformly. It supports fallback order:
|
||||||
logger.info(
|
# 1) path/index_path; 2) *_relative; 3) standard siblings next to meta
|
||||||
f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
|
passages = PassageManager(meta["passage_sources"], metadata_file_path=passages_file)
|
||||||
)
|
# Dimension from metadata for shaping responses
|
||||||
|
try:
|
||||||
|
embedding_dim: int = int(meta.get("dimensions", 0))
|
||||||
|
except Exception:
|
||||||
|
embedding_dim = 0
|
||||||
|
logger.info(f"Loaded PassageManager with {len(passages)} passages from metadata")
|
||||||
|
|
||||||
|
# (legacy ZMQ thread removed; using shutdown-capable server only)
|
||||||
|
|
||||||
|
def zmq_server_thread_with_shutdown(shutdown_event):
|
||||||
|
"""ZMQ server thread that respects shutdown signal.
|
||||||
|
|
||||||
|
Creates its own REP socket bound to zmq_port and polls with timeouts
|
||||||
|
to allow graceful shutdown.
|
||||||
|
"""
|
||||||
|
logger.info("ZMQ server thread started with shutdown support")
|
||||||
|
|
||||||
def zmq_server_thread():
|
|
||||||
"""ZMQ server thread"""
|
|
||||||
context = zmq.Context()
|
context = zmq.Context()
|
||||||
socket = context.socket(zmq.REP)
|
rep_socket = context.socket(zmq.REP)
|
||||||
socket.bind(f"tcp://*:{zmq_port}")
|
rep_socket.bind(f"tcp://*:{zmq_port}")
|
||||||
logger.info(f"HNSW ZMQ server listening on port {zmq_port}")
|
logger.info(f"HNSW ZMQ REP server listening on port {zmq_port}")
|
||||||
|
rep_socket.setsockopt(zmq.RCVTIMEO, 1000)
|
||||||
|
# Keep sends from blocking during shutdown; fail fast and drop on close
|
||||||
|
rep_socket.setsockopt(zmq.SNDTIMEO, 1000)
|
||||||
|
rep_socket.setsockopt(zmq.LINGER, 0)
|
||||||
|
|
||||||
socket.setsockopt(zmq.RCVTIMEO, 300000)
|
# Track last request type/length for shape-correct fallbacks
|
||||||
socket.setsockopt(zmq.SNDTIMEO, 300000)
|
last_request_type = "unknown" # 'text' | 'distance' | 'embedding' | 'unknown'
|
||||||
|
last_request_length = 0
|
||||||
|
|
||||||
while True:
|
try:
|
||||||
try:
|
while not shutdown_event.is_set():
|
||||||
message_bytes = socket.recv()
|
try:
|
||||||
logger.debug(f"Received ZMQ request of size {len(message_bytes)} bytes")
|
e2e_start = time.time()
|
||||||
|
logger.debug("🔍 Waiting for ZMQ message...")
|
||||||
|
request_bytes = rep_socket.recv()
|
||||||
|
|
||||||
e2e_start = time.time()
|
# Rest of the processing logic (same as original)
|
||||||
request_payload = msgpack.unpackb(message_bytes)
|
request = msgpack.unpackb(request_bytes)
|
||||||
|
|
||||||
# Handle direct text embedding request
|
if len(request) == 1 and request[0] == "__QUERY_MODEL__":
|
||||||
if isinstance(request_payload, list) and len(request_payload) > 0:
|
response_bytes = msgpack.packb([model_name])
|
||||||
# Check if this is a direct text request (list of strings)
|
rep_socket.send(response_bytes)
|
||||||
if all(isinstance(item, str) for item in request_payload):
|
|
||||||
logger.info(
|
|
||||||
f"Processing direct text embedding request for {len(request_payload)} texts in {embedding_mode} mode"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Use unified embedding computation (now with model caching)
|
|
||||||
embeddings = compute_embeddings(
|
|
||||||
request_payload, model_name, mode=embedding_mode
|
|
||||||
)
|
|
||||||
|
|
||||||
response = embeddings.tolist()
|
|
||||||
socket.send(msgpack.packb(response))
|
|
||||||
e2e_end = time.time()
|
|
||||||
logger.info(
|
|
||||||
f"⏱️ Text embedding E2E time: {e2e_end - e2e_start:.6f}s"
|
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Handle distance calculation requests
|
# Handle direct text embedding request
|
||||||
if (
|
if (
|
||||||
isinstance(request_payload, list)
|
isinstance(request, list)
|
||||||
and len(request_payload) == 2
|
and request
|
||||||
and isinstance(request_payload[0], list)
|
and all(isinstance(item, str) for item in request)
|
||||||
and isinstance(request_payload[1], list)
|
):
|
||||||
):
|
last_request_type = "text"
|
||||||
node_ids = request_payload[0]
|
last_request_length = len(request)
|
||||||
query_vector = np.array(request_payload[1], dtype=np.float32)
|
embeddings = compute_embeddings(request, model_name, mode=embedding_mode)
|
||||||
|
rep_socket.send(msgpack.packb(embeddings.tolist()))
|
||||||
|
e2e_end = time.time()
|
||||||
|
logger.info(f"⏱️ Text embedding E2E time: {e2e_end - e2e_start:.6f}s")
|
||||||
|
continue
|
||||||
|
|
||||||
logger.debug("Distance calculation request received")
|
# Handle distance calculation request: [[ids], [query_vector]]
|
||||||
logger.debug(f" Node IDs: {node_ids}")
|
if (
|
||||||
logger.debug(f" Query vector dim: {len(query_vector)}")
|
isinstance(request, list)
|
||||||
|
and len(request) == 2
|
||||||
|
and isinstance(request[0], list)
|
||||||
|
and isinstance(request[1], list)
|
||||||
|
):
|
||||||
|
node_ids = request[0]
|
||||||
|
# Handle nested [[ids]] shape defensively
|
||||||
|
if len(node_ids) == 1 and isinstance(node_ids[0], list):
|
||||||
|
node_ids = node_ids[0]
|
||||||
|
query_vector = np.array(request[1], dtype=np.float32)
|
||||||
|
last_request_type = "distance"
|
||||||
|
last_request_length = len(node_ids)
|
||||||
|
|
||||||
# Get embeddings for node IDs
|
logger.debug("Distance calculation request received")
|
||||||
texts = []
|
logger.debug(f" Node IDs: {node_ids}")
|
||||||
for nid in node_ids:
|
logger.debug(f" Query vector dim: {len(query_vector)}")
|
||||||
|
|
||||||
|
# Gather texts for found ids
|
||||||
|
texts: list[str] = []
|
||||||
|
found_indices: list[int] = []
|
||||||
|
for idx, nid in enumerate(node_ids):
|
||||||
|
try:
|
||||||
|
passage_data = passages.get_passage(str(nid))
|
||||||
|
txt = passage_data.get("text", "")
|
||||||
|
if isinstance(txt, str) and len(txt) > 0:
|
||||||
|
texts.append(txt)
|
||||||
|
found_indices.append(idx)
|
||||||
|
else:
|
||||||
|
logger.error(f"Empty text for passage ID {nid}")
|
||||||
|
except KeyError:
|
||||||
|
logger.error(f"Passage ID {nid} not found")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Exception looking up passage ID {nid}: {e}")
|
||||||
|
|
||||||
|
# Prepare full-length response with large sentinel values
|
||||||
|
large_distance = 1e9
|
||||||
|
response_distances = [large_distance] * len(node_ids)
|
||||||
|
|
||||||
|
if texts:
|
||||||
|
try:
|
||||||
|
embeddings = compute_embeddings(
|
||||||
|
texts, model_name, mode=embedding_mode
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
|
||||||
|
)
|
||||||
|
if distance_metric == "l2":
|
||||||
|
partial = np.sum(
|
||||||
|
np.square(embeddings - query_vector.reshape(1, -1)), axis=1
|
||||||
|
)
|
||||||
|
else: # mips or cosine
|
||||||
|
partial = -np.dot(embeddings, query_vector)
|
||||||
|
|
||||||
|
for pos, dval in zip(found_indices, partial.flatten().tolist()):
|
||||||
|
response_distances[pos] = float(dval)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Distance computation error, using sentinels: {e}")
|
||||||
|
|
||||||
|
# Send response in expected shape [[distances]]
|
||||||
|
rep_socket.send(msgpack.packb([response_distances], use_single_float=True))
|
||||||
|
e2e_end = time.time()
|
||||||
|
logger.info(f"⏱️ Distance calculation E2E time: {e2e_end - e2e_start:.6f}s")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Fallback: treat as embedding-by-id request
|
||||||
|
if (
|
||||||
|
isinstance(request, list)
|
||||||
|
and len(request) == 1
|
||||||
|
and isinstance(request[0], list)
|
||||||
|
):
|
||||||
|
node_ids = request[0]
|
||||||
|
elif isinstance(request, list):
|
||||||
|
node_ids = request
|
||||||
|
else:
|
||||||
|
node_ids = []
|
||||||
|
last_request_type = "embedding"
|
||||||
|
last_request_length = len(node_ids)
|
||||||
|
logger.info(f"ZMQ received {len(node_ids)} node IDs for embedding fetch")
|
||||||
|
|
||||||
|
# Preallocate zero-filled flat data for robustness
|
||||||
|
if embedding_dim <= 0:
|
||||||
|
dims = [0, 0]
|
||||||
|
flat_data: list[float] = []
|
||||||
|
else:
|
||||||
|
dims = [len(node_ids), embedding_dim]
|
||||||
|
flat_data = [0.0] * (dims[0] * dims[1])
|
||||||
|
|
||||||
|
# Collect texts for found ids
|
||||||
|
texts: list[str] = []
|
||||||
|
found_indices: list[int] = []
|
||||||
|
for idx, nid in enumerate(node_ids):
|
||||||
try:
|
try:
|
||||||
passage_data = passages.get_passage(str(nid))
|
passage_data = passages.get_passage(str(nid))
|
||||||
txt = passage_data["text"]
|
txt = passage_data.get("text", "")
|
||||||
texts.append(txt)
|
if isinstance(txt, str) and len(txt) > 0:
|
||||||
|
texts.append(txt)
|
||||||
|
found_indices.append(idx)
|
||||||
|
else:
|
||||||
|
logger.error(f"Empty text for passage ID {nid}")
|
||||||
except KeyError:
|
except KeyError:
|
||||||
logger.error(f"Passage ID {nid} not found")
|
logger.error(f"Passage with ID {nid} not found")
|
||||||
raise RuntimeError(
|
|
||||||
f"FATAL: Passage with ID {nid} not found"
|
|
||||||
)
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Exception looking up passage ID {nid}: {e}")
|
logger.error(f"Exception looking up passage ID {nid}: {e}")
|
||||||
raise
|
|
||||||
|
|
||||||
# Process embeddings
|
if texts:
|
||||||
embeddings = compute_embeddings(
|
try:
|
||||||
texts, model_name, mode=embedding_mode
|
embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
|
||||||
)
|
logger.info(
|
||||||
logger.info(
|
f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
|
||||||
f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Calculate distances
|
|
||||||
if distance_metric == "l2":
|
|
||||||
distances = np.sum(
|
|
||||||
np.square(embeddings - query_vector.reshape(1, -1)), axis=1
|
|
||||||
)
|
|
||||||
else: # mips or cosine
|
|
||||||
distances = -np.dot(embeddings, query_vector)
|
|
||||||
|
|
||||||
response_payload = distances.flatten().tolist()
|
|
||||||
response_bytes = msgpack.packb(
|
|
||||||
[response_payload], use_single_float=True
|
|
||||||
)
|
|
||||||
logger.debug(
|
|
||||||
f"Sending distance response with {len(distances)} distances"
|
|
||||||
)
|
|
||||||
|
|
||||||
socket.send(response_bytes)
|
|
||||||
e2e_end = time.time()
|
|
||||||
logger.info(
|
|
||||||
f"⏱️ Distance calculation E2E time: {e2e_end - e2e_start:.6f}s"
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Standard embedding request (passage ID lookup)
|
|
||||||
if (
|
|
||||||
not isinstance(request_payload, list)
|
|
||||||
or len(request_payload) != 1
|
|
||||||
or not isinstance(request_payload[0], list)
|
|
||||||
):
|
|
||||||
logger.error(
|
|
||||||
f"Invalid MessagePack request format. Expected [[ids...]] or [texts...], got: {type(request_payload)}"
|
|
||||||
)
|
|
||||||
socket.send(msgpack.packb([[], []]))
|
|
||||||
continue
|
|
||||||
|
|
||||||
node_ids = request_payload[0]
|
|
||||||
logger.debug(f"Request for {len(node_ids)} node embeddings")
|
|
||||||
|
|
||||||
# Look up texts by node IDs
|
|
||||||
texts = []
|
|
||||||
for nid in node_ids:
|
|
||||||
try:
|
|
||||||
passage_data = passages.get_passage(str(nid))
|
|
||||||
txt = passage_data["text"]
|
|
||||||
if not txt:
|
|
||||||
raise RuntimeError(
|
|
||||||
f"FATAL: Empty text for passage ID {nid}"
|
|
||||||
)
|
)
|
||||||
texts.append(txt)
|
|
||||||
except KeyError:
|
|
||||||
raise RuntimeError(f"FATAL: Passage with ID {nid} not found")
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Exception looking up passage ID {nid}: {e}")
|
|
||||||
raise
|
|
||||||
|
|
||||||
# Process embeddings
|
if np.isnan(embeddings).any() or np.isinf(embeddings).any():
|
||||||
embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
|
logger.error(
|
||||||
logger.info(
|
f"NaN or Inf detected in embeddings! Requested IDs: {node_ids[:5]}..."
|
||||||
f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
|
)
|
||||||
)
|
dims = [0, embedding_dim]
|
||||||
|
flat_data = []
|
||||||
|
else:
|
||||||
|
emb_f32 = np.ascontiguousarray(embeddings, dtype=np.float32)
|
||||||
|
flat = emb_f32.flatten().tolist()
|
||||||
|
for j, pos in enumerate(found_indices):
|
||||||
|
start = pos * embedding_dim
|
||||||
|
end = start + embedding_dim
|
||||||
|
if end <= len(flat_data):
|
||||||
|
flat_data[start:end] = flat[
|
||||||
|
j * embedding_dim : (j + 1) * embedding_dim
|
||||||
|
]
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Embedding computation error, returning zeros: {e}")
|
||||||
|
|
||||||
# Serialization and response
|
response_payload = [dims, flat_data]
|
||||||
if np.isnan(embeddings).any() or np.isinf(embeddings).any():
|
response_bytes = msgpack.packb(response_payload, use_single_float=True)
|
||||||
logger.error(
|
|
||||||
f"NaN or Inf detected in embeddings! Requested IDs: {node_ids[:5]}..."
|
|
||||||
)
|
|
||||||
assert False
|
|
||||||
|
|
||||||
hidden_contiguous_f32 = np.ascontiguousarray(
|
rep_socket.send(response_bytes)
|
||||||
embeddings, dtype=np.float32
|
e2e_end = time.time()
|
||||||
)
|
logger.info(f"⏱️ ZMQ E2E time: {e2e_end - e2e_start:.6f}s")
|
||||||
response_payload = [
|
|
||||||
list(hidden_contiguous_f32.shape),
|
|
||||||
hidden_contiguous_f32.flatten().tolist(),
|
|
||||||
]
|
|
||||||
response_bytes = msgpack.packb(response_payload, use_single_float=True)
|
|
||||||
|
|
||||||
socket.send(response_bytes)
|
except zmq.Again:
|
||||||
e2e_end = time.time()
|
# Timeout - check shutdown_event and continue
|
||||||
logger.info(f"⏱️ ZMQ E2E time: {e2e_end - e2e_start:.6f}s")
|
continue
|
||||||
|
except Exception as e:
|
||||||
|
if not shutdown_event.is_set():
|
||||||
|
logger.error(f"Error in ZMQ server loop: {e}")
|
||||||
|
# Shape-correct fallback
|
||||||
|
try:
|
||||||
|
if last_request_type == "distance":
|
||||||
|
large_distance = 1e9
|
||||||
|
fallback_len = max(0, int(last_request_length))
|
||||||
|
safe = [[large_distance] * fallback_len]
|
||||||
|
elif last_request_type == "embedding":
|
||||||
|
bsz = max(0, int(last_request_length))
|
||||||
|
dim = max(0, int(embedding_dim))
|
||||||
|
safe = (
|
||||||
|
[[bsz, dim], [0.0] * (bsz * dim)] if dim > 0 else [[0, 0], []]
|
||||||
|
)
|
||||||
|
elif last_request_type == "text":
|
||||||
|
safe = [] # direct text embeddings expectation is a flat list
|
||||||
|
else:
|
||||||
|
safe = [[0, int(embedding_dim) if embedding_dim > 0 else 0], []]
|
||||||
|
rep_socket.send(msgpack.packb(safe, use_single_float=True))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
logger.info("Shutdown in progress, ignoring ZMQ error")
|
||||||
|
break
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
rep_socket.close(0)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
try:
|
||||||
|
context.term()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
except zmq.Again:
|
logger.info("ZMQ server thread exiting gracefully")
|
||||||
logger.debug("ZMQ socket timeout, continuing to listen")
|
|
||||||
continue
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error in ZMQ server loop: {e}")
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
traceback.print_exc()
|
# Add shutdown coordination
|
||||||
socket.send(msgpack.packb([[], []]))
|
shutdown_event = threading.Event()
|
||||||
|
|
||||||
zmq_thread = threading.Thread(target=zmq_server_thread, daemon=True)
|
def shutdown_zmq_server():
|
||||||
|
"""Gracefully shutdown ZMQ server."""
|
||||||
|
logger.info("Initiating graceful shutdown...")
|
||||||
|
shutdown_event.set()
|
||||||
|
|
||||||
|
if zmq_thread.is_alive():
|
||||||
|
logger.info("Waiting for ZMQ thread to finish...")
|
||||||
|
zmq_thread.join(timeout=5)
|
||||||
|
if zmq_thread.is_alive():
|
||||||
|
logger.warning("ZMQ thread did not finish in time")
|
||||||
|
|
||||||
|
# Clean up ZMQ resources
|
||||||
|
try:
|
||||||
|
# Note: socket and context are cleaned up by thread exit
|
||||||
|
logger.info("ZMQ resources cleaned up")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error cleaning ZMQ resources: {e}")
|
||||||
|
|
||||||
|
# Clean up other resources
|
||||||
|
try:
|
||||||
|
import gc
|
||||||
|
|
||||||
|
gc.collect()
|
||||||
|
logger.info("Additional resources cleaned up")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error cleaning additional resources: {e}")
|
||||||
|
|
||||||
|
logger.info("Graceful shutdown completed")
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
# Register signal handlers within this function scope
|
||||||
|
import signal
|
||||||
|
|
||||||
|
def signal_handler(sig, frame):
|
||||||
|
logger.info(f"Received signal {sig}, shutting down gracefully...")
|
||||||
|
shutdown_zmq_server()
|
||||||
|
|
||||||
|
signal.signal(signal.SIGTERM, signal_handler)
|
||||||
|
signal.signal(signal.SIGINT, signal_handler)
|
||||||
|
|
||||||
|
# Pass shutdown_event to ZMQ thread
|
||||||
|
zmq_thread = threading.Thread(
|
||||||
|
target=lambda: zmq_server_thread_with_shutdown(shutdown_event),
|
||||||
|
daemon=False, # Not daemon - we want to wait for it
|
||||||
|
)
|
||||||
zmq_thread.start()
|
zmq_thread.start()
|
||||||
logger.info(f"Started HNSW ZMQ server thread on port {zmq_port}")
|
logger.info(f"Started HNSW ZMQ server thread on port {zmq_port}")
|
||||||
|
|
||||||
# Keep the main thread alive
|
# Keep the main thread alive
|
||||||
try:
|
try:
|
||||||
while True:
|
while not shutdown_event.is_set():
|
||||||
time.sleep(1)
|
time.sleep(0.1) # Check shutdown more frequently
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
logger.info("HNSW Server shutting down...")
|
logger.info("HNSW Server shutting down...")
|
||||||
|
shutdown_zmq_server()
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# If we reach here, shutdown was triggered by signal
|
||||||
|
logger.info("Main loop exited, process should be shutting down")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import signal
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
def signal_handler(sig, frame):
|
# Signal handlers are now registered within create_hnsw_embedding_server
|
||||||
logger.info(f"Received signal {sig}, shutting down gracefully...")
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
# Register signal handlers for graceful shutdown
|
|
||||||
signal.signal(signal.SIGTERM, signal_handler)
|
|
||||||
signal.signal(signal.SIGINT, signal_handler)
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description="HNSW Embedding service")
|
parser = argparse.ArgumentParser(description="HNSW Embedding service")
|
||||||
parser.add_argument("--zmq-port", type=int, default=5555, help="ZMQ port to run on")
|
parser.add_argument("--zmq-port", type=int, default=5555, help="ZMQ port to run on")
|
||||||
@@ -299,7 +412,7 @@ if __name__ == "__main__":
         "--embedding-mode",
         type=str,
         default="sentence-transformers",
-        choices=["sentence-transformers", "openai", "mlx"],
+        choices=["sentence-transformers", "openai", "mlx", "ollama"],
         help="Embedding backend mode",
     )
@@ -6,9 +6,14 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "leann-backend-hnsw"
-version = "0.1.0"
+version = "0.3.2"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
-dependencies = ["leann-core==0.1.0", "numpy"]
+dependencies = [
+    "leann-core==0.3.2",
+    "numpy",
+    "pyzmq>=23.0.0",
+    "msgpack>=1.0.0",
+]
 
 [tool.scikit-build]
 wheel.packages = ["leann_backend_hnsw"]
@@ -17,6 +22,8 @@ cmake.build-type = "Release"
 build.verbose = true
 build.tool-args = ["-j8"]
 
-# CMake definitions to optimize compilation
+# CMake definitions to optimize compilation and find Homebrew packages
 [tool.scikit-build.cmake.define]
 CMAKE_BUILD_PARALLEL_LEVEL = "8"
+CMAKE_PREFIX_PATH = {env = "CMAKE_PREFIX_PATH"}
+OpenMP_ROOT = {env = "OpenMP_ROOT"}
Submodule packages/leann-backend-hnsw/third_party/faiss updated: ff22e2c86b...4a2c0d67d3
@@ -4,19 +4,49 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "leann-core"
-version = "0.1.0"
-description = "Core API and plugin system for Leann."
+version = "0.3.2"
+description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"
 license = { text = "MIT" }
+
+# All required dependencies included
 dependencies = [
     "numpy>=1.20.0",
-    "tqdm>=4.60.0"
+    "tqdm>=4.60.0",
+    "psutil>=5.8.0",
+    "pyzmq>=23.0.0",
+    "msgpack>=1.0.0",
+    "torch>=2.0.0",
+    "sentence-transformers>=2.2.0",
+    "llama-index-core>=0.12.0",
+    "llama-index-readers-file>=0.4.0",  # Essential for document reading
+    "llama-index-embeddings-huggingface>=0.5.5",  # For embeddings
+    "python-dotenv>=1.0.0",
+    "openai>=1.0.0",
+    "huggingface-hub>=0.20.0",
+    "transformers>=4.30.0",
+    "requests>=2.25.0",
+    "accelerate>=0.20.0",
+    "PyPDF2>=3.0.0",
+    "pymupdf>=1.23.0",
+    "pdfplumber>=0.10.0",
+    "nbconvert>=7.0.0",  # For .ipynb file support
+    "gitignore-parser>=0.1.12",  # For proper .gitignore handling
+    "mlx>=0.26.3; sys_platform == 'darwin' and platform_machine == 'arm64'",
+    "mlx-lm>=0.26.0; sys_platform == 'darwin' and platform_machine == 'arm64'",
+]
+
+[project.optional-dependencies]
+colab = [
+    "torch>=2.0.0,<3.0.0",  # Limit torch version to avoid conflicts
+    "transformers>=4.30.0,<5.0.0",  # Limit transformers version
+    "accelerate>=0.20.0,<1.0.0",  # Limit accelerate version
 ]
 
 [project.scripts]
 leann = "leann.cli:main"
+leann_mcp = "leann.mcp:main"
 
 [tool.setuptools.packages.find]
 where = ["src"]
@@ -8,10 +8,14 @@ if platform.system() == "Darwin":
     os.environ["MKL_NUM_THREADS"] = "1"
     os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
     os.environ["KMP_BLOCKTIME"] = "0"
+    # Additional fixes for PyTorch/sentence-transformers on macOS ARM64 only in CI
+    if os.environ.get("CI") == "true":
+        os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "0"
+        os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 from .api import LeannBuilder, LeannChat, LeannSearcher
 from .registry import BACKEND_REGISTRY, autodiscover_backends
 
 autodiscover_backends()
 
-__all__ = ["LeannBuilder", "LeannSearcher", "LeannChat", "BACKEND_REGISTRY"]
+__all__ = ["BACKEND_REGISTRY", "LeannBuilder", "LeannChat", "LeannSearcher"]
@@ -4,23 +4,32 @@ with the correct, original embedding logic from the user's reference code.
 """
 
 import json
-import pickle
-from leann.interface import LeannBackendSearcherInterface
-import numpy as np
-import time
-from pathlib import Path
-from typing import List, Dict, Any, Optional, Literal
-from dataclasses import dataclass, field
-from .registry import BACKEND_REGISTRY
-from .interface import LeannBackendFactoryInterface
-from .chat import get_llm
 import logging
+import pickle
+import time
+import warnings
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Literal, Optional
+
+import numpy as np
+
+from leann.interface import LeannBackendSearcherInterface
+
+from .chat import get_llm
+from .interface import LeannBackendFactoryInterface
+from .registry import BACKEND_REGISTRY
 
 logger = logging.getLogger(__name__)
 
 
+def get_registered_backends() -> list[str]:
+    """Get list of registered backend names."""
+    return list(BACKEND_REGISTRY.keys())
+
+
 def compute_embeddings(
-    chunks: List[str],
+    chunks: list[str],
     model_name: str,
     mode: str = "sentence-transformers",
     use_server: bool = True,
@@ -37,6 +46,7 @@ def compute_embeddings(
             - "sentence-transformers": Use sentence-transformers library (default)
             - "mlx": Use MLX backend for Apple Silicon
             - "openai": Use OpenAI embedding API
+            - "gemini": Use Google Gemini embedding API
         use_server: Whether to use embedding server (True for search, False for build)
 
     Returns:
@@ -61,9 +71,7 @@
     )
 
 
-def compute_embeddings_via_server(
-    chunks: List[str], model_name: str, port: int
-) -> np.ndarray:
+def compute_embeddings_via_server(chunks: list[str], model_name: str, port: int) -> np.ndarray:
     """Computes embeddings using sentence-transformers.
 
     Args:
@@ -73,9 +81,9 @@ def compute_embeddings_via_server(
     logger.info(
        f"Computing embeddings for {len(chunks)} chunks using SentenceTransformer model '{model_name}' (via embedding server)..."
     )
-    import zmq
     import msgpack
     import numpy as np
+    import zmq
 
     # Connect to embedding server
     context = zmq.Context()
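The client side of this path is a REQ socket that sends a msgpack-encoded list of texts and receives a nested list of floats back, matching the direct-text branch of the HNSW embedding server in this diff. A minimal sketch, assuming a local server on the given port and omitting error handling:

    import msgpack
    import numpy as np
    import zmq

    def request_embeddings(chunks: list[str], port: int) -> np.ndarray:
        # Send the raw list of texts; the server replies with one embedding row per text.
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect(f"tcp://localhost:{port}")
        try:
            socket.send(msgpack.packb(chunks))
            rows = msgpack.unpackb(socket.recv())
            return np.asarray(rows, dtype=np.float32)
        finally:
            socket.close(0)
            context.term()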
@@ -104,63 +112,213 @@ class SearchResult:
|
|||||||
id: str
|
id: str
|
||||||
score: float
|
score: float
|
||||||
text: str
|
text: str
|
||||||
metadata: Dict[str, Any] = field(default_factory=dict)
|
metadata: dict[str, Any] = field(default_factory=dict)
|
||||||
|
|
||||||
|
|
||||||
class PassageManager:
|
class PassageManager:
|
||||||
def __init__(self, passage_sources: List[Dict[str, Any]]):
|
def __init__(
|
||||||
self.offset_maps = {}
|
self, passage_sources: list[dict[str, Any]], metadata_file_path: Optional[str] = None
|
||||||
self.passage_files = {}
|
):
|
||||||
self.global_offset_map = {} # Combined map for fast lookup
|
self.offset_maps: dict[str, dict[str, int]] = {}
|
||||||
|
self.passage_files: dict[str, str] = {}
|
||||||
|
# Avoid materializing a single gigantic global map to reduce memory
|
||||||
|
# footprint on very large corpora (e.g., 60M+ passages). Instead, keep
|
||||||
|
# per-shard maps and do a lightweight per-shard lookup on demand.
|
||||||
|
self._total_count: int = 0
|
||||||
|
|
||||||
|
# Derive index base name for standard sibling fallbacks, e.g., <index_name>.passages.*
|
||||||
|
index_name_base = None
|
||||||
|
if metadata_file_path:
|
||||||
|
meta_name = Path(metadata_file_path).name
|
||||||
|
if meta_name.endswith(".meta.json"):
|
||||||
|
index_name_base = meta_name[: -len(".meta.json")]
|
||||||
|
|
||||||
for source in passage_sources:
|
for source in passage_sources:
|
||||||
assert source["type"] == "jsonl", "only jsonl is supported"
|
assert source["type"] == "jsonl", "only jsonl is supported"
|
||||||
passage_file = source["path"]
|
passage_file = source.get("path", "")
|
||||||
index_file = source["index_path"] # .idx file
|
index_file = source.get("index_path", "") # .idx file
|
||||||
|
|
||||||
|
# Fix path resolution - relative paths should be relative to metadata file directory
|
||||||
|
def _resolve_candidates(
|
||||||
|
primary: str,
|
||||||
|
relative_key: str,
|
||||||
|
default_name: Optional[str],
|
||||||
|
source_dict: dict[str, Any],
|
||||||
|
) -> list[Path]:
|
||||||
|
"""
|
||||||
|
Build an ordered list of candidate paths. For relative paths specified in
|
||||||
|
metadata, prefer resolution relative to the metadata file directory first,
|
||||||
|
then fall back to CWD-based resolution, and finally to conventional
|
||||||
|
sibling defaults (e.g., <index_base>.passages.idx / .jsonl).
|
||||||
|
"""
|
||||||
|
candidates: list[Path] = []
|
||||||
|
# 1) Primary path
|
||||||
|
if primary:
|
||||||
|
p = Path(primary)
|
||||||
|
if p.is_absolute():
|
||||||
|
candidates.append(p)
|
||||||
|
else:
|
||||||
|
# Prefer metadata-relative resolution for relative paths
|
||||||
|
if metadata_file_path:
|
||||||
|
candidates.append(Path(metadata_file_path).parent / p)
|
||||||
|
# Also consider CWD-relative as a fallback for legacy layouts
|
||||||
|
candidates.append(Path.cwd() / p)
|
||||||
|
# 2) metadata-relative explicit relative key (if present)
|
||||||
|
if metadata_file_path and source_dict.get(relative_key):
|
||||||
|
candidates.append(Path(metadata_file_path).parent / source_dict[relative_key])
|
||||||
|
# 3) metadata-relative standard sibling filename
|
||||||
|
if metadata_file_path and default_name:
|
||||||
|
candidates.append(Path(metadata_file_path).parent / default_name)
|
||||||
|
return candidates
|
||||||
|
|
||||||
|
# Build candidate lists and pick first existing; otherwise keep last candidate for error message
|
||||||
|
idx_default = f"{index_name_base}.passages.idx" if index_name_base else None
|
||||||
|
idx_candidates = _resolve_candidates(
|
||||||
|
index_file, "index_path_relative", idx_default, source
|
||||||
|
)
|
||||||
|
pas_default = f"{index_name_base}.passages.jsonl" if index_name_base else None
|
||||||
|
pas_candidates = _resolve_candidates(passage_file, "path_relative", pas_default, source)
|
||||||
|
|
||||||
|
def _pick_existing(cands: list[Path]) -> str:
|
||||||
|
for c in cands:
|
||||||
|
if c.exists():
|
||||||
|
return str(c.resolve())
|
||||||
|
# Fallback to last candidate (best guess) even if not exists; will error below
|
||||||
|
return str(cands[-1].resolve()) if cands else ""
|
||||||
|
|
||||||
|
index_file = _pick_existing(idx_candidates)
|
||||||
|
passage_file = _pick_existing(pas_candidates)
|
||||||
|
|
||||||
if not Path(index_file).exists():
|
if not Path(index_file).exists():
|
||||||
raise FileNotFoundError(f"Passage index file not found: {index_file}")
|
raise FileNotFoundError(f"Passage index file not found: {index_file}")
|
||||||
|
|
||||||
with open(index_file, "rb") as f:
|
with open(index_file, "rb") as f:
|
||||||
offset_map = pickle.load(f)
|
offset_map: dict[str, int] = pickle.load(f)
|
||||||
self.offset_maps[passage_file] = offset_map
|
self.offset_maps[passage_file] = offset_map
|
||||||
self.passage_files[passage_file] = passage_file
|
self.passage_files[passage_file] = passage_file
|
||||||
|
self._total_count += len(offset_map)
|
||||||
|
|
||||||
# Build global map for O(1) lookup
|
def get_passage(self, passage_id: str) -> dict[str, Any]:
|
||||||
for passage_id, offset in offset_map.items():
|
# Fast path: check each shard map (there are typically few shards).
|
||||||
self.global_offset_map[passage_id] = (passage_file, offset)
|
# This avoids building a massive combined dict while keeping lookups
|
||||||
|
# bounded by the number of shards.
|
||||||
def get_passage(self, passage_id: str) -> Dict[str, Any]:
|
for passage_file, offset_map in self.offset_maps.items():
|
||||||
if passage_id in self.global_offset_map:
|
try:
|
||||||
passage_file, offset = self.global_offset_map[passage_id]
|
offset = offset_map[passage_id]
|
||||||
# Lazy file opening - only open when needed
|
with open(passage_file, encoding="utf-8") as f:
|
||||||
with open(passage_file, "r", encoding="utf-8") as f:
|
f.seek(offset)
|
||||||
f.seek(offset)
|
return json.loads(f.readline())
|
||||||
return json.loads(f.readline())
|
except KeyError:
|
||||||
|
continue
|
||||||
raise KeyError(f"Passage ID not found: {passage_id}")
|
raise KeyError(f"Passage ID not found: {passage_id}")
|
||||||
|
|
||||||
|
def __len__(self) -> int:
|
||||||
|
return self._total_count
|
||||||
|
|
||||||
|
|
||||||
class LeannBuilder:
|
class LeannBuilder:
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
backend_name: str,
|
backend_name: str,
|
||||||
embedding_model: str = "facebook/contriever-msmarco",
|
embedding_model: str = "facebook/contriever",
|
||||||
dimensions: Optional[int] = None,
|
dimensions: Optional[int] = None,
|
||||||
embedding_mode: str = "sentence-transformers",
|
embedding_mode: str = "sentence-transformers",
|
||||||
**backend_kwargs,
|
**backend_kwargs,
|
||||||
):
|
):
|
||||||
self.backend_name = backend_name
|
self.backend_name = backend_name
|
||||||
backend_factory: LeannBackendFactoryInterface | None = BACKEND_REGISTRY.get(
|
# Normalize incompatible combinations early (for consistent metadata)
|
||||||
backend_name
|
if backend_name == "hnsw":
|
||||||
)
|
is_recompute = backend_kwargs.get("is_recompute", True)
|
||||||
|
is_compact = backend_kwargs.get("is_compact", True)
|
||||||
|
if is_recompute is False and is_compact is True:
|
||||||
|
warnings.warn(
|
||||||
|
"HNSW with is_recompute=False requires non-compact storage. Forcing is_compact=False.",
|
||||||
|
UserWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
backend_kwargs["is_compact"] = False
|
||||||
|
|
||||||
|
backend_factory: Optional[LeannBackendFactoryInterface] = BACKEND_REGISTRY.get(backend_name)
|
||||||
if backend_factory is None:
|
if backend_factory is None:
|
||||||
raise ValueError(f"Backend '{backend_name}' not found or not registered.")
|
raise ValueError(f"Backend '{backend_name}' not found or not registered.")
|
||||||
self.backend_factory = backend_factory
|
self.backend_factory = backend_factory
|
||||||
self.embedding_model = embedding_model
|
self.embedding_model = embedding_model
|
||||||
self.dimensions = dimensions
|
self.dimensions = dimensions
|
||||||
self.embedding_mode = embedding_mode
|
self.embedding_mode = embedding_mode
|
||||||
self.backend_kwargs = backend_kwargs
|
|
||||||
self.chunks: List[Dict[str, Any]] = []
|
|
||||||
|
|
||||||
def add_text(self, text: str, metadata: Optional[Dict[str, Any]] = None):
|
# Check if we need to use cosine distance for normalized embeddings
|
||||||
|
normalized_embeddings_models = {
|
||||||
|
# OpenAI models
|
||||||
|
("openai", "text-embedding-ada-002"),
|
||||||
|
("openai", "text-embedding-3-small"),
|
||||||
|
("openai", "text-embedding-3-large"),
|
||||||
|
# Voyage AI models
|
||||||
|
("voyage", "voyage-2"),
|
||||||
|
("voyage", "voyage-3"),
|
||||||
|
("voyage", "voyage-large-2"),
|
||||||
|
("voyage", "voyage-multilingual-2"),
|
||||||
|
("voyage", "voyage-code-2"),
|
||||||
|
# Cohere models
|
||||||
|
("cohere", "embed-english-v3.0"),
|
||||||
|
("cohere", "embed-multilingual-v3.0"),
|
||||||
|
("cohere", "embed-english-light-v3.0"),
|
||||||
|
("cohere", "embed-multilingual-light-v3.0"),
|
||||||
|
}
|
||||||
|
|
||||||
|
# Also check for patterns in model names
|
||||||
|
is_normalized = False
|
||||||
|
current_model_lower = embedding_model.lower()
|
||||||
|
current_mode_lower = embedding_mode.lower()
|
||||||
|
|
||||||
|
# Check exact matches
|
||||||
|
for mode, model in normalized_embeddings_models:
|
||||||
|
if (current_mode_lower == mode and current_model_lower == model) or (
|
||||||
|
mode in current_mode_lower and model in current_model_lower
|
||||||
|
):
|
||||||
|
is_normalized = True
|
||||||
|
break
|
||||||
|
|
||||||
|
# Check patterns
|
||||||
|
if not is_normalized:
|
||||||
|
# OpenAI patterns
|
||||||
|
if "openai" in current_mode_lower or "openai" in current_model_lower:
|
||||||
|
if any(
|
||||||
|
pattern in current_model_lower
|
||||||
|
for pattern in ["text-embedding", "ada", "3-small", "3-large"]
|
||||||
|
):
|
||||||
|
is_normalized = True
|
||||||
|
# Voyage patterns
|
||||||
|
elif "voyage" in current_mode_lower or "voyage" in current_model_lower:
|
||||||
|
is_normalized = True
|
||||||
|
# Cohere patterns
|
||||||
|
elif "cohere" in current_mode_lower or "cohere" in current_model_lower:
|
||||||
|
if "embed" in current_model_lower:
|
||||||
|
is_normalized = True
|
||||||
|
|
||||||
|
# Handle distance metric
|
||||||
|
if is_normalized and "distance_metric" not in backend_kwargs:
|
||||||
|
backend_kwargs["distance_metric"] = "cosine"
|
||||||
|
warnings.warn(
|
||||||
|
f"Detected normalized embeddings model '{embedding_model}' with mode '{embedding_mode}'. "
|
||||||
|
f"Automatically setting distance_metric='cosine' for optimal performance. "
|
||||||
|
f"Normalized embeddings (L2 norm = 1) should use cosine similarity instead of MIPS.",
|
||||||
|
UserWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
elif is_normalized and backend_kwargs.get("distance_metric", "").lower() != "cosine":
|
||||||
|
current_metric = backend_kwargs.get("distance_metric", "mips")
|
||||||
|
warnings.warn(
|
||||||
|
f"Warning: Using '{current_metric}' distance metric with normalized embeddings model "
|
||||||
|
f"'{embedding_model}' may lead to suboptimal search results. "
|
||||||
|
f"Consider using 'cosine' distance metric for better performance.",
|
||||||
|
UserWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.backend_kwargs = backend_kwargs
|
||||||
|
self.chunks: list[dict[str, Any]] = []
|
||||||
|
|
||||||
|
def add_text(self, text: str, metadata: Optional[dict[str, Any]] = None):
|
||||||
if metadata is None:
|
if metadata is None:
|
||||||
metadata = {}
|
metadata = {}
|
||||||
passage_id = metadata.get("id", str(len(self.chunks)))
|
passage_id = metadata.get("id", str(len(self.chunks)))
|
||||||
@@ -170,6 +328,23 @@ class LeannBuilder:
|
|||||||
def build_index(self, index_path: str):
|
def build_index(self, index_path: str):
|
||||||
if not self.chunks:
|
if not self.chunks:
|
||||||
raise ValueError("No chunks added.")
|
raise ValueError("No chunks added.")
|
||||||
|
|
||||||
|
# Filter out invalid/empty text chunks early to keep passage and embedding counts aligned
|
||||||
|
valid_chunks: list[dict[str, Any]] = []
|
||||||
|
skipped = 0
|
||||||
|
for chunk in self.chunks:
|
||||||
|
text = chunk.get("text", "")
|
||||||
|
if isinstance(text, str) and text.strip():
|
||||||
|
valid_chunks.append(chunk)
|
||||||
|
else:
|
||||||
|
skipped += 1
|
||||||
|
if skipped > 0:
|
||||||
|
print(
|
||||||
|
f"Warning: Skipping {skipped} empty/invalid text chunk(s). Processing {len(valid_chunks)} valid chunks"
|
||||||
|
)
|
||||||
|
self.chunks = valid_chunks
|
||||||
|
if not self.chunks:
|
||||||
|
raise ValueError("All provided chunks are empty or invalid. Nothing to index.")
|
||||||
if self.dimensions is None:
|
if self.dimensions is None:
|
||||||
self.dimensions = len(
|
self.dimensions = len(
|
||||||
compute_embeddings(
|
compute_embeddings(
|
||||||
@@ -190,9 +365,7 @@ class LeannBuilder:
|
|||||||
try:
|
try:
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
|
|
||||||
chunk_iterator = tqdm(
|
chunk_iterator = tqdm(self.chunks, desc="Writing passages", unit="chunk")
|
||||||
self.chunks, desc="Writing passages", unit="chunk"
|
|
||||||
)
|
|
||||||
except ImportError:
|
except ImportError:
|
||||||
chunk_iterator = self.chunks
|
chunk_iterator = self.chunks
|
||||||
|
|
||||||
@@ -222,9 +395,7 @@ class LeannBuilder:
|
|||||||
string_ids = [chunk["id"] for chunk in self.chunks]
|
string_ids = [chunk["id"] for chunk in self.chunks]
|
||||||
current_backend_kwargs = {**self.backend_kwargs, "dimensions": self.dimensions}
|
current_backend_kwargs = {**self.backend_kwargs, "dimensions": self.dimensions}
|
||||||
builder_instance = self.backend_factory.builder(**current_backend_kwargs)
|
builder_instance = self.backend_factory.builder(**current_backend_kwargs)
|
||||||
builder_instance.build(
|
builder_instance.build(embeddings, string_ids, index_path, **current_backend_kwargs)
|
||||||
embeddings, string_ids, index_path, **current_backend_kwargs
|
|
||||||
)
|
|
||||||
leann_meta_path = index_dir / f"{index_name}.meta.json"
|
leann_meta_path = index_dir / f"{index_name}.meta.json"
|
||||||
meta_data = {
|
meta_data = {
|
||||||
"version": "1.0",
|
"version": "1.0",
|
||||||
@@ -236,8 +407,12 @@ class LeannBuilder:
|
|||||||
"passage_sources": [
|
"passage_sources": [
|
||||||
{
|
{
|
||||||
"type": "jsonl",
|
"type": "jsonl",
|
||||||
"path": str(passages_file),
|
# Preserve existing relative file names (backward-compatible)
|
||||||
"index_path": str(offset_file),
|
"path": passages_file.name,
|
||||||
|
"index_path": offset_file.name,
|
||||||
|
# Add optional redundant relative keys for remote build portability (non-breaking)
|
||||||
|
"path_relative": passages_file.name,
|
||||||
|
"index_path_relative": offset_file.name,
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
}
|
}
|
||||||
@@ -273,9 +448,7 @@ class LeannBuilder:
|
|||||||
ids, embeddings = data
|
ids, embeddings = data
|
||||||
|
|
||||||
if not isinstance(embeddings, np.ndarray):
|
if not isinstance(embeddings, np.ndarray):
|
||||||
raise ValueError(
|
raise ValueError(f"Expected embeddings to be numpy array, got {type(embeddings)}")
|
||||||
f"Expected embeddings to be numpy array, got {type(embeddings)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
if len(ids) != embeddings.shape[0]:
|
if len(ids) != embeddings.shape[0]:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
@@ -287,9 +460,7 @@ class LeannBuilder:
|
|||||||
if self.dimensions is None:
|
if self.dimensions is None:
|
||||||
self.dimensions = embedding_dim
|
self.dimensions = embedding_dim
|
||||||
elif self.dimensions != embedding_dim:
|
elif self.dimensions != embedding_dim:
|
||||||
raise ValueError(
|
raise ValueError(f"Dimension mismatch: expected {self.dimensions}, got {embedding_dim}")
|
||||||
f"Dimension mismatch: expected {self.dimensions}, got {embedding_dim}"
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Building index from precomputed embeddings: {len(ids)} items, {embedding_dim} dimensions"
|
f"Building index from precomputed embeddings: {len(ids)} items, {embedding_dim} dimensions"
|
||||||
@@ -356,8 +527,12 @@ class LeannBuilder:
|
|||||||
"passage_sources": [
|
"passage_sources": [
|
||||||
{
|
{
|
||||||
"type": "jsonl",
|
"type": "jsonl",
|
||||||
"path": str(passages_file),
|
# Preserve existing relative file names (backward-compatible)
|
||||||
"index_path": str(offset_file),
|
"path": passages_file.name,
|
||||||
|
"index_path": offset_file.name,
|
||||||
|
# Add optional redundant relative keys for remote build portability (non-breaking)
|
||||||
|
"path_relative": passages_file.name,
|
||||||
|
"index_path_relative": offset_file.name,
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"built_from_precomputed_embeddings": True,
|
"built_from_precomputed_embeddings": True,
|
||||||
@@ -374,27 +549,35 @@ class LeannBuilder:
         with open(leann_meta_path, "w", encoding="utf-8") as f:
             json.dump(meta_data, f, indent=2)

-        logger.info(
-            f"Index built successfully from precomputed embeddings: {index_path}"
-        )
+        logger.info(f"Index built successfully from precomputed embeddings: {index_path}")


 class LeannSearcher:
     def __init__(self, index_path: str, enable_warmup: bool = False, **backend_kwargs):
+        # Fix path resolution for Colab and other environments
+        if not Path(index_path).is_absolute():
+            index_path = str(Path(index_path).resolve())
+
         self.meta_path_str = f"{index_path}.meta.json"
         if not Path(self.meta_path_str).exists():
-            raise FileNotFoundError(
-                f"Leann metadata file not found at {self.meta_path_str}"
+            parent_dir = Path(index_path).parent
+            print(
+                f"Leann metadata file not found at {self.meta_path_str}, and you may need to rm -rf {parent_dir}"
             )
-        with open(self.meta_path_str, "r", encoding="utf-8") as f:
+            # highlight in red the filenotfound error
+            raise FileNotFoundError(
+                f"Leann metadata file not found at {self.meta_path_str}, \033[91m you may need to rm -rf {parent_dir}\033[0m"
+            )
+        with open(self.meta_path_str, encoding="utf-8") as f:
             self.meta_data = json.load(f)
         backend_name = self.meta_data["backend_name"]
         self.embedding_model = self.meta_data["embedding_model"]
         # Support both old and new format
-        self.embedding_mode = self.meta_data.get(
-            "embedding_mode", "sentence-transformers"
+        self.embedding_mode = self.meta_data.get("embedding_mode", "sentence-transformers")
+        # Delegate portability handling to PassageManager
+        self.passage_manager = PassageManager(
+            self.meta_data.get("passage_sources", []), metadata_file_path=self.meta_path_str
         )
-        self.passage_manager = PassageManager(self.meta_data.get("passage_sources", []))
         backend_factory = BACKEND_REGISTRY.get(backend_name)
         if backend_factory is None:
             raise ValueError(f"Backend '{backend_name}' not found.")
@@ -415,12 +598,24 @@ class LeannSearcher:
         pruning_strategy: Literal["global", "local", "proportional"] = "global",
         expected_zmq_port: int = 5557,
         **kwargs,
-    ) -> List[SearchResult]:
+    ) -> list[SearchResult]:
         logger.info("🔍 LeannSearcher.search() called:")
         logger.info(f" Query: '{query}'")
         logger.info(f" Top_k: {top_k}")
         logger.info(f" Additional kwargs: {kwargs}")

+        # Smart top_k detection and adjustment
+        # Use PassageManager length (sum of shard sizes) to avoid
+        # depending on a massive combined map
+        total_docs = len(self.passage_manager)
+        original_top_k = top_k
+        if top_k > total_docs:
+            top_k = total_docs
+            logger.warning(
+                f" ⚠️ Requested top_k ({original_top_k}) exceeds total documents ({total_docs})"
+            )
+            logger.warning(f" ✅ Auto-adjusted top_k to {top_k} to match available documents")
+
         zmq_port = None

         start_time = time.time()
@@ -441,9 +636,9 @@ class LeannSearcher:
             use_server_if_available=recompute_embeddings,
             zmq_port=zmq_port,
         )
-        logger.info(f" Generated embedding shape: {query_embedding.shape}")
-        embedding_time = time.time() - start_time
-        logger.info(f" Embedding time: {embedding_time} seconds")
+        # logger.info(f" Generated embedding shape: {query_embedding.shape}")
+        # time.time() - start_time
+        # logger.info(f" Embedding time: {embedding_time} seconds")

         start_time = time.time()
         results = self.backend_impl.search(
@@ -457,15 +652,13 @@ class LeannSearcher:
             zmq_port=zmq_port,
             **kwargs,
         )
-        search_time = time.time() - start_time
-        logger.info(f" Search time: {search_time} seconds")
-        logger.info(
-            f" Backend returned: labels={len(results.get('labels', [[]])[0])} results"
-        )
+        # logger.info(f" Search time: {search_time} seconds")
+        logger.info(f" Backend returned: labels={len(results.get('labels', [[]])[0])} results")

         enriched_results = []
         if "labels" in results and "distances" in results:
             logger.info(f" Processing {len(results['labels'][0])} passage IDs:")
+            # Python 3.9 does not support zip(strict=...); lengths are expected to match
             for i, (string_id, dist) in enumerate(
                 zip(results["labels"][0], results["distances"][0])
             ):
@@ -479,23 +672,64 @@ class LeannSearcher:
                             metadata=passage_data.get("metadata", {}),
                         )
                     )

+                    # Color codes for better logging
+                    GREEN = "\033[92m"
+                    BLUE = "\033[94m"
+                    YELLOW = "\033[93m"
+                    RESET = "\033[0m"
+
+                    # Truncate text for display (first 100 chars)
+                    display_text = passage_data["text"]
                     logger.info(
-                        f" {i + 1}. passage_id='{string_id}' -> SUCCESS: {passage_data['text']}..."
+                        f" {GREEN}✓{RESET} {BLUE}[{i + 1:2d}]{RESET} {YELLOW}ID:{RESET} '{string_id}' {YELLOW}Score:{RESET} {dist:.4f} {YELLOW}Text:{RESET} {display_text}"
                     )
                 except KeyError:
+                    RED = "\033[91m"
+                    RESET = "\033[0m"
                     logger.error(
-                        f" {i + 1}. passage_id='{string_id}' -> ERROR: Passage not found in PassageManager!"
+                        f" {RED}✗{RESET} [{i + 1:2d}] ID: '{string_id}' -> {RED}ERROR: Passage not found!{RESET}"
                     )

-        logger.info(f" Final enriched results: {len(enriched_results)} passages")
+        # Define color codes outside the loop for final message
+        GREEN = "\033[92m"
+        RESET = "\033[0m"
+        logger.info(f" {GREEN}✓ Final enriched results: {len(enriched_results)} passages{RESET}")
         return enriched_results

+    def cleanup(self):
+        """Explicitly cleanup embedding server resources.
+
+        This method should be called after you're done using the searcher,
+        especially in test environments or batch processing scenarios.
+        """
+        backend = getattr(self.backend_impl, "embedding_server_manager", None)
+        if backend is not None:
+            backend.stop_server()
+
+    # Enable automatic cleanup patterns
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc, tb):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
+
+    def __del__(self):
+        try:
+            self.cleanup()
+        except Exception:
+            # Avoid noisy errors during interpreter shutdown
+            pass
+

 class LeannChat:
     def __init__(
         self,
         index_path: str,
-        llm_config: Optional[Dict[str, Any]] = None,
+        llm_config: Optional[dict[str, Any]] = None,
         enable_warmup: bool = False,
         **kwargs,
     ):
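A minimal usage sketch of the cleanup/context-manager support added above; the index path and query are placeholders, not taken from the diff:

    # Hypothetical example; __exit__ calls cleanup(), which stops the embedding
    # server via the backend's embedding_server_manager if one was started.
    with LeannSearcher("./demo.leann") as searcher:
        results = searcher.search("what does LeannSearcher.cleanup do?", top_k=3)
        for r in results:
            print(r.text)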
@@ -511,13 +745,13 @@ class LeannChat:
         prune_ratio: float = 0.0,
         recompute_embeddings: bool = True,
         pruning_strategy: Literal["global", "local", "proportional"] = "global",
-        llm_kwargs: Optional[Dict[str, Any]] = None,
+        llm_kwargs: Optional[dict[str, Any]] = None,
         expected_zmq_port: int = 5557,
         **search_kwargs,
     ):
         if llm_kwargs is None:
             llm_kwargs = {}
+        search_time = time.time()
         results = self.searcher.search(
             question,
             top_k=top_k,
@@ -529,6 +763,8 @@ class LeannChat:
             expected_zmq_port=expected_zmq_port,
             **search_kwargs,
         )
+        search_time = time.time() - search_time
+        # logger.info(f" Search time: {search_time} seconds")
         context = "\n\n".join([r.text for r in results])
         prompt = (
             "Here is some retrieved context that might help answer your question:\n\n"
@@ -537,7 +773,10 @@ class LeannChat:
             "Please provide the best answer you can based on this context and your knowledge."
         )

+        ask_time = time.time()
         ans = self.llm.ask(prompt, **llm_kwargs)
+        ask_time = time.time() - ask_time
+        logger.info(f" Ask time: {ask_time} seconds")
         return ans

     def start_interactive(self):
@@ -554,3 +793,28 @@ class LeannChat:
             except (KeyboardInterrupt, EOFError):
                 print("\nGoodbye!")
                 break
+
+    def cleanup(self):
+        """Explicitly cleanup embedding server resources.
+
+        This method should be called after you're done using the chat interface,
+        especially in test environments or batch processing scenarios.
+        """
+        if hasattr(self.searcher, "cleanup"):
+            self.searcher.cleanup()
+
+    # Enable automatic cleanup patterns
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc, tb):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
+
+    def __del__(self):
+        try:
+            self.cleanup()
+        except Exception:
+            pass
@@ -4,22 +4,25 @@ This file contains the chat generation logic for the LEANN project,
 supporting different backends like Ollama, Hugging Face Transformers, and a simulation mode.
 """

-from abc import ABC, abstractmethod
-from typing import Dict, Any, Optional, List
+import difflib
 import logging
 import os
-import difflib
+from abc import ABC, abstractmethod
+from typing import Any, Optional
+
+import torch

 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)


-def check_ollama_models() -> List[str]:
+def check_ollama_models(host: str) -> list[str]:
     """Check available Ollama models and return a list"""
     try:
         import requests
-        response = requests.get("http://localhost:11434/api/tags", timeout=5)
+
+        response = requests.get(f"{host}/api/tags", timeout=5)
         if response.status_code == 200:
             data = response.json()
             return [model["name"] for model in data.get("models", [])]
@@ -28,7 +31,70 @@ def check_ollama_models() -> List[str]:
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[str]:
|
def check_ollama_model_exists_remotely(model_name: str) -> tuple[bool, list[str]]:
|
||||||
|
"""Check if a model exists in Ollama's remote library and return available tags
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(model_exists, available_tags): bool and list of matching tags
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import re
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
# Split model name and tag
|
||||||
|
if ":" in model_name:
|
||||||
|
base_model, requested_tag = model_name.split(":", 1)
|
||||||
|
else:
|
||||||
|
base_model, requested_tag = model_name, None
|
||||||
|
|
||||||
|
# First check if base model exists in library
|
||||||
|
library_response = requests.get("https://ollama.com/library", timeout=8)
|
||||||
|
if library_response.status_code != 200:
|
||||||
|
return True, [] # Assume exists if can't check
|
||||||
|
|
||||||
|
# Extract model names from library page
|
||||||
|
models_in_library = re.findall(r'href="/library/([^"]+)"', library_response.text)
|
||||||
|
|
||||||
|
if base_model not in models_in_library:
|
||||||
|
return False, [] # Base model doesn't exist
|
||||||
|
|
||||||
|
# If base model exists, get available tags
|
||||||
|
tags_response = requests.get(f"https://ollama.com/library/{base_model}/tags", timeout=8)
|
||||||
|
if tags_response.status_code != 200:
|
||||||
|
return True, [] # Base model exists but can't get tags
|
||||||
|
|
||||||
|
# Extract tags for this model - be more specific to avoid HTML artifacts
|
||||||
|
tag_pattern = rf"{re.escape(base_model)}:[a-zA-Z0-9\.\-_]+"
|
||||||
|
raw_tags = re.findall(tag_pattern, tags_response.text)
|
||||||
|
|
||||||
|
# Clean up tags - remove HTML artifacts and duplicates
|
||||||
|
available_tags = []
|
||||||
|
seen = set()
|
||||||
|
for tag in raw_tags:
|
||||||
|
# Skip if it looks like HTML (contains < or >)
|
||||||
|
if "<" in tag or ">" in tag:
|
||||||
|
continue
|
||||||
|
if tag not in seen:
|
||||||
|
seen.add(tag)
|
||||||
|
available_tags.append(tag)
|
||||||
|
|
||||||
|
# Check if exact model exists
|
||||||
|
if requested_tag is None:
|
||||||
|
# User just requested base model, suggest tags
|
||||||
|
return True, available_tags[:10] # Return up to 10 tags
|
||||||
|
else:
|
||||||
|
exact_match = model_name in available_tags
|
||||||
|
return exact_match, available_tags[:10]
|
||||||
|
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# If scraping fails, assume model might exist (don't block user)
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
|
||||||
|
def search_ollama_models_fuzzy(query: str, available_models: list[str]) -> list[str]:
|
||||||
"""Use intelligent fuzzy search for Ollama models"""
|
"""Use intelligent fuzzy search for Ollama models"""
|
||||||
if not available_models:
|
if not available_models:
|
||||||
return []
|
return []
|
||||||
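A hedged usage sketch of the remote-library helper added above; the model name is an arbitrary example:

    # Returns (exists, tags); on any scraping failure it assumes the model may exist.
    exists, tags = check_ollama_model_exists_remotely("llama3:8b")
    if not exists:
        print("Model not found in the Ollama library")
    elif tags:
        print("Known tags:", ", ".join(tags[:5]))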
@@ -41,7 +107,9 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
suggestions.extend(exact_matches)
|
suggestions.extend(exact_matches)
|
||||||
|
|
||||||
# 2. Starts with query
|
# 2. Starts with query
|
||||||
starts_with = [m for m in available_models if m.lower().startswith(query_lower) and m not in suggestions]
|
starts_with = [
|
||||||
|
m for m in available_models if m.lower().startswith(query_lower) and m not in suggestions
|
||||||
|
]
|
||||||
suggestions.extend(starts_with)
|
suggestions.extend(starts_with)
|
||||||
|
|
||||||
# 3. Contains query
|
# 3. Contains query
|
||||||
@@ -51,24 +119,25 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
# 4. Base model name matching (remove version numbers)
|
# 4. Base model name matching (remove version numbers)
|
||||||
def get_base_name(model_name: str) -> str:
|
def get_base_name(model_name: str) -> str:
|
||||||
"""Extract base name without version (e.g., 'llama3:8b' -> 'llama3')"""
|
"""Extract base name without version (e.g., 'llama3:8b' -> 'llama3')"""
|
||||||
return model_name.split(':')[0].split('-')[0]
|
return model_name.split(":")[0].split("-")[0]
|
||||||
|
|
||||||
query_base = get_base_name(query_lower)
|
query_base = get_base_name(query_lower)
|
||||||
base_matches = [
|
base_matches = [
|
||||||
m for m in available_models
|
m
|
||||||
|
for m in available_models
|
||||||
if get_base_name(m.lower()) == query_base and m not in suggestions
|
if get_base_name(m.lower()) == query_base and m not in suggestions
|
||||||
]
|
]
|
||||||
suggestions.extend(base_matches)
|
suggestions.extend(base_matches)
|
||||||
|
|
||||||
# 5. Family/variant matching
|
# 5. Family/variant matching
|
||||||
model_families = {
|
model_families = {
|
||||||
'llama': ['llama2', 'llama3', 'alpaca', 'vicuna', 'codellama'],
|
"llama": ["llama2", "llama3", "alpaca", "vicuna", "codellama"],
|
||||||
'qwen': ['qwen', 'qwen2', 'qwen3'],
|
"qwen": ["qwen", "qwen2", "qwen3"],
|
||||||
'gemma': ['gemma', 'gemma2'],
|
"gemma": ["gemma", "gemma2"],
|
||||||
'phi': ['phi', 'phi2', 'phi3'],
|
"phi": ["phi", "phi2", "phi3"],
|
||||||
'mistral': ['mistral', 'mixtral', 'openhermes'],
|
"mistral": ["mistral", "mixtral", "openhermes"],
|
||||||
'dolphin': ['dolphin', 'openchat'],
|
"dolphin": ["dolphin", "openchat"],
|
||||||
'deepseek': ['deepseek', 'deepseek-coder']
|
"deepseek": ["deepseek", "deepseek-coder"],
|
||||||
}
|
}
|
||||||
|
|
||||||
query_family = None
|
query_family = None
|
||||||
@@ -80,7 +149,8 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
if query_family:
|
if query_family:
|
||||||
family_variants = model_families[query_family]
|
family_variants = model_families[query_family]
|
||||||
family_matches = [
|
family_matches = [
|
||||||
m for m in available_models
|
m
|
||||||
|
for m in available_models
|
||||||
if any(variant in m.lower() for variant in family_variants) and m not in suggestions
|
if any(variant in m.lower() for variant in family_variants) and m not in suggestions
|
||||||
]
|
]
|
||||||
suggestions.extend(family_matches)
|
suggestions.extend(family_matches)
|
||||||
@@ -99,15 +169,13 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
# Remove this too - no need for fallback
|
# Remove this too - no need for fallback
|
||||||
|
|
||||||
|
|
||||||
def suggest_similar_models(invalid_model: str, available_models: List[str]) -> List[str]:
|
def suggest_similar_models(invalid_model: str, available_models: list[str]) -> list[str]:
|
||||||
"""Use difflib to find similar model names"""
|
"""Use difflib to find similar model names"""
|
||||||
if not available_models:
|
if not available_models:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
# Get close matches using fuzzy matching
|
# Get close matches using fuzzy matching
|
||||||
suggestions = difflib.get_close_matches(
|
suggestions = difflib.get_close_matches(invalid_model, available_models, n=3, cutoff=0.3)
|
||||||
invalid_model, available_models, n=3, cutoff=0.3
|
|
||||||
)
|
|
||||||
return suggestions
|
return suggestions
|
||||||
|
|
||||||
|
|
||||||
@@ -115,13 +183,14 @@ def check_hf_model_exists(model_name: str) -> bool:
|
|||||||
"""Quick check if HuggingFace model exists without downloading"""
|
"""Quick check if HuggingFace model exists without downloading"""
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import model_info
|
from huggingface_hub import model_info
|
||||||
|
|
||||||
model_info(model_name)
|
model_info(model_name)
|
||||||
return True
|
return True
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def get_popular_hf_models() -> List[str]:
|
def get_popular_hf_models() -> list[str]:
|
||||||
"""Return a list of popular HuggingFace models for suggestions"""
|
"""Return a list of popular HuggingFace models for suggestions"""
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import list_models
|
from huggingface_hub import list_models
|
||||||
@@ -131,15 +200,15 @@ def get_popular_hf_models() -> List[str]:
|
|||||||
filter="text-generation",
|
filter="text-generation",
|
||||||
sort="downloads",
|
sort="downloads",
|
||||||
direction=-1,
|
direction=-1,
|
||||||
limit=20 # Get top 20 most downloaded
|
limit=20, # Get top 20 most downloaded
|
||||||
)
|
)
|
||||||
|
|
||||||
# Extract model names and filter for chat/conversation models
|
# Extract model names and filter for chat/conversation models
|
||||||
model_names = []
|
model_names = []
|
||||||
chat_keywords = ['chat', 'instruct', 'dialog', 'conversation', 'assistant']
|
chat_keywords = ["chat", "instruct", "dialog", "conversation", "assistant"]
|
||||||
|
|
||||||
for model in models:
|
for model in models:
|
||||||
model_name = model.id if hasattr(model, 'id') else str(model)
|
model_name = model.id if hasattr(model, "id") else str(model)
|
||||||
# Prioritize models with chat-related keywords
|
# Prioritize models with chat-related keywords
|
||||||
if any(keyword in model_name.lower() for keyword in chat_keywords):
|
if any(keyword in model_name.lower() for keyword in chat_keywords):
|
||||||
model_names.append(model_name)
|
model_names.append(model_name)
|
||||||
@@ -153,7 +222,7 @@ def get_popular_hf_models() -> List[str]:
|
|||||||
return _get_fallback_hf_models()
|
return _get_fallback_hf_models()
|
||||||
|
|
||||||
|
|
||||||
def _get_fallback_hf_models() -> List[str]:
|
def _get_fallback_hf_models() -> list[str]:
|
||||||
"""Fallback list of popular HuggingFace models"""
|
"""Fallback list of popular HuggingFace models"""
|
||||||
return [
|
return [
|
||||||
"microsoft/DialoGPT-medium",
|
"microsoft/DialoGPT-medium",
|
||||||
@@ -165,11 +234,11 @@ def _get_fallback_hf_models() -> List[str]:
|
|||||||
"facebook/blenderbot_small-90M",
|
"facebook/blenderbot_small-90M",
|
||||||
"microsoft/phi-1_5",
|
"microsoft/phi-1_5",
|
||||||
"facebook/opt-350m",
|
"facebook/opt-350m",
|
||||||
"EleutherAI/gpt-neo-1.3B"
|
"EleutherAI/gpt-neo-1.3B",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
def search_hf_models_fuzzy(query: str, limit: int = 10) -> list[str]:
|
||||||
"""Use HuggingFace Hub's native fuzzy search for model suggestions"""
|
"""Use HuggingFace Hub's native fuzzy search for model suggestions"""
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import list_models
|
from huggingface_hub import list_models
|
||||||
@@ -180,10 +249,10 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
filter="text-generation",
|
filter="text-generation",
|
||||||
sort="downloads",
|
sort="downloads",
|
||||||
direction=-1,
|
direction=-1,
|
||||||
limit=limit
|
limit=limit,
|
||||||
)
|
)
|
||||||
|
|
||||||
model_names = [model.id if hasattr(model, 'id') else str(model) for model in models]
|
model_names = [model.id if hasattr(model, "id") else str(model) for model in models]
|
||||||
|
|
||||||
# If direct search doesn't return enough results, try some variations
|
# If direct search doesn't return enough results, try some variations
|
||||||
if len(model_names) < 3:
|
if len(model_names) < 3:
|
||||||
@@ -191,17 +260,17 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
variations = []
|
variations = []
|
||||||
|
|
||||||
# Extract base name (e.g., "gpt3" from "gpt-3.5")
|
# Extract base name (e.g., "gpt3" from "gpt-3.5")
|
||||||
base_query = query.lower().replace('-', '').replace('.', '').replace('_', '')
|
base_query = query.lower().replace("-", "").replace(".", "").replace("_", "")
|
||||||
if base_query != query.lower():
|
if base_query != query.lower():
|
||||||
variations.append(base_query)
|
variations.append(base_query)
|
||||||
|
|
||||||
# Try common model name patterns
|
# Try common model name patterns
|
||||||
if 'gpt' in query.lower():
|
if "gpt" in query.lower():
|
||||||
variations.extend(['gpt2', 'gpt-neo', 'gpt-j', 'dialoGPT'])
|
variations.extend(["gpt2", "gpt-neo", "gpt-j", "dialoGPT"])
|
||||||
elif 'llama' in query.lower():
|
elif "llama" in query.lower():
|
||||||
variations.extend(['llama2', 'alpaca', 'vicuna'])
|
variations.extend(["llama2", "alpaca", "vicuna"])
|
||||||
elif 'bert' in query.lower():
|
elif "bert" in query.lower():
|
||||||
variations.extend(['roberta', 'distilbert', 'albert'])
|
variations.extend(["roberta", "distilbert", "albert"])
|
||||||
|
|
||||||
# Search with variations
|
# Search with variations
|
||||||
for var in variations[:2]: # Limit to 2 variations to avoid too many API calls
|
for var in variations[:2]: # Limit to 2 variations to avoid too many API calls
|
||||||
@@ -211,11 +280,13 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
filter="text-generation",
|
filter="text-generation",
|
||||||
sort="downloads",
|
sort="downloads",
|
||||||
direction=-1,
|
direction=-1,
|
||||||
limit=3
|
limit=3,
|
||||||
)
|
)
|
||||||
var_names = [model.id if hasattr(model, 'id') else str(model) for model in var_models]
|
var_names = [
|
||||||
|
model.id if hasattr(model, "id") else str(model) for model in var_models
|
||||||
|
]
|
||||||
model_names.extend(var_names)
|
model_names.extend(var_names)
|
||||||
except:
|
except Exception:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Remove duplicates while preserving order
|
# Remove duplicates while preserving order
|
||||||
@@ -233,34 +304,86 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
def search_hf_models(query: str, limit: int = 10) -> List[str]:
|
def search_hf_models(query: str, limit: int = 10) -> list[str]:
|
||||||
"""Simple search for HuggingFace models based on query (kept for backward compatibility)"""
|
"""Simple search for HuggingFace models based on query (kept for backward compatibility)"""
|
||||||
return search_hf_models_fuzzy(query, limit)
|
return search_hf_models_fuzzy(query, limit)
|
||||||
|
|
||||||
|
|
||||||
def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
def validate_model_and_suggest(
|
||||||
|
model_name: str, llm_type: str, host: str = "http://localhost:11434"
|
||||||
|
) -> Optional[str]:
|
||||||
"""Validate model name and provide suggestions if invalid"""
|
"""Validate model name and provide suggestions if invalid"""
|
||||||
if llm_type == "ollama":
|
if llm_type == "ollama":
|
||||||
available_models = check_ollama_models()
|
available_models = check_ollama_models(host)
|
||||||
if available_models and model_name not in available_models:
|
if available_models and model_name not in available_models:
|
||||||
# Use intelligent fuzzy search based on locally installed models
|
|
||||||
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
|
||||||
|
|
||||||
error_msg = f"Model '{model_name}' not found in your local Ollama installation."
|
error_msg = f"Model '{model_name}' not found in your local Ollama installation."
|
||||||
if suggestions:
|
|
||||||
error_msg += "\n\nDid you mean one of these installed models?\n"
|
|
||||||
for i, suggestion in enumerate(suggestions, 1):
|
|
||||||
error_msg += f" {i}. {suggestion}\n"
|
|
||||||
else:
|
|
||||||
error_msg += "\n\nYour installed models:\n"
|
|
||||||
for i, model in enumerate(available_models[:8], 1):
|
|
||||||
error_msg += f" {i}. {model}\n"
|
|
||||||
if len(available_models) > 8:
|
|
||||||
error_msg += f" ... and {len(available_models) - 8} more\n"
|
|
||||||
|
|
||||||
error_msg += "\nTo list all models: ollama list"
|
# Check if the model exists remotely and get available tags
|
||||||
error_msg += "\nTo download a new model: ollama pull <model_name>"
|
model_exists_remotely, available_tags = check_ollama_model_exists_remotely(model_name)
|
||||||
error_msg += "\nBrowse models: https://ollama.com/library"
|
|
||||||
|
if model_exists_remotely and model_name in available_tags:
|
||||||
|
# Exact model exists remotely - suggest pulling it
|
||||||
|
error_msg += "\n\nTo install the requested model:\n"
|
||||||
|
error_msg += f" ollama pull {model_name}\n"
|
||||||
|
|
||||||
|
# Show local alternatives
|
||||||
|
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
||||||
|
if suggestions:
|
||||||
|
error_msg += "\nOr use one of these similar installed models:\n"
|
||||||
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
|
|
||||||
|
elif model_exists_remotely and available_tags:
|
||||||
|
# Base model exists but requested tag doesn't - suggest correct tags
|
||||||
|
base_model = model_name.split(":")[0]
|
||||||
|
requested_tag = model_name.split(":", 1)[1] if ":" in model_name else None
|
||||||
|
|
||||||
|
error_msg += (
|
||||||
|
f"\n\nModel '{base_model}' exists, but tag '{requested_tag}' is not available."
|
||||||
|
)
|
||||||
|
error_msg += f"\n\nAvailable {base_model} models you can install:\n"
|
||||||
|
for i, tag in enumerate(available_tags[:8], 1):
|
||||||
|
error_msg += f" {i}. ollama pull {tag}\n"
|
||||||
|
if len(available_tags) > 8:
|
||||||
|
error_msg += f" ... and {len(available_tags) - 8} more variants\n"
|
||||||
|
|
||||||
|
# Also show local alternatives
|
||||||
|
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
||||||
|
if suggestions:
|
||||||
|
error_msg += "\nOr use one of these similar installed models:\n"
|
||||||
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Model doesn't exist remotely - show fuzzy suggestions
|
||||||
|
suggestions = search_ollama_models_fuzzy(model_name, available_models)
|
||||||
|
error_msg += f"\n\nModel '{model_name}' was not found in Ollama's library."
|
||||||
|
|
||||||
|
if suggestions:
|
||||||
|
error_msg += (
|
||||||
|
"\n\nDid you mean one of these installed models?\n"
|
||||||
|
+ "\nTry to use ollama pull to install the model you need\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
|
else:
|
||||||
|
error_msg += "\n\nYour installed models:\n"
|
||||||
|
for i, model in enumerate(available_models[:8], 1):
|
||||||
|
error_msg += f" {i}. {model}\n"
|
||||||
|
if len(available_models) > 8:
|
||||||
|
error_msg += f" ... and {len(available_models) - 8} more\n"
|
||||||
|
|
||||||
|
error_msg += "\n\nCommands:"
|
||||||
|
error_msg += "\n ollama list # List installed models"
|
||||||
|
if model_exists_remotely and available_tags:
|
||||||
|
if model_name in available_tags:
|
||||||
|
error_msg += f"\n ollama pull {model_name} # Install requested model"
|
||||||
|
else:
|
||||||
|
error_msg += (
|
||||||
|
f"\n ollama pull {available_tags[0]} # Install recommended variant"
|
||||||
|
)
|
||||||
|
error_msg += "\n https://ollama.com/library # Browse available models"
|
||||||
return error_msg
|
return error_msg
|
||||||
|
|
||||||
elif llm_type == "hf":
|
elif llm_type == "hf":
|
||||||
@@ -299,7 +422,6 @@ class LLMInterface(ABC):
             top_k=10,
             complexity=64,
             beam_width=8,
-            USE_DEFERRED_FETCH=True,
             skip_search_reorder=True,
             recompute_beighbor_embeddings=True,
             dedup_node_dis=True,
@@ -311,7 +433,6 @@ class LLMInterface(ABC):
         Supported kwargs:
         - complexity (int): Search complexity parameter (default: 32)
         - beam_width (int): Beam width for search (default: 4)
-        - USE_DEFERRED_FETCH (bool): Enable deferred fetch mode (default: False)
         - skip_search_reorder (bool): Skip search reorder step (default: False)
         - recompute_beighbor_embeddings (bool): Enable ZMQ embedding server for neighbor recomputation (default: False)
         - dedup_node_dis (bool): Deduplicate nodes by distance (default: False)
@@ -348,7 +469,7 @@ class OllamaChat(LLMInterface):
             requests.get(host)

             # Pre-check model availability with helpful suggestions
-            model_error = validate_model_and_suggest(model, "ollama")
+            model_error = validate_model_and_suggest(model, "ollama", host)
             if model_error:
                 raise ValueError(model_error)

@@ -357,27 +478,50 @@ class OllamaChat(LLMInterface):
|
|||||||
"The 'requests' library is required for Ollama. Please install it with 'pip install requests'."
|
"The 'requests' library is required for Ollama. Please install it with 'pip install requests'."
|
||||||
)
|
)
|
||||||
except requests.exceptions.ConnectionError:
|
except requests.exceptions.ConnectionError:
|
||||||
logger.error(
|
logger.error(f"Could not connect to Ollama at {host}. Please ensure Ollama is running.")
|
||||||
f"Could not connect to Ollama at {host}. Please ensure Ollama is running."
|
|
||||||
)
|
|
||||||
raise ConnectionError(
|
raise ConnectionError(
|
||||||
f"Could not connect to Ollama at {host}. Please ensure Ollama is running."
|
f"Could not connect to Ollama at {host}. Please ensure Ollama is running."
|
||||||
)
|
)
|
||||||
|
|
||||||
def ask(self, prompt: str, **kwargs) -> str:
|
def ask(self, prompt: str, **kwargs) -> str:
|
||||||
import requests
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
full_url = f"{self.host}/api/generate"
|
full_url = f"{self.host}/api/generate"
|
||||||
|
|
||||||
|
# Handle thinking budget for reasoning models
|
||||||
|
options = kwargs.copy()
|
||||||
|
thinking_budget = kwargs.get("thinking_budget")
|
||||||
|
if thinking_budget:
|
||||||
|
# Remove thinking_budget from options as it's not a standard Ollama option
|
||||||
|
options.pop("thinking_budget", None)
|
||||||
|
# Only apply reasoning parameters to models that support it
|
||||||
|
reasoning_supported_models = [
|
||||||
|
"gpt-oss:20b",
|
||||||
|
"gpt-oss:120b",
|
||||||
|
"deepseek-r1",
|
||||||
|
"deepseek-coder",
|
||||||
|
]
|
||||||
|
|
||||||
|
if thinking_budget in ["low", "medium", "high"]:
|
||||||
|
if any(model in self.model.lower() for model in reasoning_supported_models):
|
||||||
|
options["reasoning"] = {"effort": thinking_budget, "exclude": False}
|
||||||
|
logger.info(f"Applied reasoning effort={thinking_budget} to model {self.model}")
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"Thinking budget '{thinking_budget}' requested but model '{self.model}' may not support reasoning parameters. Proceeding without reasoning."
|
||||||
|
)
|
||||||
|
|
||||||
payload = {
|
payload = {
|
||||||
"model": self.model,
|
"model": self.model,
|
||||||
"prompt": prompt,
|
"prompt": prompt,
|
||||||
"stream": False, # Keep it simple for now
|
"stream": False, # Keep it simple for now
|
||||||
"options": kwargs,
|
"options": options,
|
||||||
}
|
}
|
||||||
logger.debug(f"Sending request to Ollama: {payload}")
|
logger.debug(f"Sending request to Ollama: {payload}")
|
||||||
try:
|
try:
|
||||||
logger.info(f"Sending request to Ollama and waiting for response...")
|
logger.info("Sending request to Ollama and waiting for response...")
|
||||||
response = requests.post(full_url, data=json.dumps(payload))
|
response = requests.post(full_url, data=json.dumps(payload))
|
||||||
response.raise_for_status()
|
response.raise_for_status()
|
||||||
|
|
||||||
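A sketch of how the thinking-budget handling above could be exercised, assuming OllamaChat keeps its model constructor parameter; the model name and prompt are examples:

    llm = OllamaChat(model="gpt-oss:20b")
    # For models in reasoning_supported_models, ask() attaches
    # options["reasoning"] = {"effort": "high", "exclude": False}; others log a warning.
    answer = llm.ask("Summarize the retrieved context.", thinking_budget="high", temperature=0.2)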
@@ -397,7 +541,7 @@ class OllamaChat(LLMInterface):


 class HFChat(LLMInterface):
-    """LLM interface for local Hugging Face Transformers models."""
+    """LLM interface for local Hugging Face Transformers models with proper chat templates."""

     def __init__(self, model_name: str = "deepseek-ai/deepseek-llm-7b-chat"):
         logger.info(f"Initializing HFChat with model='{model_name}'")
@@ -408,8 +552,8 @@ class HFChat(LLMInterface):
             raise ValueError(model_error)

         try:
-            from transformers.pipelines import pipeline
             import torch
+            from transformers import AutoModelForCausalLM, AutoTokenizer
         except ImportError:
             raise ImportError(
                 "The 'transformers' and 'torch' libraries are required for Hugging Face models. Please install them with 'pip install transformers torch'."
@@ -417,54 +561,177 @@ class HFChat(LLMInterface):
|
|||||||
|
|
||||||
# Auto-detect device
|
# Auto-detect device
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
device = "cuda"
|
self.device = "cuda"
|
||||||
logger.info("CUDA is available. Using GPU.")
|
logger.info("CUDA is available. Using GPU.")
|
||||||
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
||||||
device = "mps"
|
self.device = "mps"
|
||||||
logger.info("MPS is available. Using Apple Silicon GPU.")
|
logger.info("MPS is available. Using Apple Silicon GPU.")
|
||||||
else:
|
else:
|
||||||
device = "cpu"
|
self.device = "cpu"
|
||||||
logger.info("No GPU detected. Using CPU.")
|
logger.info("No GPU detected. Using CPU.")
|
||||||
|
|
||||||
self.pipeline = pipeline("text-generation", model=model_name, device=device)
|
# Load tokenizer and model with timeout protection
|
||||||
|
try:
|
||||||
|
import signal
|
||||||
|
|
||||||
|
def timeout_handler(signum, frame):
|
||||||
|
raise TimeoutError("Model download/loading timed out")
|
||||||
|
|
||||||
|
# Set timeout for model loading (60 seconds)
|
||||||
|
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
|
||||||
|
signal.alarm(60)
|
||||||
|
|
||||||
|
try:
|
||||||
|
logger.info(f"Loading tokenizer for {model_name}...")
|
||||||
|
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||||
|
|
||||||
|
logger.info(f"Loading model {model_name}...")
|
||||||
|
self.model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
model_name,
|
||||||
|
torch_dtype=torch.float16 if self.device != "cpu" else torch.float32,
|
||||||
|
device_map="auto" if self.device != "cpu" else None,
|
||||||
|
trust_remote_code=True,
|
||||||
|
)
|
||||||
|
logger.info(f"Successfully loaded {model_name}")
|
||||||
|
finally:
|
||||||
|
signal.alarm(0) # Cancel the alarm
|
||||||
|
signal.signal(signal.SIGALRM, old_handler) # Restore old handler
|
||||||
|
|
||||||
|
except TimeoutError:
|
||||||
|
logger.error(f"Model loading timed out for {model_name}")
|
||||||
|
raise RuntimeError(
|
||||||
|
f"Model loading timed out for {model_name}. Please check your internet connection or try a smaller model."
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to load model {model_name}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
# Move model to device if not using device_map
|
||||||
|
if self.device != "cpu" and "device_map" not in str(self.model):
|
||||||
|
self.model = self.model.to(self.device)
|
||||||
|
|
||||||
|
# Set pad token if not present
|
||||||
|
if self.tokenizer.pad_token is None:
|
||||||
|
self.tokenizer.pad_token = self.tokenizer.eos_token
|
||||||
|
|
||||||
def ask(self, prompt: str, **kwargs) -> str:
|
def ask(self, prompt: str, **kwargs) -> str:
|
||||||
# Map OpenAI-style arguments to Hugging Face equivalents
|
print("kwargs in HF: ", kwargs)
|
||||||
if "max_tokens" in kwargs:
|
# Check if this is a Qwen model and add /no_think by default
|
||||||
# Prefer user-provided max_new_tokens if both are present
|
is_qwen_model = "qwen" in self.model.config._name_or_path.lower()
|
||||||
kwargs.setdefault("max_new_tokens", kwargs["max_tokens"])
|
|
||||||
# Remove the unsupported key to avoid errors in Transformers
|
|
||||||
kwargs.pop("max_tokens")
|
|
||||||
|
|
||||||
# Handle temperature=0 edge-case for greedy decoding
|
# For Qwen models, automatically add /no_think to the prompt
|
||||||
if "temperature" in kwargs and kwargs["temperature"] == 0.0:
|
if is_qwen_model and "/no_think" not in prompt and "/think" not in prompt:
|
||||||
# Remove unsupported zero temperature and use deterministic generation
|
prompt = prompt + " /no_think"
|
||||||
kwargs.pop("temperature")
|
|
||||||
kwargs.setdefault("do_sample", False)
|
|
||||||
|
|
||||||
# Sensible defaults for text generation
|
# Prepare chat template
|
||||||
params = {"max_length": 500, "num_return_sequences": 1, **kwargs}
|
messages = [{"role": "user", "content": prompt}]
|
||||||
logger.info(f"Generating text with Hugging Face model with params: {params}")
|
|
||||||
results = self.pipeline(prompt, **params)
|
|
||||||
|
|
||||||
# Handle different response formats from transformers
|
# Apply chat template if available
|
||||||
if isinstance(results, list) and len(results) > 0:
|
if hasattr(self.tokenizer, "apply_chat_template"):
|
||||||
generated_text = (
|
try:
|
||||||
results[0].get("generated_text", "")
|
formatted_prompt = self.tokenizer.apply_chat_template(
|
||||||
if isinstance(results[0], dict)
|
messages, tokenize=False, add_generation_prompt=True
|
||||||
else str(results[0])
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Chat template failed, using raw prompt: {e}")
|
||||||
|
formatted_prompt = prompt
|
||||||
|
else:
|
||||||
|
# Fallback for models without chat template
|
||||||
|
formatted_prompt = prompt
|
||||||
|
|
||||||
|
# Tokenize input
|
||||||
|
inputs = self.tokenizer(
|
||||||
|
formatted_prompt,
|
||||||
|
return_tensors="pt",
|
||||||
|
padding=True,
|
||||||
|
truncation=True,
|
||||||
|
max_length=2048,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Move inputs to device
|
||||||
|
if self.device != "cpu":
|
||||||
|
inputs = {k: v.to(self.device) for k, v in inputs.items()}
|
||||||
|
|
||||||
|
# Set generation parameters
|
||||||
|
generation_config = {
|
||||||
|
"max_new_tokens": kwargs.get("max_tokens", kwargs.get("max_new_tokens", 512)),
|
||||||
|
"temperature": kwargs.get("temperature", 0.7),
|
||||||
|
"top_p": kwargs.get("top_p", 0.9),
|
||||||
|
"do_sample": kwargs.get("temperature", 0.7) > 0,
|
||||||
|
"pad_token_id": self.tokenizer.eos_token_id,
|
||||||
|
"eos_token_id": self.tokenizer.eos_token_id,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Handle temperature=0 for greedy decoding
|
||||||
|
if generation_config["temperature"] == 0.0:
|
||||||
|
generation_config["do_sample"] = False
|
||||||
|
generation_config.pop("temperature")
|
||||||
|
|
||||||
|
logger.info(f"Generating with HuggingFace model, config: {generation_config}")
|
||||||
|
|
||||||
|
# Generate
|
||||||
|
with torch.no_grad():
|
||||||
|
outputs = self.model.generate(**inputs, **generation_config)
|
||||||
|
|
||||||
|
# Decode response
|
||||||
|
generated_tokens = outputs[0][inputs["input_ids"].shape[1] :]
|
||||||
|
response = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)
|
||||||
|
|
||||||
|
return response.strip()
|
||||||
|
|
||||||
|
|
||||||
|
class GeminiChat(LLMInterface):
|
||||||
|
"""LLM interface for Google Gemini models."""
|
||||||
|
|
||||||
|
def __init__(self, model: str = "gemini-2.5-flash", api_key: Optional[str] = None):
|
||||||
|
self.model = model
|
||||||
|
self.api_key = api_key or os.getenv("GEMINI_API_KEY")
|
||||||
|
|
||||||
|
if not self.api_key:
|
||||||
|
raise ValueError(
|
||||||
|
"Gemini API key is required. Set GEMINI_API_KEY environment variable or pass api_key parameter."
|
||||||
)
|
)
|
||||||
else:
|
|
||||||
generated_text = str(results)
|
|
||||||
|
|
||||||
# Extract only the newly generated portion by removing the original prompt
|
logger.info(f"Initializing Gemini Chat with model='{model}'")
|
||||||
if isinstance(generated_text, str) and generated_text.startswith(prompt):
|
|
||||||
response = generated_text[len(prompt) :].strip()
|
|
||||||
else:
|
|
||||||
# Fallback: return the full response if prompt removal fails
|
|
||||||
response = str(generated_text)
|
|
||||||
|
|
||||||
return response
|
try:
|
||||||
|
import google.genai as genai
|
||||||
|
|
||||||
|
self.client = genai.Client(api_key=self.api_key)
|
||||||
|
except ImportError:
|
||||||
|
raise ImportError(
|
||||||
|
"The 'google-genai' library is required for Gemini models. Please install it with 'uv pip install google-genai'."
|
||||||
|
)
|
||||||
|
|
||||||
|
def ask(self, prompt: str, **kwargs) -> str:
|
||||||
|
logger.info(f"Sending request to Gemini with model {self.model}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
from google.genai.types import GenerateContentConfig
|
||||||
|
|
||||||
|
generation_config = GenerateContentConfig(
|
||||||
|
temperature=kwargs.get("temperature", 0.7),
|
||||||
|
max_output_tokens=kwargs.get("max_tokens", 1000),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Handle top_p parameter
|
||||||
|
if "top_p" in kwargs:
|
||||||
|
generation_config.top_p = kwargs["top_p"]
|
||||||
|
|
||||||
|
response = self.client.models.generate_content(
|
||||||
|
model=self.model,
|
||||||
|
contents=prompt,
|
||||||
|
config=generation_config,
|
||||||
|
)
|
||||||
|
# Handle potential None response text
|
||||||
|
response_text = response.text
|
||||||
|
if response_text is None:
|
||||||
|
logger.warning("Gemini returned None response text")
|
||||||
|
return ""
|
||||||
|
return response_text.strip()
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error communicating with Gemini: {e}")
|
||||||
|
return f"Error: Could not get a response from Gemini. Details: {e}"
|
||||||
|
|
||||||
|
|
||||||
class OpenAIChat(LLMInterface):
|
class OpenAIChat(LLMInterface):
|
||||||
@@ -495,15 +762,38 @@ class OpenAIChat(LLMInterface):
|
|||||||
params = {
|
params = {
|
||||||
"model": self.model,
|
"model": self.model,
|
||||||
"messages": [{"role": "user", "content": prompt}],
|
"messages": [{"role": "user", "content": prompt}],
|
||||||
"max_tokens": kwargs.get("max_tokens", 1000),
|
|
||||||
"temperature": kwargs.get("temperature", 0.7),
|
"temperature": kwargs.get("temperature", 0.7),
|
||||||
**{
|
|
||||||
k: v
|
|
||||||
for k, v in kwargs.items()
|
|
||||||
if k not in ["max_tokens", "temperature"]
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Handle max_tokens vs max_completion_tokens based on model
|
||||||
|
max_tokens = kwargs.get("max_tokens", 1000)
|
||||||
|
if "o3" in self.model or "o4" in self.model or "o1" in self.model:
|
||||||
|
# o-series models use max_completion_tokens
|
||||||
|
params["max_completion_tokens"] = max_tokens
|
||||||
|
params["temperature"] = 1.0
|
||||||
|
else:
|
||||||
|
# Other models use max_tokens
|
||||||
|
params["max_tokens"] = max_tokens
|
||||||
|
|
||||||
|
# Handle thinking budget for reasoning models
|
||||||
|
thinking_budget = kwargs.get("thinking_budget")
|
||||||
|
if thinking_budget and thinking_budget in ["low", "medium", "high"]:
|
||||||
|
# Check if this is an o-series model (partial match for model names)
|
||||||
|
o_series_models = ["o3", "o3-mini", "o4-mini", "o1", "o3-pro", "o3-deep-research"]
|
||||||
|
if any(model in self.model for model in o_series_models):
|
||||||
|
# Use the correct OpenAI reasoning parameter format
|
||||||
|
params["reasoning_effort"] = thinking_budget
|
||||||
|
logger.info(f"Applied reasoning_effort={thinking_budget} to model {self.model}")
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"Thinking budget '{thinking_budget}' requested but model '{self.model}' may not support reasoning parameters. Proceeding without reasoning."
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add other kwargs (excluding thinking_budget as it's handled above)
|
||||||
|
for k, v in kwargs.items():
|
||||||
|
if k not in ["max_tokens", "temperature", "thinking_budget"]:
|
||||||
|
params[k] = v
|
||||||
|
|
||||||
logger.info(f"Sending request to OpenAI with model {self.model}")
|
logger.info(f"Sending request to OpenAI with model {self.model}")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
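For illustration, a sketch of the o-series parameter mapping above; the model name and values are examples only:

    chat = OpenAIChat(model="o4-mini", api_key="sk-...")
    # Because "o4" matches the model name, the request is sent with
    # max_completion_tokens=256, temperature forced to 1.0, and reasoning_effort="medium".
    answer = chat.ask("Explain the retrieved context.", max_tokens=256, thinking_budget="medium")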
@@ -523,7 +813,7 @@ class SimulatedChat(LLMInterface):
         return "This is a simulated answer from the LLM based on the retrieved context."


-def get_llm(llm_config: Optional[Dict[str, Any]] = None) -> LLMInterface:
+def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
     """
     Factory function to get an LLM interface based on configuration.

@@ -557,6 +847,8 @@ def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
         return HFChat(model_name=model or "deepseek-ai/deepseek-llm-7b-chat")
     elif llm_type == "openai":
         return OpenAIChat(model=model or "gpt-4o", api_key=llm_config.get("api_key"))
+    elif llm_type == "gemini":
+        return GeminiChat(model=model or "gemini-2.5-flash", api_key=llm_config.get("api_key"))
     elif llm_type == "simulated":
         return SimulatedChat()
     else:
File diff suppressed because it is too large
@@ -4,11 +4,12 @@ Consolidates all embedding computation logic using SentenceTransformer
 Preserves all optimization parameters to ensure performance
 """

-import numpy as np
-import torch
-from typing import List, Dict, Any
 import logging
 import os
+from typing import Any
+
+import numpy as np
+import torch

 # Set up logger with proper level
 logger = logging.getLogger(__name__)
@@ -17,11 +18,11 @@ log_level = getattr(logging, LOG_LEVEL, logging.WARNING)
 logger.setLevel(log_level)

 # Global model cache to avoid repeated loading
-_model_cache: Dict[str, Any] = {}
+_model_cache: dict[str, Any] = {}


 def compute_embeddings(
-    texts: List[str],
+    texts: list[str],
     model_name: str,
     mode: str = "sentence-transformers",
     is_build: bool = False,
@@ -34,7 +35,7 @@ def compute_embeddings(
     Args:
         texts: List of texts to compute embeddings for
         model_name: Model name
-        mode: Computation mode ('sentence-transformers', 'openai', 'mlx')
+        mode: Computation mode ('sentence-transformers', 'openai', 'mlx', 'ollama')
         is_build: Whether this is a build operation (shows progress bar)
         batch_size: Batch size for processing
         adaptive_optimization: Whether to use adaptive optimization based on batch size
@@ -54,12 +55,16 @@
         return compute_embeddings_openai(texts, model_name)
     elif mode == "mlx":
         return compute_embeddings_mlx(texts, model_name)
+    elif mode == "ollama":
+        return compute_embeddings_ollama(texts, model_name, is_build=is_build)
+    elif mode == "gemini":
+        return compute_embeddings_gemini(texts, model_name, is_build=is_build)
     else:
         raise ValueError(f"Unsupported embedding mode: {mode}")


 def compute_embeddings_sentence_transformers(
-    texts: List[str],
+    texts: list[str],
     model_name: str,
     use_fp16: bool = True,
     device: str = "auto",
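A hedged usage sketch of the extended mode dispatch above; the model names are examples, and any embedding model available to the respective backend would do:

    vecs = compute_embeddings(["hello leann"], "nomic-embed-text", mode="ollama")
    # vecs = compute_embeddings(["hello leann"], "text-embedding-004", mode="gemini")
    print(vecs.shape)  # (1, embedding_dim) numpy array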
@@ -101,7 +106,7 @@ def compute_embeddings_sentence_transformers(
         if device == "mps":
             batch_size = 128  # MPS optimal batch size from benchmark
             if model_name == "Qwen/Qwen3-Embedding-0.6B":
-                batch_size = 64
+                batch_size = 32
         elif device == "cuda":
             batch_size = 256  # CUDA optimal batch size
         # Keep original batch_size for CPU
@@ -114,9 +119,7 @@ def compute_embeddings_sentence_transformers(
         logger.info(f"Using cached optimized model: {model_name}")
         model = _model_cache[cache_key]
     else:
-        logger.info(
-            f"Loading and caching optimized SentenceTransformer model: {model_name}"
-        )
+        logger.info(f"Loading and caching optimized SentenceTransformer model: {model_name}")
         from sentence_transformers import SentenceTransformer

         logger.info(f"Using device: {device}")
@@ -134,9 +137,7 @@ def compute_embeddings_sentence_transformers(
             if hasattr(torch.mps, "set_per_process_memory_fraction"):
                 torch.mps.set_per_process_memory_fraction(0.9)
         except AttributeError:
-            logger.warning(
-                "Some MPS optimizations not available in this PyTorch version"
-            )
+            logger.warning("Some MPS optimizations not available in this PyTorch version")
     elif device == "cpu":
         # TODO: Haven't tested this yet
         torch.set_num_threads(min(8, os.cpu_count() or 4))
@@ -226,28 +227,35 @@ def compute_embeddings_sentence_transformers(
         device=device,
     )

-    logger.info(
-        f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}"
-    )
+    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

     # Validate results
     if np.isnan(embeddings).any() or np.isinf(embeddings).any():
-        raise RuntimeError(
-            f"Detected NaN or Inf values in embeddings, model: {model_name}"
-        )
+        raise RuntimeError(f"Detected NaN or Inf values in embeddings, model: {model_name}")

     return embeddings


-def compute_embeddings_openai(texts: List[str], model_name: str) -> np.ndarray:
+def compute_embeddings_openai(texts: list[str], model_name: str) -> np.ndarray:
     # TODO: @yichuan-w add progress bar only in build mode
     """Compute embeddings using OpenAI API"""
     try:
-        import openai
         import os
+
+        import openai
     except ImportError as e:
         raise ImportError(f"OpenAI package not installed: {e}")

+    # Validate input list
+    if not texts:
+        raise ValueError("Cannot compute embeddings for empty text list")
+    # Extra validation: abort early if any item is empty/whitespace
+    invalid_count = sum(1 for t in texts if not isinstance(t, str) or not t.strip())
+    if invalid_count > 0:
+        raise ValueError(
+            f"Found {invalid_count} empty/invalid text(s) in input. Upstream should filter before calling OpenAI."
+        )
+
     api_key = os.getenv("OPENAI_API_KEY")
     if not api_key:
         raise RuntimeError("OPENAI_API_KEY environment variable not set")
@@ -264,10 +272,19 @@ def compute_embeddings_openai(texts: List[str], model_name: str) -> np.ndarray:
|
|||||||
logger.info(
|
logger.info(
|
||||||
f"Computing embeddings for {len(texts)} texts using OpenAI API, model: '{model_name}'"
|
f"Computing embeddings for {len(texts)} texts using OpenAI API, model: '{model_name}'"
|
||||||
)
|
)
|
||||||
|
print(f"len of texts: {len(texts)}")
|
||||||
|
|
||||||
# OpenAI has limits on batch size and input length
|
# OpenAI has limits on batch size and input length
|
||||||
max_batch_size = 100 # Conservative batch size
|
max_batch_size = 800 # Conservative batch size because the token limit is 300K
|
||||||
all_embeddings = []
|
all_embeddings = []
|
||||||
|
# get the avg len of texts
|
||||||
|
avg_len = sum(len(text) for text in texts) / len(texts)
|
||||||
|
print(f"avg len of texts: {avg_len}")
|
||||||
|
# if avg len is less than 1000, use the max batch size
|
||||||
|
if avg_len > 300:
|
||||||
|
max_batch_size = 500
|
||||||
|
|
||||||
|
# if avg len is less than 1000, use the max batch size
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
@@ -293,15 +310,12 @@ def compute_embeddings_openai(texts: List[str], model_name: str) -> np.ndarray:
|
|||||||
raise
|
raise
|
||||||
|
|
||||||
embeddings = np.array(all_embeddings, dtype=np.float32)
|
embeddings = np.array(all_embeddings, dtype=np.float32)
|
||||||
logger.info(
|
logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
|
||||||
f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}"
|
print(f"len of embeddings: {len(embeddings)}")
|
||||||
)
|
|
||||||
return embeddings
|
return embeddings
|
||||||
|
|
||||||
|
|
||||||
def compute_embeddings_mlx(
|
def compute_embeddings_mlx(chunks: list[str], model_name: str, batch_size: int = 16) -> np.ndarray:
|
||||||
chunks: List[str], model_name: str, batch_size: int = 16
|
|
||||||
) -> np.ndarray:
|
|
||||||
# TODO: @yichuan-w add progress bar only in build mode
|
# TODO: @yichuan-w add progress bar only in build mode
|
||||||
"""Computes embeddings using an MLX model."""
|
"""Computes embeddings using an MLX model."""
|
||||||
try:
|
try:
|
||||||
@@ -373,3 +387,366 @@ def compute_embeddings_mlx(
|
|||||||
|
|
||||||
# Stack numpy arrays
|
# Stack numpy arrays
|
||||||
return np.stack(all_embeddings)
|
return np.stack(all_embeddings)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_embeddings_ollama(
|
||||||
|
texts: list[str], model_name: str, is_build: bool = False, host: str = "http://localhost:11434"
|
||||||
|
) -> np.ndarray:
|
||||||
|
"""
|
||||||
|
Compute embeddings using Ollama API with simplified batch processing.
|
||||||
|
|
||||||
|
Uses batch size of 32 for MPS/CPU and 128 for CUDA to optimize performance.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
texts: List of texts to compute embeddings for
|
||||||
|
model_name: Ollama model name (e.g., "nomic-embed-text", "mxbai-embed-large")
|
||||||
|
is_build: Whether this is a build operation (shows progress bar)
|
||||||
|
host: Ollama host URL (default: http://localhost:11434)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Normalized embeddings array, shape: (len(texts), embedding_dim)
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import requests
|
||||||
|
except ImportError:
|
||||||
|
raise ImportError(
|
||||||
|
"The 'requests' library is required for Ollama embeddings. Install with: uv pip install requests"
|
||||||
|
)
|
||||||
|
|
||||||
|
if not texts:
|
||||||
|
raise ValueError("Cannot compute embeddings for empty text list")
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Computing embeddings for {len(texts)} texts using Ollama API, model: '{model_name}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check if Ollama is running
|
||||||
|
try:
|
||||||
|
response = requests.get(f"{host}/api/version", timeout=5)
|
||||||
|
response.raise_for_status()
|
||||||
|
except requests.exceptions.ConnectionError:
|
||||||
|
error_msg = (
|
||||||
|
f"❌ Could not connect to Ollama at {host}.\n\n"
|
||||||
|
"Please ensure Ollama is running:\n"
|
||||||
|
" • macOS/Linux: ollama serve\n"
|
||||||
|
" • Windows: Make sure Ollama is running in the system tray\n\n"
|
||||||
|
"Installation: https://ollama.com/download"
|
||||||
|
)
|
||||||
|
raise RuntimeError(error_msg)
|
||||||
|
except Exception as e:
|
||||||
|
raise RuntimeError(f"Unexpected error connecting to Ollama: {e}")
|
||||||
|
|
||||||
|
# Check if model exists and provide helpful suggestions
|
||||||
|
try:
|
||||||
|
response = requests.get(f"{host}/api/tags", timeout=5)
|
||||||
|
response.raise_for_status()
|
||||||
|
models = response.json()
|
||||||
|
model_names = [model["name"] for model in models.get("models", [])]
|
||||||
|
|
||||||
|
# Filter for embedding models (models that support embeddings)
|
||||||
|
embedding_models = []
|
||||||
|
suggested_embedding_models = [
|
||||||
|
"nomic-embed-text",
|
||||||
|
"mxbai-embed-large",
|
||||||
|
"bge-m3",
|
||||||
|
"all-minilm",
|
||||||
|
"snowflake-arctic-embed",
|
||||||
|
]
|
||||||
|
|
||||||
|
for model in model_names:
|
||||||
|
# Check if it's an embedding model (by name patterns or known models)
|
||||||
|
base_name = model.split(":")[0]
|
||||||
|
if any(emb in base_name for emb in ["embed", "bge", "minilm", "e5"]):
|
||||||
|
embedding_models.append(model)
|
||||||
|
|
||||||
|
# Check if model exists (handle versioned names) and resolve to full name
|
||||||
|
resolved_model_name = None
|
||||||
|
for name in model_names:
|
||||||
|
# Exact match
|
||||||
|
if model_name == name:
|
||||||
|
resolved_model_name = name
|
||||||
|
break
|
||||||
|
# Match without version tag (use the versioned name)
|
||||||
|
elif model_name == name.split(":")[0]:
|
||||||
|
resolved_model_name = name
|
||||||
|
break
|
||||||
|
|
||||||
|
if not resolved_model_name:
|
||||||
|
error_msg = f"❌ Model '{model_name}' not found in local Ollama.\n\n"
|
||||||
|
|
||||||
|
# Suggest pulling the model
|
||||||
|
error_msg += "📦 To install this embedding model:\n"
|
||||||
|
error_msg += f" ollama pull {model_name}\n\n"
|
||||||
|
|
||||||
|
# Show available embedding models
|
||||||
|
if embedding_models:
|
||||||
|
error_msg += "✅ Available embedding models:\n"
|
||||||
|
for model in embedding_models[:5]:
|
||||||
|
error_msg += f" • {model}\n"
|
||||||
|
if len(embedding_models) > 5:
|
||||||
|
error_msg += f" ... and {len(embedding_models) - 5} more\n"
|
||||||
|
else:
|
||||||
|
error_msg += "💡 Popular embedding models to install:\n"
|
||||||
|
for model in suggested_embedding_models[:3]:
|
||||||
|
error_msg += f" • ollama pull {model}\n"
|
||||||
|
|
||||||
|
error_msg += "\n📚 Browse more: https://ollama.com/library"
|
||||||
|
raise ValueError(error_msg)
|
||||||
|
|
||||||
|
# Use the resolved model name for all subsequent operations
|
||||||
|
if resolved_model_name != model_name:
|
||||||
|
logger.info(f"Resolved model name '{model_name}' to '{resolved_model_name}'")
|
||||||
|
model_name = resolved_model_name
|
||||||
|
|
||||||
|
# Verify the model supports embeddings by testing it
|
||||||
|
try:
|
||||||
|
test_response = requests.post(
|
||||||
|
f"{host}/api/embeddings", json={"model": model_name, "prompt": "test"}, timeout=10
|
||||||
|
)
|
||||||
|
if test_response.status_code != 200:
|
||||||
|
error_msg = (
|
||||||
|
f"⚠️ Model '{model_name}' exists but may not support embeddings.\n\n"
|
||||||
|
f"Please use an embedding model like:\n"
|
||||||
|
)
|
||||||
|
for model in suggested_embedding_models[:3]:
|
||||||
|
error_msg += f" • {model}\n"
|
||||||
|
raise ValueError(error_msg)
|
||||||
|
except requests.exceptions.RequestException:
|
||||||
|
# If test fails, continue anyway - model might still work
|
||||||
|
pass
|
||||||
|
|
||||||
|
except requests.exceptions.RequestException as e:
|
||||||
|
logger.warning(f"Could not verify model existence: {e}")
|
||||||
|
|
||||||
|
# Determine batch size based on device availability
|
||||||
|
# Check for CUDA/MPS availability using torch if available
|
||||||
|
batch_size = 32 # Default for MPS/CPU
|
||||||
|
try:
|
||||||
|
import torch
|
||||||
|
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
batch_size = 128 # CUDA gets larger batch size
|
||||||
|
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
||||||
|
batch_size = 32 # MPS gets smaller batch size
|
||||||
|
except ImportError:
|
||||||
|
# If torch is not available, use conservative batch size
|
||||||
|
batch_size = 32
|
||||||
|
|
||||||
|
logger.info(f"Using batch size: {batch_size}")
|
||||||
|
|
||||||
|
def get_batch_embeddings(batch_texts):
|
||||||
|
"""Get embeddings for a batch of texts."""
|
||||||
|
all_embeddings = []
|
||||||
|
failed_indices = []
|
||||||
|
|
||||||
|
for i, text in enumerate(batch_texts):
|
||||||
|
max_retries = 3
|
||||||
|
retry_count = 0
|
||||||
|
|
||||||
|
# Truncate very long texts to avoid API issues
|
||||||
|
truncated_text = text[:8000] if len(text) > 8000 else text
|
||||||
|
while retry_count < max_retries:
|
||||||
|
try:
|
||||||
|
response = requests.post(
|
||||||
|
f"{host}/api/embeddings",
|
||||||
|
json={"model": model_name, "prompt": truncated_text},
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
response.raise_for_status()
|
||||||
|
|
||||||
|
result = response.json()
|
||||||
|
embedding = result.get("embedding")
|
||||||
|
|
||||||
|
if embedding is None:
|
||||||
|
raise ValueError(f"No embedding returned for text {i}")
|
||||||
|
|
||||||
|
if not isinstance(embedding, list) or len(embedding) == 0:
|
||||||
|
raise ValueError(f"Invalid embedding format for text {i}")
|
||||||
|
|
||||||
|
all_embeddings.append(embedding)
|
||||||
|
break
|
||||||
|
|
||||||
|
except requests.exceptions.Timeout:
|
||||||
|
retry_count += 1
|
||||||
|
if retry_count >= max_retries:
|
||||||
|
logger.warning(f"Timeout for text {i} after {max_retries} retries")
|
||||||
|
failed_indices.append(i)
|
||||||
|
all_embeddings.append(None)
|
||||||
|
break
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
retry_count += 1
|
||||||
|
if retry_count >= max_retries:
|
||||||
|
logger.error(f"Failed to get embedding for text {i}: {e}")
|
||||||
|
failed_indices.append(i)
|
||||||
|
all_embeddings.append(None)
|
||||||
|
break
|
||||||
|
return all_embeddings, failed_indices
|
||||||
|
|
||||||
|
# Process texts in batches
|
||||||
|
all_embeddings = []
|
||||||
|
all_failed_indices = []
|
||||||
|
|
||||||
|
# Setup progress bar if needed
|
||||||
|
show_progress = is_build or len(texts) > 10
|
||||||
|
try:
|
||||||
|
if show_progress:
|
||||||
|
from tqdm import tqdm
|
||||||
|
except ImportError:
|
||||||
|
show_progress = False
|
||||||
|
|
||||||
|
# Process batches
|
||||||
|
num_batches = (len(texts) + batch_size - 1) // batch_size
|
||||||
|
|
||||||
|
if show_progress:
|
||||||
|
batch_iterator = tqdm(range(num_batches), desc="Computing Ollama embeddings")
|
||||||
|
else:
|
||||||
|
batch_iterator = range(num_batches)
|
||||||
|
|
||||||
|
for batch_idx in batch_iterator:
|
||||||
|
start_idx = batch_idx * batch_size
|
||||||
|
end_idx = min(start_idx + batch_size, len(texts))
|
||||||
|
batch_texts = texts[start_idx:end_idx]
|
||||||
|
|
||||||
|
batch_embeddings, batch_failed = get_batch_embeddings(batch_texts)
|
||||||
|
|
||||||
|
# Adjust failed indices to global indices
|
||||||
|
global_failed = [start_idx + idx for idx in batch_failed]
|
||||||
|
all_failed_indices.extend(global_failed)
|
||||||
|
all_embeddings.extend(batch_embeddings)
|
||||||
|
|
||||||
|
# Handle failed embeddings
|
||||||
|
if all_failed_indices:
|
||||||
|
if len(all_failed_indices) == len(texts):
|
||||||
|
raise RuntimeError("Failed to compute any embeddings")
|
||||||
|
|
||||||
|
logger.warning(
|
||||||
|
f"Failed to compute embeddings for {len(all_failed_indices)}/{len(texts)} texts"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Use zero embeddings as fallback for failed ones
|
||||||
|
valid_embedding = next((e for e in all_embeddings if e is not None), None)
|
||||||
|
if valid_embedding:
|
||||||
|
embedding_dim = len(valid_embedding)
|
||||||
|
for i, embedding in enumerate(all_embeddings):
|
||||||
|
if embedding is None:
|
||||||
|
all_embeddings[i] = [0.0] * embedding_dim
|
||||||
|
|
||||||
|
# Remove None values
|
||||||
|
all_embeddings = [e for e in all_embeddings if e is not None]
|
||||||
|
|
||||||
|
if not all_embeddings:
|
||||||
|
raise RuntimeError("No valid embeddings were computed")
|
||||||
|
|
||||||
|
# Validate embedding dimensions
|
||||||
|
expected_dim = len(all_embeddings[0])
|
||||||
|
inconsistent_dims = []
|
||||||
|
for i, embedding in enumerate(all_embeddings):
|
||||||
|
if len(embedding) != expected_dim:
|
||||||
|
inconsistent_dims.append((i, len(embedding)))
|
||||||
|
|
||||||
|
if inconsistent_dims:
|
||||||
|
error_msg = f"Ollama returned inconsistent embedding dimensions. Expected {expected_dim}, but got:\n"
|
||||||
|
for idx, dim in inconsistent_dims[:10]: # Show first 10 inconsistent ones
|
||||||
|
error_msg += f" - Text {idx}: {dim} dimensions\n"
|
||||||
|
if len(inconsistent_dims) > 10:
|
||||||
|
error_msg += f" ... and {len(inconsistent_dims) - 10} more\n"
|
||||||
|
error_msg += f"\nThis is likely an Ollama API bug with model '{model_name}'. Please try:\n"
|
||||||
|
error_msg += "1. Restart Ollama service: 'ollama serve'\n"
|
||||||
|
error_msg += f"2. Re-pull the model: 'ollama pull {model_name}'\n"
|
||||||
|
error_msg += (
|
||||||
|
"3. Use sentence-transformers instead: --embedding-mode sentence-transformers\n"
|
||||||
|
)
|
||||||
|
error_msg += "4. Report this issue to Ollama: https://github.com/ollama/ollama/issues"
|
||||||
|
raise ValueError(error_msg)
|
||||||
|
|
||||||
|
# Convert to numpy array and normalize
|
||||||
|
embeddings = np.array(all_embeddings, dtype=np.float32)
|
||||||
|
|
||||||
|
# Normalize embeddings (L2 normalization)
|
||||||
|
norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
|
||||||
|
embeddings = embeddings / (norms + 1e-8) # Add small epsilon to avoid division by zero
|
||||||
|
|
||||||
|
logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
|
||||||
|
|
||||||
|
return embeddings
|
||||||
|
|
||||||
|
|
||||||
|
def compute_embeddings_gemini(
|
||||||
|
texts: list[str], model_name: str = "text-embedding-004", is_build: bool = False
|
||||||
|
) -> np.ndarray:
|
||||||
|
"""
|
||||||
|
Compute embeddings using Google Gemini API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
texts: List of texts to compute embeddings for
|
||||||
|
model_name: Gemini model name (default: "text-embedding-004")
|
||||||
|
is_build: Whether this is a build operation (shows progress bar)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Embeddings array, shape: (len(texts), embedding_dim)
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import os
|
||||||
|
|
||||||
|
import google.genai as genai
|
||||||
|
except ImportError as e:
|
||||||
|
raise ImportError(f"Google GenAI package not installed: {e}")
|
||||||
|
|
||||||
|
api_key = os.getenv("GEMINI_API_KEY")
|
||||||
|
if not api_key:
|
||||||
|
raise RuntimeError("GEMINI_API_KEY environment variable not set")
|
||||||
|
|
||||||
|
# Cache Gemini client
|
||||||
|
cache_key = "gemini_client"
|
||||||
|
if cache_key in _model_cache:
|
||||||
|
client = _model_cache[cache_key]
|
||||||
|
else:
|
||||||
|
client = genai.Client(api_key=api_key)
|
||||||
|
_model_cache[cache_key] = client
|
||||||
|
logger.info("Gemini client cached")
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Computing embeddings for {len(texts)} texts using Gemini API, model: '{model_name}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Gemini supports batch embedding
|
||||||
|
max_batch_size = 100 # Conservative batch size for Gemini
|
||||||
|
all_embeddings = []
|
||||||
|
|
||||||
|
try:
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
total_batches = (len(texts) + max_batch_size - 1) // max_batch_size
|
||||||
|
batch_range = range(0, len(texts), max_batch_size)
|
||||||
|
batch_iterator = tqdm(
|
||||||
|
batch_range, desc="Computing embeddings", unit="batch", total=total_batches
|
||||||
|
)
|
||||||
|
except ImportError:
|
||||||
|
# Fallback when tqdm is not available
|
||||||
|
batch_iterator = range(0, len(texts), max_batch_size)
|
||||||
|
|
||||||
|
for i in batch_iterator:
|
||||||
|
batch_texts = texts[i : i + max_batch_size]
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Use the embed_content method from the new Google GenAI SDK
|
||||||
|
response = client.models.embed_content(
|
||||||
|
model=model_name,
|
||||||
|
contents=batch_texts,
|
||||||
|
config=genai.types.EmbedContentConfig(
|
||||||
|
task_type="RETRIEVAL_DOCUMENT" # For document embedding
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract embeddings from response
|
||||||
|
for embedding_data in response.embeddings:
|
||||||
|
all_embeddings.append(embedding_data.values)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Batch {i} failed: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
embeddings = np.array(all_embeddings, dtype=np.float32)
|
||||||
|
logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
|
||||||
|
|
||||||
|
return embeddings
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
import time
|
|
||||||
import atexit
|
import atexit
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
import socket
|
import socket
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import os
|
import time
|
||||||
import logging
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
import psutil
|
|
||||||
|
# Lightweight, self-contained server manager with no cross-process inspection
|
||||||
|
|
||||||
# Set up logging based on environment variable
|
# Set up logging based on environment variable
|
||||||
LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
|
LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
|
||||||
@@ -18,136 +19,31 @@ logging.basicConfig(
|
|||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _is_colab_environment() -> bool:
|
||||||
|
"""Check if we're running in Google Colab environment."""
|
||||||
|
return "COLAB_GPU" in os.environ or "COLAB_TPU" in os.environ
|
||||||
|
|
||||||
|
|
||||||
|
def _get_available_port(start_port: int = 5557) -> int:
|
||||||
|
"""Get an available port starting from start_port."""
|
||||||
|
port = start_port
|
||||||
|
while port < start_port + 100: # Try up to 100 ports
|
||||||
|
try:
|
||||||
|
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||||
|
s.bind(("localhost", port))
|
||||||
|
return port
|
||||||
|
except OSError:
|
||||||
|
port += 1
|
||||||
|
raise RuntimeError(f"No available ports found in range {start_port}-{start_port + 100}")
|
||||||
|
|
||||||
|
|
||||||
def _check_port(port: int) -> bool:
|
def _check_port(port: int) -> bool:
|
||||||
"""Check if a port is in use"""
|
"""Check if a port is in use"""
|
||||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||||
return s.connect_ex(("localhost", port)) == 0
|
return s.connect_ex(("localhost", port)) == 0
|
||||||
|
|
||||||
|
|
||||||
def _check_process_matches_config(
|
# Note: All cross-process scanning helpers removed for simplicity
|
||||||
port: int, expected_model: str, expected_passages_file: str
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Check if the process using the port matches our expected model and passages file.
|
|
||||||
Returns True if matches, False otherwise.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
for proc in psutil.process_iter(["pid", "cmdline"]):
|
|
||||||
if not _is_process_listening_on_port(proc, port):
|
|
||||||
continue
|
|
||||||
|
|
||||||
cmdline = proc.info["cmdline"]
|
|
||||||
if not cmdline:
|
|
||||||
continue
|
|
||||||
|
|
||||||
return _check_cmdline_matches_config(
|
|
||||||
cmdline, port, expected_model, expected_passages_file
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.debug(f"No process found listening on port {port}")
|
|
||||||
return False
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(f"Could not check process on port {port}: {e}")
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def _is_process_listening_on_port(proc, port: int) -> bool:
|
|
||||||
"""Check if a process is listening on the given port."""
|
|
||||||
try:
|
|
||||||
connections = proc.net_connections()
|
|
||||||
for conn in connections:
|
|
||||||
if conn.laddr.port == port and conn.status == psutil.CONN_LISTEN:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def _check_cmdline_matches_config(
|
|
||||||
cmdline: list, port: int, expected_model: str, expected_passages_file: str
|
|
||||||
) -> bool:
|
|
||||||
"""Check if command line matches our expected configuration."""
|
|
||||||
cmdline_str = " ".join(cmdline)
|
|
||||||
logger.debug(f"Found process on port {port}: {cmdline_str}")
|
|
||||||
|
|
||||||
# Check if it's our embedding server
|
|
||||||
is_embedding_server = any(
|
|
||||||
server_type in cmdline_str
|
|
||||||
for server_type in [
|
|
||||||
"embedding_server",
|
|
||||||
"leann_backend_diskann.embedding_server",
|
|
||||||
"leann_backend_hnsw.hnsw_embedding_server",
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
if not is_embedding_server:
|
|
||||||
logger.debug(f"Process on port {port} is not our embedding server")
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Check model name
|
|
||||||
model_matches = _check_model_in_cmdline(cmdline, expected_model)
|
|
||||||
|
|
||||||
# Check passages file if provided
|
|
||||||
passages_matches = _check_passages_in_cmdline(cmdline, expected_passages_file)
|
|
||||||
|
|
||||||
result = model_matches and passages_matches
|
|
||||||
logger.debug(
|
|
||||||
f"model_matches: {model_matches}, passages_matches: {passages_matches}, overall: {result}"
|
|
||||||
)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _check_model_in_cmdline(cmdline: list, expected_model: str) -> bool:
|
|
||||||
"""Check if the command line contains the expected model."""
|
|
||||||
if "--model-name" not in cmdline:
|
|
||||||
return False
|
|
||||||
|
|
||||||
model_idx = cmdline.index("--model-name")
|
|
||||||
if model_idx + 1 >= len(cmdline):
|
|
||||||
return False
|
|
||||||
|
|
||||||
actual_model = cmdline[model_idx + 1]
|
|
||||||
return actual_model == expected_model
|
|
||||||
|
|
||||||
|
|
||||||
def _check_passages_in_cmdline(cmdline: list, expected_passages_file: str) -> bool:
|
|
||||||
"""Check if the command line contains the expected passages file."""
|
|
||||||
if "--passages-file" not in cmdline:
|
|
||||||
return False # Expected but not found
|
|
||||||
|
|
||||||
passages_idx = cmdline.index("--passages-file")
|
|
||||||
if passages_idx + 1 >= len(cmdline):
|
|
||||||
return False
|
|
||||||
|
|
||||||
actual_passages = cmdline[passages_idx + 1]
|
|
||||||
expected_path = Path(expected_passages_file).resolve()
|
|
||||||
actual_path = Path(actual_passages).resolve()
|
|
||||||
return actual_path == expected_path
|
|
||||||
|
|
||||||
|
|
||||||
def _find_compatible_port_or_next_available(
|
|
||||||
start_port: int, model_name: str, passages_file: str, max_attempts: int = 100
|
|
||||||
) -> tuple[int, bool]:
|
|
||||||
"""
|
|
||||||
Find a port that either has a compatible server or is available.
|
|
||||||
Returns (port, is_compatible) where is_compatible indicates if we found a matching server.
|
|
||||||
"""
|
|
||||||
for port in range(start_port, start_port + max_attempts):
|
|
||||||
if not _check_port(port):
|
|
||||||
# Port is available
|
|
||||||
return port, False
|
|
||||||
|
|
||||||
# Port is in use, check if it's compatible
|
|
||||||
if _check_process_matches_config(port, model_name, passages_file):
|
|
||||||
logger.info(f"Found compatible server on port {port}")
|
|
||||||
return port, True
|
|
||||||
else:
|
|
||||||
logger.info(f"Port {port} has incompatible server, trying next port...")
|
|
||||||
|
|
||||||
raise RuntimeError(
|
|
||||||
f"Could not find compatible or available port in range {start_port}-{start_port + max_attempts}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class EmbeddingServerManager:
|
class EmbeddingServerManager:
|
||||||
@@ -166,7 +62,16 @@ class EmbeddingServerManager:
|
|||||||
self.backend_module_name = backend_module_name
|
self.backend_module_name = backend_module_name
|
||||||
self.server_process: Optional[subprocess.Popen] = None
|
self.server_process: Optional[subprocess.Popen] = None
|
||||||
self.server_port: Optional[int] = None
|
self.server_port: Optional[int] = None
|
||||||
|
# Track last-started config for in-process reuse only
|
||||||
|
self._server_config: Optional[dict] = None
|
||||||
self._atexit_registered = False
|
self._atexit_registered = False
|
||||||
|
# Also register a weakref finalizer to ensure cleanup when manager is GC'ed
|
||||||
|
try:
|
||||||
|
import weakref
|
||||||
|
|
||||||
|
self._finalizer = weakref.finalize(self, self._finalize_process)
|
||||||
|
except Exception:
|
||||||
|
self._finalizer = None
|
||||||
|
|
||||||
def start_server(
|
def start_server(
|
||||||
self,
|
self,
|
||||||
@@ -175,69 +80,58 @@ class EmbeddingServerManager:
|
|||||||
embedding_mode: str = "sentence-transformers",
|
embedding_mode: str = "sentence-transformers",
|
||||||
**kwargs,
|
**kwargs,
|
||||||
) -> tuple[bool, int]:
|
) -> tuple[bool, int]:
|
||||||
"""
|
"""Start the embedding server."""
|
||||||
Starts the embedding server process.
|
# passages_file may be present in kwargs for server CLI, but we don't need it here
|
||||||
|
|
||||||
Args:
|
# If this manager already has a live server, just reuse it
|
||||||
port (int): The preferred ZMQ port for the server.
|
if self.server_process and self.server_process.poll() is None and self.server_port:
|
||||||
model_name (str): The name of the embedding model to use.
|
logger.info("Reusing in-process server")
|
||||||
**kwargs: Additional arguments for the server.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
tuple[bool, int]: (success, actual_port_used)
|
|
||||||
"""
|
|
||||||
passages_file = kwargs.get("passages_file")
|
|
||||||
assert isinstance(passages_file, str), "passages_file must be a string"
|
|
||||||
|
|
||||||
# Check if we have a compatible running server
|
|
||||||
if self._has_compatible_running_server(model_name, passages_file):
|
|
||||||
assert self.server_port is not None, (
|
|
||||||
"a compatible running server should set server_port"
|
|
||||||
)
|
|
||||||
return True, self.server_port
|
return True, self.server_port
|
||||||
|
|
||||||
# Find available port (compatible or free)
|
# For Colab environment, use a different strategy
|
||||||
|
if _is_colab_environment():
|
||||||
|
logger.info("Detected Colab environment, using alternative startup strategy")
|
||||||
|
return self._start_server_colab(port, model_name, embedding_mode, **kwargs)
|
||||||
|
|
||||||
|
# Always pick a fresh available port
|
||||||
try:
|
try:
|
||||||
actual_port, is_compatible = _find_compatible_port_or_next_available(
|
actual_port = _get_available_port(port)
|
||||||
port, model_name, passages_file
|
except RuntimeError:
|
||||||
)
|
logger.error("No available ports found")
|
||||||
except RuntimeError as e:
|
|
||||||
logger.error(str(e))
|
|
||||||
return False, port
|
return False, port
|
||||||
|
|
||||||
if is_compatible:
|
# Start a new server
|
||||||
logger.info(f"Using existing compatible server on port {actual_port}")
|
|
||||||
self.server_port = actual_port
|
|
||||||
self.server_process = None # We don't own this process
|
|
||||||
return True, actual_port
|
|
||||||
|
|
||||||
if actual_port != port:
|
|
||||||
logger.info(f"Using port {actual_port} instead of {port}")
|
|
||||||
|
|
||||||
# Start new server
|
|
||||||
return self._start_new_server(actual_port, model_name, embedding_mode, **kwargs)
|
return self._start_new_server(actual_port, model_name, embedding_mode, **kwargs)
|
||||||
|
|
||||||
def _has_compatible_running_server(
|
def _start_server_colab(
|
||||||
self, model_name: str, passages_file: str
|
self,
|
||||||
) -> bool:
|
port: int,
|
||||||
"""Check if we have a compatible running server."""
|
model_name: str,
|
||||||
if not (
|
embedding_mode: str = "sentence-transformers",
|
||||||
self.server_process
|
**kwargs,
|
||||||
and self.server_process.poll() is None
|
) -> tuple[bool, int]:
|
||||||
and self.server_port
|
"""Start server with Colab-specific configuration."""
|
||||||
):
|
# Try to find an available port
|
||||||
return False
|
try:
|
||||||
|
actual_port = _get_available_port(port)
|
||||||
|
except RuntimeError:
|
||||||
|
logger.error("No available ports found")
|
||||||
|
return False, port
|
||||||
|
|
||||||
if _check_process_matches_config(self.server_port, model_name, passages_file):
|
logger.info(f"Starting server on port {actual_port} for Colab environment")
|
||||||
logger.info(
|
|
||||||
f"Existing server process (PID {self.server_process.pid}) is compatible"
|
|
||||||
)
|
|
||||||
return True
|
|
||||||
|
|
||||||
logger.info(
|
# Use a simpler startup strategy for Colab
|
||||||
"Existing server process is incompatible. Should start a new server."
|
command = self._build_server_command(actual_port, model_name, embedding_mode, **kwargs)
|
||||||
)
|
|
||||||
return False
|
try:
|
||||||
|
# In Colab, we'll use a more direct approach
|
||||||
|
self._launch_server_process_colab(command, actual_port)
|
||||||
|
return self._wait_for_server_ready_colab(actual_port)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to start embedding server in Colab: {e}")
|
||||||
|
return False, actual_port
|
||||||
|
|
||||||
|
# Note: No compatibility check needed; manager is per-searcher and configs are stable per instance
|
||||||
|
|
||||||
def _start_new_server(
|
def _start_new_server(
|
||||||
self, port: int, model_name: str, embedding_mode: str, **kwargs
|
self, port: int, model_name: str, embedding_mode: str, **kwargs
|
||||||
@@ -269,9 +163,13 @@ class EmbeddingServerManager:
|
|||||||
]
|
]
|
||||||
|
|
||||||
if kwargs.get("passages_file"):
|
if kwargs.get("passages_file"):
|
||||||
command.extend(["--passages-file", str(kwargs["passages_file"])])
|
# Convert to absolute path to ensure subprocess can find the file
|
||||||
|
passages_file = Path(kwargs["passages_file"]).resolve()
|
||||||
|
command.extend(["--passages-file", str(passages_file)])
|
||||||
if embedding_mode != "sentence-transformers":
|
if embedding_mode != "sentence-transformers":
|
||||||
command.extend(["--embedding-mode", embedding_mode])
|
command.extend(["--embedding-mode", embedding_mode])
|
||||||
|
if kwargs.get("distance_metric"):
|
||||||
|
command.extend(["--distance-metric", kwargs["distance_metric"]])
|
||||||
|
|
||||||
return command
|
return command
|
||||||
|
|
||||||
@@ -280,22 +178,62 @@ class EmbeddingServerManager:
|
|||||||
project_root = Path(__file__).parent.parent.parent.parent.parent
|
project_root = Path(__file__).parent.parent.parent.parent.parent
|
||||||
logger.info(f"Command: {' '.join(command)}")
|
logger.info(f"Command: {' '.join(command)}")
|
||||||
|
|
||||||
# Let server output go directly to console
|
# In CI environment, redirect stdout to avoid buffer deadlock but keep stderr for debugging
|
||||||
# The server will respect LEANN_LOG_LEVEL environment variable
|
# Embedding servers use many print statements that can fill stdout buffers
|
||||||
|
is_ci = os.environ.get("CI") == "true"
|
||||||
|
if is_ci:
|
||||||
|
stdout_target = subprocess.DEVNULL
|
||||||
|
stderr_target = None # Keep stderr for error debugging in CI
|
||||||
|
logger.info(
|
||||||
|
"CI environment detected, redirecting embedding server stdout to DEVNULL, keeping stderr"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
stdout_target = None # Direct to console for visible logs
|
||||||
|
stderr_target = None # Direct to console for visible logs
|
||||||
|
|
||||||
|
# Start embedding server subprocess
|
||||||
|
logger.info(f"Starting server process with command: {' '.join(command)}")
|
||||||
self.server_process = subprocess.Popen(
|
self.server_process = subprocess.Popen(
|
||||||
command,
|
command,
|
||||||
cwd=project_root,
|
cwd=project_root,
|
||||||
stdout=None, # Direct to console
|
stdout=stdout_target,
|
||||||
stderr=None, # Direct to console
|
stderr=stderr_target,
|
||||||
)
|
)
|
||||||
self.server_port = port
|
self.server_port = port
|
||||||
|
# Record config for in-process reuse
|
||||||
|
try:
|
||||||
|
self._server_config = {
|
||||||
|
"model_name": command[command.index("--model-name") + 1]
|
||||||
|
if "--model-name" in command
|
||||||
|
else "",
|
||||||
|
"passages_file": command[command.index("--passages-file") + 1]
|
||||||
|
if "--passages-file" in command
|
||||||
|
else "",
|
||||||
|
"embedding_mode": command[command.index("--embedding-mode") + 1]
|
||||||
|
if "--embedding-mode" in command
|
||||||
|
else "sentence-transformers",
|
||||||
|
}
|
||||||
|
except Exception:
|
||||||
|
self._server_config = {
|
||||||
|
"model_name": "",
|
||||||
|
"passages_file": "",
|
||||||
|
"embedding_mode": "sentence-transformers",
|
||||||
|
}
|
||||||
logger.info(f"Server process started with PID: {self.server_process.pid}")
|
logger.info(f"Server process started with PID: {self.server_process.pid}")
|
||||||
|
|
||||||
# Register atexit callback only when we actually start a process
|
# Register atexit callback only when we actually start a process
|
||||||
if not self._atexit_registered:
|
if not self._atexit_registered:
|
||||||
# Use a lambda to avoid issues with bound methods
|
# Always attempt best-effort finalize at interpreter exit
|
||||||
atexit.register(lambda: self.stop_server() if self.server_process else None)
|
atexit.register(self._finalize_process)
|
||||||
self._atexit_registered = True
|
self._atexit_registered = True
|
||||||
|
# Touch finalizer so it knows there is a live process
|
||||||
|
if getattr(self, "_finalizer", None) is not None and not self._finalizer.alive:
|
||||||
|
try:
|
||||||
|
import weakref
|
||||||
|
|
||||||
|
self._finalizer = weakref.finalize(self, self._finalize_process)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
def _wait_for_server_ready(self, port: int) -> tuple[bool, int]:
|
def _wait_for_server_ready(self, port: int) -> tuple[bool, int]:
|
||||||
"""Wait for the server to be ready."""
|
"""Wait for the server to be ready."""
|
||||||
@@ -320,29 +258,114 @@ class EmbeddingServerManager:
|
|||||||
if not self.server_process:
|
if not self.server_process:
|
||||||
return
|
return
|
||||||
|
|
||||||
if self.server_process.poll() is not None:
|
if self.server_process and self.server_process.poll() is not None:
|
||||||
# Process already terminated
|
# Process already terminated
|
||||||
self.server_process = None
|
self.server_process = None
|
||||||
|
self.server_port = None
|
||||||
|
self._server_config = None
|
||||||
return
|
return
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Terminating server process (PID: {self.server_process.pid}) for backend {self.backend_module_name}..."
|
f"Terminating server process (PID: {self.server_process.pid}) for backend {self.backend_module_name}..."
|
||||||
)
|
)
|
||||||
self.server_process.terminate()
|
|
||||||
|
|
||||||
|
# Use simple termination first; if the server installed signal handlers,
|
||||||
|
# it will exit cleanly. Otherwise escalate to kill after a short wait.
|
||||||
try:
|
try:
|
||||||
self.server_process.wait(timeout=5)
|
self.server_process.terminate()
|
||||||
logger.info(f"Server process {self.server_process.pid} terminated.")
|
|
||||||
except subprocess.TimeoutExpired:
|
|
||||||
logger.warning(
|
|
||||||
f"Server process {self.server_process.pid} did not terminate gracefully, killing it."
|
|
||||||
)
|
|
||||||
self.server_process.kill()
|
|
||||||
|
|
||||||
# Clean up process resources to prevent resource tracker warnings
|
|
||||||
try:
|
|
||||||
self.server_process.wait() # Ensure process is fully cleaned up
|
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
self.server_process = None
|
try:
|
||||||
|
self.server_process.wait(timeout=5) # Give more time for graceful shutdown
|
||||||
|
logger.info(f"Server process {self.server_process.pid} terminated gracefully.")
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
logger.warning(
|
||||||
|
f"Server process {self.server_process.pid} did not terminate within 5 seconds, force killing..."
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
self.server_process.kill()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
try:
|
||||||
|
self.server_process.wait(timeout=2)
|
||||||
|
logger.info(f"Server process {self.server_process.pid} killed successfully.")
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
logger.error(
|
||||||
|
f"Failed to kill server process {self.server_process.pid} - it may be hung"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Clean up process resources with timeout to avoid CI hang
|
||||||
|
try:
|
||||||
|
# Use shorter timeout in CI environments
|
||||||
|
is_ci = os.environ.get("CI") == "true"
|
||||||
|
timeout = 3 if is_ci else 10
|
||||||
|
self.server_process.wait(timeout=timeout)
|
||||||
|
logger.info(f"Server process {self.server_process.pid} cleanup completed")
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
logger.warning(f"Process cleanup timeout after {timeout}s, proceeding anyway")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error during process cleanup: {e}")
|
||||||
|
finally:
|
||||||
|
self.server_process = None
|
||||||
|
self.server_port = None
|
||||||
|
self._server_config = None
|
||||||
|
|
||||||
|
def _finalize_process(self) -> None:
|
||||||
|
"""Best-effort cleanup used by weakref.finalize/atexit."""
|
||||||
|
try:
|
||||||
|
self.stop_server()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _adopt_existing_server(self, *args, **kwargs) -> None:
|
||||||
|
# Removed: cross-process adoption no longer supported
|
||||||
|
return
|
||||||
|
|
||||||
|
def _launch_server_process_colab(self, command: list, port: int) -> None:
|
||||||
|
"""Launch the server process with Colab-specific settings."""
|
||||||
|
logger.info(f"Colab Command: {' '.join(command)}")
|
||||||
|
|
||||||
|
# In Colab, we need to be more careful about process management
|
||||||
|
self.server_process = subprocess.Popen(
|
||||||
|
command,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE,
|
||||||
|
text=True,
|
||||||
|
)
|
||||||
|
self.server_port = port
|
||||||
|
logger.info(f"Colab server process started with PID: {self.server_process.pid}")
|
||||||
|
|
||||||
|
# Register atexit callback (unified)
|
||||||
|
if not self._atexit_registered:
|
||||||
|
atexit.register(self._finalize_process)
|
||||||
|
self._atexit_registered = True
|
||||||
|
# Record config for in-process reuse is best-effort in Colab mode
|
||||||
|
self._server_config = {
|
||||||
|
"model_name": "",
|
||||||
|
"passages_file": "",
|
||||||
|
"embedding_mode": "sentence-transformers",
|
||||||
|
}
|
||||||
|
|
||||||
|
def _wait_for_server_ready_colab(self, port: int) -> tuple[bool, int]:
|
||||||
|
"""Wait for the server to be ready with Colab-specific timeout."""
|
||||||
|
max_wait, wait_interval = 30, 0.5 # Shorter timeout for Colab
|
||||||
|
|
||||||
|
for _ in range(int(max_wait / wait_interval)):
|
||||||
|
if _check_port(port):
|
||||||
|
logger.info("Colab embedding server is ready!")
|
||||||
|
return True, port
|
||||||
|
|
||||||
|
if self.server_process and self.server_process.poll() is not None:
|
||||||
|
# Check for error output
|
||||||
|
stdout, stderr = self.server_process.communicate()
|
||||||
|
logger.error("Colab server terminated during startup.")
|
||||||
|
logger.error(f"stdout: {stdout}")
|
||||||
|
logger.error(f"stderr: {stderr}")
|
||||||
|
return False, port
|
||||||
|
|
||||||
|
time.sleep(wait_interval)
|
||||||
|
|
||||||
|
logger.error(f"Colab server failed to start within {max_wait} seconds.")
|
||||||
|
self.stop_server()
|
||||||
|
return False, port
|
||||||
|
|||||||
@@ -1,15 +1,14 @@
|
|||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Any, Literal, Optional
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from typing import Dict, Any, List, Literal, Optional
|
|
||||||
|
|
||||||
|
|
||||||
class LeannBackendBuilderInterface(ABC):
|
class LeannBackendBuilderInterface(ABC):
|
||||||
"""Backend interface for building indexes"""
|
"""Backend interface for building indexes"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def build(
|
def build(self, data: np.ndarray, ids: list[str], index_path: str, **kwargs) -> None:
|
||||||
self, data: np.ndarray, ids: List[str], index_path: str, **kwargs
|
|
||||||
) -> None:
|
|
||||||
"""Build index
|
"""Build index
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -53,7 +52,7 @@ class LeannBackendSearcherInterface(ABC):
|
|||||||
pruning_strategy: Literal["global", "local", "proportional"] = "global",
|
pruning_strategy: Literal["global", "local", "proportional"] = "global",
|
||||||
zmq_port: Optional[int] = None,
|
zmq_port: Optional[int] = None,
|
||||||
**kwargs,
|
**kwargs,
|
||||||
) -> Dict[str, Any]:
|
) -> dict[str, Any]:
|
||||||
"""Search for nearest neighbors
|
"""Search for nearest neighbors
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
|
|||||||
154
packages/leann-core/src/leann/mcp.py
Executable file
154
packages/leann-core/src/leann/mcp.py
Executable file
@@ -0,0 +1,154 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import json
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def handle_request(request):
|
||||||
|
if request.get("method") == "initialize":
|
||||||
|
return {
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": request.get("id"),
|
||||||
|
"result": {
|
||||||
|
"capabilities": {"tools": {}},
|
||||||
|
"protocolVersion": "2024-11-05",
|
||||||
|
"serverInfo": {"name": "leann-mcp", "version": "1.0.0"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
elif request.get("method") == "tools/list":
|
||||||
|
return {
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": request.get("id"),
|
||||||
|
"result": {
|
||||||
|
"tools": [
|
||||||
|
{
|
||||||
|
"name": "leann_search",
|
||||||
|
"description": """🔍 Search code using natural language - like having a coding assistant who knows your entire codebase!
|
||||||
|
|
||||||
|
🎯 **Perfect for**:
|
||||||
|
- "How does authentication work?" → finds auth-related code
|
||||||
|
- "Error handling patterns" → locates try-catch blocks and error logic
|
||||||
|
- "Database connection setup" → finds DB initialization code
|
||||||
|
- "API endpoint definitions" → locates route handlers
|
||||||
|
- "Configuration management" → finds config files and usage
|
||||||
|
|
||||||
|
💡 **Pro tip**: Use this before making any changes to understand existing patterns and conventions.""",
|
||||||
|
"inputSchema": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"index_name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Name of the LEANN index to search. Use 'leann_list' first to see available indexes.",
|
||||||
|
},
|
||||||
|
"query": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Search query - can be natural language (e.g., 'how to handle errors') or technical terms (e.g., 'async function definition')",
|
||||||
|
},
|
||||||
|
"top_k": {
|
||||||
|
"type": "integer",
|
||||||
|
"default": 5,
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 20,
|
||||||
|
"description": "Number of search results to return. Use 5-10 for focused results, 15-20 for comprehensive exploration.",
|
||||||
|
},
|
||||||
|
"complexity": {
|
||||||
|
"type": "integer",
|
||||||
|
"default": 32,
|
||||||
|
"minimum": 16,
|
||||||
|
"maximum": 128,
|
||||||
|
"description": "Search complexity level. Use 16-32 for fast searches (recommended), 64+ for higher precision when needed.",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"required": ["index_name", "query"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leann_list",
|
||||||
|
"description": "📋 Show all your indexed codebases - your personal code library! Use this to see what's available for search.",
|
||||||
|
"inputSchema": {"type": "object", "properties": {}},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
elif request.get("method") == "tools/call":
|
||||||
|
tool_name = request["params"]["name"]
|
||||||
|
args = request["params"].get("arguments", {})
|
||||||
|
|
||||||
|
try:
|
||||||
|
if tool_name == "leann_search":
|
||||||
|
# Validate required parameters
|
||||||
|
if not args.get("index_name") or not args.get("query"):
|
||||||
|
return {
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": request.get("id"),
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "Error: Both index_name and query are required",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
# Build simplified command with non-interactive flag for MCP compatibility
|
||||||
|
cmd = [
|
||||||
|
"leann",
|
||||||
|
"search",
|
||||||
|
args["index_name"],
|
||||||
|
args["query"],
|
||||||
|
f"--top-k={args.get('top_k', 5)}",
|
||||||
|
f"--complexity={args.get('complexity', 32)}",
|
||||||
|
"--non-interactive",
|
||||||
|
]
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True)
|
||||||
|
|
||||||
|
elif tool_name == "leann_list":
|
||||||
|
result = subprocess.run(["leann", "list"], capture_output=True, text=True)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": request.get("id"),
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": result.stdout
|
||||||
|
if result.returncode == 0
|
||||||
|
else f"Error: {result.stderr}",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
return {
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": request.get("id"),
|
||||||
|
"error": {"code": -1, "message": str(e)},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
for line in sys.stdin:
|
||||||
|
try:
|
||||||
|
request = json.loads(line.strip())
|
||||||
|
response = handle_request(request)
|
||||||
|
if response:
|
||||||
|
print(json.dumps(response))
|
||||||
|
sys.stdout.flush()
|
||||||
|
except Exception as e:
|
||||||
|
error_response = {
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": None,
|
||||||
|
"error": {"code": -1, "message": str(e)},
|
||||||
|
}
|
||||||
|
print(json.dumps(error_response))
|
||||||
|
sys.stdout.flush()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -1,20 +1,26 @@
|
|||||||
# packages/leann-core/src/leann/registry.py
|
# packages/leann-core/src/leann/registry.py
|
||||||
|
|
||||||
from typing import Dict, TYPE_CHECKING
|
|
||||||
import importlib
|
import importlib
|
||||||
import importlib.metadata
|
import importlib.metadata
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import TYPE_CHECKING, Optional, Union
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from leann.interface import LeannBackendFactoryInterface
|
from leann.interface import LeannBackendFactoryInterface
|
||||||
|
|
||||||
BACKEND_REGISTRY: Dict[str, "LeannBackendFactoryInterface"] = {}
|
# Set up logger for this module
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
BACKEND_REGISTRY: dict[str, "LeannBackendFactoryInterface"] = {}
|
||||||
|
|
||||||
|
|
||||||
def register_backend(name: str):
|
def register_backend(name: str):
|
||||||
"""A decorator to register a new backend class."""
|
"""A decorator to register a new backend class."""
|
||||||
|
|
||||||
def decorator(cls):
|
def decorator(cls):
|
||||||
print(f"INFO: Registering backend '{name}'")
|
logger.debug(f"Registering backend '{name}'")
|
||||||
BACKEND_REGISTRY[name] = cls
|
BACKEND_REGISTRY[name] = cls
|
||||||
return cls
|
return cls
|
||||||
|
|
||||||
@@ -31,13 +37,62 @@ def autodiscover_backends():
|
|||||||
backend_module_name = dist_name.replace("-", "_")
|
backend_module_name = dist_name.replace("-", "_")
|
||||||
discovered_backends.append(backend_module_name)
|
discovered_backends.append(backend_module_name)
|
||||||
|
|
||||||
for backend_module_name in sorted(
|
for backend_module_name in sorted(discovered_backends): # sort for deterministic loading
|
||||||
discovered_backends
|
|
||||||
): # sort for deterministic loading
|
|
||||||
try:
|
try:
|
||||||
importlib.import_module(backend_module_name)
|
importlib.import_module(backend_module_name)
|
||||||
# Registration message is printed by the decorator
|
# Registration message is printed by the decorator
|
||||||
except ImportError as e:
|
except ImportError:
|
||||||
# print(f"WARN: Could not import backend module '{backend_module_name}': {e}")
|
# print(f"WARN: Could not import backend module '{backend_module_name}': {e}")
|
||||||
pass
|
pass
|
||||||
# print("INFO: Backend auto-discovery finished.")
|
# print("INFO: Backend auto-discovery finished.")
|
||||||
|
|
||||||
|
|
||||||
|
def register_project_directory(project_dir: Optional[Union[str, Path]] = None):
|
||||||
|
"""
|
||||||
|
Register a project directory in the global LEANN registry.
|
||||||
|
|
||||||
|
This allows `leann list` to discover indexes created by apps or other tools.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
project_dir: Directory to register. If None, uses current working directory.
|
||||||
|
"""
|
||||||
|
if project_dir is None:
|
||||||
|
project_dir = Path.cwd()
|
||||||
|
else:
|
||||||
|
project_dir = Path(project_dir)
|
||||||
|
|
||||||
|
# Only register directories that have some kind of LEANN content
|
||||||
|
# Either .leann/indexes/ (CLI format) or *.leann.meta.json files (apps format)
|
||||||
|
has_cli_indexes = (project_dir / ".leann" / "indexes").exists()
|
||||||
|
has_app_indexes = any(project_dir.rglob("*.leann.meta.json"))
|
||||||
|
|
||||||
|
if not (has_cli_indexes or has_app_indexes):
|
||||||
|
# Don't register if there are no LEANN indexes
|
||||||
|
return
|
||||||
|
|
||||||
|
global_registry = Path.home() / ".leann" / "projects.json"
|
||||||
|
global_registry.parent.mkdir(exist_ok=True)
|
||||||
|
|
||||||
|
project_str = str(project_dir.resolve())
|
||||||
|
|
||||||
|
# Load existing registry
|
||||||
|
projects = []
|
||||||
|
if global_registry.exists():
|
||||||
|
try:
|
||||||
|
with open(global_registry) as f:
|
||||||
|
projects = json.load(f)
|
||||||
|
except Exception:
|
||||||
|
logger.debug("Could not load existing project registry")
|
||||||
|
projects = []
|
||||||
|
|
||||||
|
# Add project if not already present
|
||||||
|
if project_str not in projects:
|
||||||
|
projects.append(project_str)
|
||||||
|
|
||||||
|
# Save updated registry
|
||||||
|
try:
|
||||||
|
with open(global_registry, "w") as f:
|
||||||
|
json.dump(projects, f, indent=2)
|
||||||
|
logger.debug(f"Registered project directory: {project_str}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Could not save project registry: {e}")
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import json
|
import json
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Dict, Any, Literal, Optional
|
from typing import Any, Literal, Optional
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
@@ -38,9 +38,7 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
|
|||||||
|
|
||||||
self.embedding_model = self.meta.get("embedding_model")
|
self.embedding_model = self.meta.get("embedding_model")
|
||||||
if not self.embedding_model:
|
if not self.embedding_model:
|
||||||
print(
|
print("WARNING: embedding_model not found in meta.json. Recompute will fail.")
|
||||||
"WARNING: embedding_model not found in meta.json. Recompute will fail."
|
|
||||||
)
|
|
||||||
|
|
||||||
         self.embedding_mode = self.meta.get("embedding_mode", "sentence-transformers")

@@ -48,39 +46,40 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
             backend_module_name=backend_module_name,
         )

-    def _load_meta(self) -> Dict[str, Any]:
+    def _load_meta(self) -> dict[str, Any]:
         """Loads the metadata file associated with the index."""
         # This is the corrected logic for finding the meta file.
         meta_path = self.index_dir / f"{self.index_path.name}.meta.json"
         if not meta_path.exists():
             raise FileNotFoundError(f"Leann metadata file not found at {meta_path}")
-        with open(meta_path, "r", encoding="utf-8") as f:
+        with open(meta_path, encoding="utf-8") as f:
             return json.load(f)

-    def _ensure_server_running(
-        self, passages_source_file: str, port: int, **kwargs
-    ) -> int:
+    def _ensure_server_running(self, passages_source_file: str, port: int, **kwargs) -> int:
         """
         Ensures the embedding server is running if recompute is needed.
         This is a helper for subclasses.
         """
         if not self.embedding_model:
-            raise ValueError(
-                "Cannot use recompute mode without 'embedding_model' in meta.json."
-            )
+            raise ValueError("Cannot use recompute mode without 'embedding_model' in meta.json.")
+        # Get distance_metric from meta if not provided in kwargs
+        distance_metric = (
+            kwargs.get("distance_metric")
+            or self.meta.get("backend_kwargs", {}).get("distance_metric")
+            or "mips"
+        )

         server_started, actual_port = self.embedding_server_manager.start_server(
             port=port,
             model_name=self.embedding_model,
             embedding_mode=self.embedding_mode,
             passages_file=passages_source_file,
-            distance_metric=kwargs.get("distance_metric"),
+            distance_metric=distance_metric,
             enable_warmup=kwargs.get("enable_warmup", False),
         )
         if not server_started:
-            raise RuntimeError(
-                f"Failed to start embedding server on port {actual_port}"
-            )
+            raise RuntimeError(f"Failed to start embedding server on port {actual_port}")

         return actual_port

@@ -109,11 +108,10 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
         # on that port?

         # Ensure we have a server with passages_file for compatibility
-        passages_source_file = (
-            self.index_dir / f"{self.index_path.name}.meta.json"
-        )
+        passages_source_file = self.index_dir / f"{self.index_path.name}.meta.json"
+        # Convert to absolute path to ensure server can find it
         zmq_port = self._ensure_server_running(
-            str(passages_source_file), zmq_port
+            str(passages_source_file.resolve()), zmq_port
         )

         return self._compute_embedding_via_server([query], zmq_port)[

@@ -131,8 +129,8 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):

     def _compute_embedding_via_server(self, chunks: list, zmq_port: int) -> np.ndarray:
         """Compute embeddings using the ZMQ embedding server."""
-        import zmq
         import msgpack
+        import zmq

         try:
             context = zmq.Context()

@@ -173,7 +171,7 @@ class BaseSearcher(LeannBackendSearcherInterface, ABC):
         pruning_strategy: Literal["global", "local", "proportional"] = "global",
         zmq_port: Optional[int] = None,
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Search for the top_k nearest neighbors of the query vector.
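For orientation, here is a minimal sketch of how a backend-specific searcher might use the two helpers touched above (`_ensure_server_running` and `_compute_embedding_via_server`). It is not code from this PR; the subclass name, default port, and method name are hypothetical:

```python
# Illustrative sketch only, not code from this PR: a hypothetical BaseSearcher
# subclass wiring the two helpers together. MySearcher, the default port, and
# embed_query() are made-up names for demonstration.
import numpy as np


class MySearcher(BaseSearcher):  # BaseSearcher as shown in the hunks above
    def embed_query(self, query: str, zmq_port: int = 5557, **kwargs) -> np.ndarray:
        # The passages source is the index's own meta.json, resolved to an
        # absolute path so the embedding server can locate it (as in the diff).
        passages_file = self.index_dir / f"{self.index_path.name}.meta.json"

        # Start (or reuse) the ZMQ embedding server; distance_metric falls back
        # to meta.json's backend_kwargs and finally to "mips".
        actual_port = self._ensure_server_running(str(passages_file.resolve()), zmq_port, **kwargs)

        # Ask the server to embed the query; the caller in the diff indexes
        # the first row of the returned array.
        return self._compute_embedding_via_server([query], actual_port)
```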
147 packages/leann-mcp/README.md Normal file
@@ -0,0 +1,147 @@
# 🔥 LEANN Claude Code Integration

Transform your development workflow with intelligent code assistance using LEANN's semantic search directly in Claude Code.

## Prerequisites

Install LEANN globally for MCP integration (with the default backends):

```bash
uv tool install leann-core --with leann
```

This installs the `leann` CLI into an isolated tool environment and includes both backends, so `leann build` works out of the box.

## 🚀 Quick Setup

Add the LEANN MCP server to Claude Code. Choose the scope based on how widely you want it available. The command below installs it globally; if you prefer a local install, skip this step:

```bash
# Global (recommended): available in all projects for your user
claude mcp add --scope user leann-server -- leann_mcp
```

- `leann-server`: the display name of the MCP server in Claude Code (you can change it).
- `leann_mcp`: the Python entry point installed with LEANN that starts the MCP server.

Verify it is registered globally:

```bash
claude mcp list | cat
```

## 🛠️ Available Tools

Once connected, you'll have access to these semantic search tools in Claude Code:

- **`leann_list`** - List all available indexes across your projects
- **`leann_search`** - Perform semantic searches across code and documents

## 🎯 Quick Start Example

```bash
# Add locally if you did not add it globally (current folder only; the default if --scope is omitted)
claude mcp add leann-server -- leann_mcp

# Build an index for your project (change to your actual path)
# See the advanced examples below for more ways to configure indexing
# Set the index name (replace 'my-project' with your own)
leann build my-project --docs $(git ls-files)

# Start Claude Code
claude
```

## 🚀 Advanced Usage: Building the Index

### Index an Entire Git Repository
```bash
# Index all tracked files in your Git repository.
# Note: submodules are currently skipped; we can add them back if needed.
leann build my-repo --docs $(git ls-files) --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# Index only tracked Python files from Git.
leann build my-python-code --docs $(git ls-files "*.py") --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# If you encounter empty requests caused by empty files (e.g., __init__.py), exclude zero-byte files. Thanks @ww2283 for pointing [that](https://github.com/yichuan-w/LEANN/issues/48) out.
leann build leann-prospec-lig --docs $(find ./src -name "*.py" -not -empty) --embedding-mode openai --embedding-model text-embedding-3-small
```

### Multiple Directories and Files
```bash
# Index multiple directories
leann build my-codebase --docs ./src ./tests ./docs ./config --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# Mix files and directories
leann build my-project --docs ./README.md ./src/ ./package.json ./docs/ --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# Specific files only
leann build my-configs --docs ./tsconfig.json ./package.json ./webpack.config.js --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw
```

### Advanced Git Integration
```bash
# Index recently modified files
leann build recent-changes --docs $(git diff --name-only HEAD~10..HEAD) --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# Index files matching a pattern
leann build frontend --docs $(git ls-files "*.tsx" "*.ts" "*.jsx" "*.js") --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw

# Index documentation and config files
leann build docs-and-configs --docs $(git ls-files "*.md" "*.yml" "*.yaml" "*.json" "*.toml") --embedding-mode sentence-transformers --embedding-model all-MiniLM-L6-v2 --backend hnsw
```

## Try this in Claude Code:

```
Help me understand this codebase. List available indexes and search for authentication patterns.
```

<p align="center">
  <img src="../../assets/claude_code_leann.png" alt="LEANN in Claude Code" width="80%">
</p>

If you see a prompt asking whether to proceed with LEANN, you can now use it in your chat!

## 🧠 How It Works

The integration consists of three key components working together; a conceptual sketch follows this list:

- **`leann`** - Core CLI tool for indexing and searching (installed globally via `uv tool install`)
- **`leann_mcp`** - MCP server that wraps `leann` commands for Claude Code integration
- **Claude Code** - Calls `leann_mcp`, which executes `leann` commands and returns the results
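To make that flow concrete, here is a purely conceptual sketch of how an MCP-style tool handler could shell out to the `leann` CLI. This is not the actual `leann_mcp` implementation, and the exact CLI subcommands and flags used in the comment are assumptions:

```python
# Conceptual sketch only: how an MCP-style tool handler could forward a call
# to the installed `leann` CLI. This is NOT the actual leann_mcp source, and
# the exact CLI subcommands/flags are assumptions; check `leann --help`.
import subprocess


def run_leann(args: list[str]) -> str:
    """Run the leann CLI with the given arguments and return its stdout."""
    result = subprocess.run(
        ["leann", *args],
        capture_output=True,
        text=True,
        check=True,
    )
    return result.stdout


# A hypothetical `leann_search`-style tool would forward the index name and
# query it receives from Claude Code, e.g.:
# print(run_leann(["search", "my-project", "authentication patterns"]))
```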
## 📁 File Support

LEANN understands **30+ file types**, including:

- **Programming**: Python, JavaScript, TypeScript, Java, Go, Rust, C++, C#
- **Data**: SQL, YAML, JSON, CSV, XML
- **Documentation**: Markdown, TXT, PDF
- **And many more!**

## 💾 Storage & Organization

- **Project indexes**: Stored in a `.leann/` directory (just like `.git`)
- **Global registry**: Project tracking at `~/.leann/projects.json`
- **Multi-project support**: Switch between different codebases seamlessly
- **Portable**: Transfer indexes between machines with minimal overhead

## 🗑️ Uninstalling

To remove the LEANN MCP server from Claude Code:

```bash
claude mcp remove leann-server
```

To remove LEANN from the current environment:

```bash
uv pip uninstall leann leann-backend-hnsw leann-core
```

To remove the global LEANN installation (e.g., before a version update):

```bash
uv tool list | cat
uv tool uninstall leann-core
command -v leann || echo "leann gone"
command -v leann_mcp || echo "leann_mcp gone"
```
36 packages/leann/README.md Normal file
@@ -0,0 +1,36 @@
# LEANN - The smallest vector index in the world

LEANN is a revolutionary vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.

## Installation

```bash
# Default installation (includes both HNSW and DiskANN backends)
uv pip install leann
```

## Quick Start

```python
from leann import LeannBuilder, LeannSearcher, LeannChat
from pathlib import Path

INDEX_PATH = str(Path("./").resolve() / "demo.leann")

# Build an index (choose backend: "hnsw" or "diskann")
builder = LeannBuilder(backend_name="hnsw")  # or "diskann" for large-scale deployments
builder.add_text("LEANN saves 97% storage compared to traditional vector databases.")
builder.add_text("Tung Tung Tung Sahur called—they need their banana‑crocodile hybrid back")
builder.build_index(INDEX_PATH)

# Search
searcher = LeannSearcher(INDEX_PATH)
results = searcher.search("fantastical AI-generated creatures", top_k=1)

# Chat with your data
chat = LeannChat(INDEX_PATH, llm_config={"type": "hf", "model": "Qwen/Qwen3-0.6B"})
response = chat.ask("How much storage does LEANN save?", top_k=1)
```

## License

MIT License
12 packages/leann/__init__.py Normal file
@@ -0,0 +1,12 @@
"""
LEANN - Low-storage Embedding Approximation for Neural Networks

A revolutionary vector database that democratizes personal AI.
"""

__version__ = "0.1.0"

# Re-export main API from leann-core
from leann_core import LeannBuilder, LeannChat, LeannSearcher

__all__ = ["LeannBuilder", "LeannChat", "LeannSearcher"]
39 packages/leann/pyproject.toml Normal file
@@ -0,0 +1,39 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "leann"
version = "0.3.2"
description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
readme = "README.md"
requires-python = ">=3.9"
license = { text = "MIT" }
authors = [
    { name = "LEANN Team" }
]
keywords = ["vector-database", "rag", "embeddings", "search", "ai"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]

# Default installation: core + hnsw + diskann
dependencies = [
    "leann-core>=0.1.0",
    "leann-backend-hnsw>=0.1.0",
    "leann-backend-diskann>=0.1.0",
]

[project.optional-dependencies]
# All backends now included by default

[project.urls]
Repository = "https://github.com/yichuan-w/LEANN"
Issues = "https://github.com/yichuan-w/LEANN/issues"
1 packages/wechat-exporter/__init__.py Normal file
@@ -0,0 +1 @@
__all__ = []
@@ -1,22 +1,23 @@
 import json
-import typer
-from pathlib import Path
-import requests
-from tqdm import tqdm
-import xml.etree.ElementTree as ET
-from typing_extensions import Annotated
 import sqlite3
+import xml.etree.ElementTree as ElementTree
+from pathlib import Path
+from typing import Annotated
+
+import requests
+import typer
+from tqdm import tqdm

 app = typer.Typer()


 def get_safe_path(s: str) -> str:
     """
     Remove invalid characters to sanitize a path.
     :param s: str to sanitize
     :returns: sanitized str
     """
-    ban_chars = "\\ / : * ? \" ' < > | $ \r \n".replace(
-        ' ', '')
+    ban_chars = "\\ / : * ? \" ' < > | $ \r \n".replace(" ", "")
     for i in ban_chars:
         s = s.replace(i, "")
     return s

@@ -25,36 +26,40 @@ def get_safe_path(s: str) -> str:
 def process_history(history: str):
     if history.startswith("<?xml") or history.startswith("<msg>"):
         try:
-            root = ET.fromstring(history)
-            title = root.find('.//title').text if root.find('.//title') is not None else None
-            quoted = root.find('.//refermsg/content').text if root.find('.//refermsg/content') is not None else None
+            root = ElementTree.fromstring(history)
+            title = root.find(".//title").text if root.find(".//title") is not None else None
+            quoted = (
+                root.find(".//refermsg/content").text
+                if root.find(".//refermsg/content") is not None
+                else None
+            )
             if title and quoted:
-                return {
-                    "title": title,
-                    "quoted": process_history(quoted)
-                }
+                return {"title": title, "quoted": process_history(quoted)}
             if title:
                 return title
         except Exception:
             return history
     return history


 def get_message(history: dict | str):
     if isinstance(history, dict):
-        if 'title' in history:
-            return history['title']
+        if "title" in history:
+            return history["title"]
         else:
             return history


 def export_chathistory(user_id: str):
-    res = requests.get("http://localhost:48065/wechat/chatlog", params={
-        "userId": user_id,
-        "count": 100000
-    }).json()
-    for i in range(len(res['chatLogs'])):
-        res['chatLogs'][i]['content'] = process_history(res['chatLogs'][i]['content'])
-        res['chatLogs'][i]['message'] = get_message(res['chatLogs'][i]['content'])
-    return res['chatLogs']
+    res = requests.get(
+        "http://localhost:48065/wechat/chatlog",
+        params={"userId": user_id, "count": 100000},
+    ).json()
+    for i in range(len(res["chatLogs"])):
+        res["chatLogs"][i]["content"] = process_history(res["chatLogs"][i]["content"])
+        res["chatLogs"][i]["message"] = get_message(res["chatLogs"][i]["content"])
+    return res["chatLogs"]


 @app.command()
 def export_all(dest: Annotated[Path, typer.Argument(help="Destination path to export to.")]):
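As a quick illustration of what `process_history` and `get_message` do with a quoted ("refermsg") WeChat message, here is a hedged example; the XML payload is a made-up sample, not real exporter output:

```python
# Hypothetical sample payload (real WeChat XML carries many more fields).
# Assumes process_history / get_message from the module shown above.
sample = (
    "<msg>"
    "<title>Sounds good, see you at 7</title>"
    "<refermsg><content>Dinner tonight?</content></refermsg>"
    "</msg>"
)

parsed = process_history(sample)
# parsed == {"title": "Sounds good, see you at 7", "quoted": "Dinner tonight?"}

# get_message() keeps only the human-readable text for the flat export.
assert get_message(parsed) == "Sounds good, see you at 7"
```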
@@ -64,7 +69,7 @@ def export_all(dest: Annotated[Path, typer.Argument(help="Destination path to export to.")]):
     if not dest.is_dir():
         if not dest.exists():
             inp = typer.prompt("Destination path does not exist, create it? (y/n)")
-            if inp.lower() == 'y':
+            if inp.lower() == "y":
                 dest.mkdir(parents=True)
             else:
                 typer.echo("Aborted.", err=True)

@@ -77,12 +82,12 @@ def export_all(dest: Annotated[Path, typer.Argument(help="Destination path to export to.")]):
     exported_count = 0
     for user in tqdm(all_users):
         try:
-            usr_chatlog = export_chathistory(user['arg'])
+            usr_chatlog = export_chathistory(user["arg"])

             # Only write file if there are messages
             if len(usr_chatlog) > 0:
-                out_path = dest/get_safe_path((user['title'] or "")+"-"+user['arg']+'.json')
-                with open(out_path, 'w', encoding='utf-8') as f:
+                out_path = dest / get_safe_path((user["title"] or "") + "-" + user["arg"] + ".json")
+                with open(out_path, "w", encoding="utf-8") as f:
                     json.dump(usr_chatlog, f, ensure_ascii=False, indent=2)
                 exported_count += 1
         except Exception as e:
@@ -91,25 +96,49 @@ def export_all(dest: Annotated[Path, typer.Argument(help="Destination path to export to.")]):

     print(f"Exported {exported_count} users' chat history to {dest} in json.")


 @app.command()
-def export_sqlite(dest: Annotated[Path, typer.Argument(help="Destination path to export to.")] = Path("chatlog.db")):
+def export_sqlite(
+    dest: Annotated[Path, typer.Argument(help="Destination path to export to.")] = Path(
+        "chatlog.db"
+    ),
+):
     """
     Export all users' chat history to a sqlite database.
     """
     connection = sqlite3.connect(dest)
     cursor = connection.cursor()
-    cursor.execute("CREATE TABLE IF NOT EXISTS chatlog (id INTEGER PRIMARY KEY AUTOINCREMENT, with_id TEXT, from_user TEXT, to_user TEXT, message TEXT, timest DATETIME, auxiliary TEXT)")
+    cursor.execute(
+        "CREATE TABLE IF NOT EXISTS chatlog (id INTEGER PRIMARY KEY AUTOINCREMENT, with_id TEXT, from_user TEXT, to_user TEXT, message TEXT, timest DATETIME, auxiliary TEXT)"
+    )
     cursor.execute("CREATE INDEX IF NOT EXISTS chatlog_with_id_index ON chatlog (with_id)")
     cursor.execute("CREATE TABLE iF NOT EXISTS users (id TEXT PRIMARY KEY, name TEXT)")

     all_users = requests.get("http://localhost:48065/wechat/allcontacts").json()
     for user in tqdm(all_users):
-        cursor.execute("INSERT OR IGNORE INTO users (id, name) VALUES (?, ?)", (user['arg'], user['title']))
-        usr_chatlog = export_chathistory(user['arg'])
+        cursor.execute(
+            "INSERT OR IGNORE INTO users (id, name) VALUES (?, ?)",
+            (user["arg"], user["title"]),
+        )
+        usr_chatlog = export_chathistory(user["arg"])
         for msg in usr_chatlog:
-            cursor.execute("INSERT INTO chatlog (with_id, from_user, to_user, message, timest, auxiliary) VALUES (?, ?, ?, ?, ?, ?)", (user['arg'], msg['fromUser'], msg['toUser'], msg['message'], msg['createTime'], str(msg['content'])))
+            cursor.execute(
+                "INSERT INTO chatlog (with_id, from_user, to_user, message, timest, auxiliary) VALUES (?, ?, ?, ?, ?, ?)",
+                (
+                    user["arg"],
+                    msg["fromUser"],
+                    msg["toUser"],
+                    msg["message"],
+                    msg["createTime"],
+                    str(msg["content"]),
+                ),
+            )
         connection.commit()


-if __name__ == "__main__":
+def main():
     app()
+
+
+if __name__ == "__main__":
+    main()
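Once `export_sqlite` has produced the database, the schema shown above can be queried directly. A small, hedged sketch follows; the database path and contact id are placeholders, not values from this PR:

```python
# Illustrative query against the chatlog.db produced by `export_sqlite`.
# "chatlog.db" and the contact id below are placeholders for your own export.
import sqlite3

connection = sqlite3.connect("chatlog.db")
cursor = connection.cursor()

# Most recent messages exchanged with one contact, joined to the users table
# created by the exporter (columns match the CREATE TABLE statements above).
rows = cursor.execute(
    """
    SELECT u.name, c.from_user, c.message, c.timest
    FROM chatlog AS c
    JOIN users AS u ON u.id = c.with_id
    WHERE c.with_id = ?
    ORDER BY c.timest DESC
    LIMIT 20
    """,
    ("wxid_example",),
).fetchall()

for name, sender, message, timestamp in rows:
    print(f"[{timestamp}] {name} / {sender}: {message}")

connection.close()
```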
121 pyproject.toml
@@ -5,16 +5,15 @@ build-backend = "setuptools.build_meta"
 [project]
 name = "leann-workspace"
 version = "0.1.0"
-requires-python = ">=3.10"
+requires-python = ">=3.9"

 dependencies = [
     "leann-core",
     "leann-backend-hnsw",
+    "typer>=0.12.3",
     "numpy>=1.26.0",
     "torch",
     "tqdm",
-    "flask",
-    "flask_compress",
     "datasets>=2.15.0",
     "evaluate",
     "colorama",
@@ -25,38 +24,140 @@ dependencies = [
     "requests>=2.25.0",
     "sentence-transformers>=2.2.0",
     "openai>=1.0.0",
+    # PDF parsing dependencies - essential for document processing
     "PyPDF2>=3.0.0",
+    "pdfplumber>=0.11.0",
+    "pymupdf>=1.26.0",
+    "pypdfium2>=4.30.0",
+    # LlamaIndex core and readers - updated versions
     "llama-index>=0.12.44",
-    "llama-index-readers-docling",
-    "llama-index-node-parser-docling",
-    "ipykernel==6.29.5",
-    "msgpack>=1.1.1",
+    "llama-index-readers-file>=0.4.0",  # Essential for PDF parsing
+    # "llama-index-readers-docling",  # Requires Python >= 3.10
+    # "llama-index-node-parser-docling",  # Requires Python >= 3.10
     "llama-index-vector-stores-faiss>=0.4.0",
     "llama-index-embeddings-huggingface>=0.5.5",
-    "mlx>=0.26.3",
-    "mlx-lm>=0.26.0",
+    # Other dependencies
+    "ipykernel==6.29.5",
+    "msgpack>=1.1.1",
+    "mlx>=0.26.3; sys_platform == 'darwin' and platform_machine == 'arm64'",
+    "mlx-lm>=0.26.0; sys_platform == 'darwin' and platform_machine == 'arm64'",
     "psutil>=5.8.0",
+    "pybind11>=3.0.0",
+    "pathspec>=0.12.1",
+    "nbconvert>=7.16.6",
+    "gitignore-parser>=0.1.12",
+    # AST-aware code chunking dependencies
+    "astchunk>=0.1.0",
+    "tree-sitter>=0.20.0",
+    "tree-sitter-python>=0.20.0",
+    "tree-sitter-java>=0.20.0",
+    "tree-sitter-c-sharp>=0.20.0",
+    "tree-sitter-typescript>=0.20.0",
 ]

 [project.optional-dependencies]
 dev = [
     "pytest>=7.0",
     "pytest-cov>=4.0",
+    "pytest-xdist>=3.0",  # For parallel test execution
     "black>=23.0",
-    "ruff>=0.1.0",
+    "ruff==0.12.7",  # Fixed version to ensure consistent formatting across all environments
     "matplotlib",
     "huggingface-hub>=0.20.0",
+    "pre-commit>=3.5.0",
+]
+
+test = [
+    "pytest>=7.0",
+    "pytest-timeout>=2.0",
+    "llama-index-core>=0.12.0",
+    "python-dotenv>=1.0.0",
 ]

 diskann = [
     "leann-backend-diskann",
 ]
+
+# Add a new optional dependency group for document processing
+documents = [
+    "beautifulsoup4>=4.13.0",  # For HTML parsing
+    "python-docx>=0.8.11",  # For Word documents
+    "openpyxl>=3.1.0",  # For Excel files
+    "pandas>=2.2.0",  # For data processing
+]

 [tool.setuptools]
 py-modules = []
+packages = ["wechat_exporter"]
+package-dir = { "wechat_exporter" = "packages/wechat-exporter" }
+
+[project.scripts]
+wechat-exporter = "wechat_exporter.main:main"

 [tool.uv.sources]
 leann-core = { path = "packages/leann-core", editable = true }
 leann-backend-diskann = { path = "packages/leann-backend-diskann", editable = true }
 leann-backend-hnsw = { path = "packages/leann-backend-hnsw", editable = true }
+
+[tool.ruff]
+target-version = "py39"
+line-length = 100
+extend-exclude = ["third_party"]
+
+[tool.ruff.lint]
+select = [
+    "E",    # pycodestyle errors
+    "W",    # pycodestyle warnings
+    "F",    # pyflakes
+    "I",    # isort
+    "B",    # flake8-bugbear
+    "C4",   # flake8-comprehensions
+    "UP",   # pyupgrade
+    "N",    # pep8-naming
+    "RUF",  # ruff-specific rules
+]
+ignore = [
+    "E501",    # line too long (handled by formatter)
+    "B008",    # do not perform function calls in argument defaults
+    "B904",    # raise without from
+    "N812",    # lowercase imported as non-lowercase
+    "N806",    # variable in function should be lowercase
+    "RUF012",  # mutable class attributes should be annotated with typing.ClassVar
+]
+
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
+
+[tool.lychee]
+accept = ["200", "403", "429", "503"]
+timeout = 20
+max_retries = 2
+exclude = ["localhost", "127.0.0.1", "example.com"]
+exclude_path = [".git/", ".venv/", "__pycache__/", "third_party/"]
+scheme = ["https", "http"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+python_files = ["test_*.py"]
+python_classes = ["Test*"]
+python_functions = ["test_*"]
+markers = [
+    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
+    "openai: marks tests that require OpenAI API key",
+]
+timeout = 300  # Reduced from 600s (10min) to 300s (5min) for CI safety
+addopts = [
+    "-v",
+    "--tb=short",
+    "--strict-markers",
+    "--disable-warnings",
+]
+env = [
+    "HF_HUB_DISABLE_SYMLINKS=1",
+    "TOKENIZERS_PARALLELISM=false",
+]
@@ -1,12 +0,0 @@
-import faiss
-hnsw_index = faiss.read_index("/opt/dlami/nvme/scaling_out/indices/rpj_wiki/facebook/contriever-msmarco/hnsw/hnsw_IP_M30_efC128.index", faiss.IO_FLAG_ONDISK_SAME_DIR)
-
-# print total number of nodes
-print(hnsw_index.ntotal)
-
-# print stats of the graph
-print(hnsw_index.hnsw.print_neighbor_stats(0))
-
-
-# save_degree_distribution
-hnsw_index.hnsw.save_degree_distribution(0, "degree_distribution_HNSW_M30.txt")
@@ -1,11 +0,0 @@
-import faiss
-nsg_index = faiss.read_index("/opt/dlami/nvme/scaling_out/indices/rpj_wiki/facebook/contriever-msmarco/nsg_R16.index", faiss.IO_FLAG_ONDISK_SAME_DIR)
-
-# print total number of nodes
-print(nsg_index.ntotal)
-
-# print stats of the graph
-print(nsg_index.nsg.print_neighbor_stats(0))
-
-# save degree distribution
-nsg_index.nsg.save_degree_distribution("degree_distribution_NSG_R60.txt")
Some files were not shown because too many files have changed in this diff.