Compare commits
v0.1.11...fix/ask-cl
124 Commits
Commit SHAs (124):

47aeb85f82  db7ba27ff6  5f7806e16f  d034e2195b  43894ff605  10311cc611
ad0d2faabc  e93c0dec6f  c5a29f849a  3b8dc6368e  e309f292de  0d9f92ea0f
b0b353d279  4dffdfedbe  d41e467df9  4ca0489cb1  e83a671918  4e5b73ce7b
31b4973141  dde2221513  6d11e86e71  13bb561aad  0174ba5571  03af82d695
738f1dbab8  37d990d51c  a6f07a54f1  46905e0687  838ade231e  da6540decd
39e18a7c11  6bde28584b  f62632c41f  27708243ca  9a1e4652ca  14e84d9e2d
2dcfca19ff  bee2167ee3  ef980d70b3  db3c63c441  00eeadb9dd  42c8370709
fafdf8fcbe  21f7d8e031  46565b9249  3dad76126a  18e28bda32  609fa62fd5
eab13434ef  b2390ccc14  e8fca2c84a  790ae14f69  ac363072e6  93465af46c
792ece67dc  239e35e2e6  2fac0c6fbf  9801aa581b  5e97916608  8b9c2be8c9
3ff5aac8e0  67fef60466  b6ab6f1993  9f2e82a838  0b2b799d5a  0f790fbbd9
387ae21eba  3cc329c3e7  5567302316  075d4bd167  e4bcc76f88  710e83b1fd
c96d653072  8b22d2b5d3  4cb544ee38  f94ce63d51  4271ff9d84  0d448c4a41
af5599e33c  efdf6d917a  dd71ac8d71  8bee1d4100  33521d6d00  8899734952
54df6310c5  19bcc07814  8356e3c668  08eac5c821  4671ed9b36  055c086398
d505dcc5e3  261006c36a  b2eba23e21  e9ee687472  6f5d5e4a77  5c8921673a
e9d2d420bd  ebabfad066  e6f612b5e8  51c41acd82  455f93fb7c  48207c3b69
4de1caa40f  60eaa8165c  c1a5d0c624  af1790395a  383c6d8d7e  bc0d839693
8596562de5  5d09586853  a7cba078dd  b3e9ee96fa  8537a6b17e  7c8d7dc5c2
8e23d663e6  8a3994bf80  8375f601ba  c87c0fe662  73927b68ef  cc1a62e5aa
802020cb41  cdb92f7cf4  dc69bdec00  98073e9868
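To inspect the same range locally (assuming both `v0.1.11` and `fix/ask-cl` have been fetched into your clone), git can list the commits and summarize the changed files:

```bash
# List the commits between the two refs and summarize the file changes
git log --oneline v0.1.11..fix/ask-cl
git diff --stat v0.1.11...fix/ask-cl
```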
.gitattributes (1 change)
@@ -1 +0,0 @@
-paper_plot/data/big_graph_degree_data.npz filter=lfs diff=lfs merge=lfs -text
.github/ISSUE_TEMPLATE/bug_report.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
+name: Bug Report
+description: Report a bug in LEANN
+labels: ["bug"]
+
+body:
+  - type: textarea
+    id: description
+    attributes:
+      label: What happened?
+      description: A clear description of the bug
+    validations:
+      required: true
+
+  - type: textarea
+    id: reproduce
+    attributes:
+      label: How to reproduce
+      placeholder: |
+        1. Install with...
+        2. Run command...
+        3. See error
+    validations:
+      required: true
+
+  - type: textarea
+    id: error
+    attributes:
+      label: Error message
+      description: Paste any error messages
+      render: shell
+
+  - type: input
+    id: version
+    attributes:
+      label: LEANN Version
+      placeholder: "0.1.0"
+    validations:
+      required: true
+
+  - type: dropdown
+    id: os
+    attributes:
+      label: Operating System
+      options:
+        - macOS
+        - Linux
+        - Windows
+        - Docker
+    validations:
+      required: true
.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
+blank_issues_enabled: true
+contact_links:
+  - name: Documentation
+    url: https://github.com/LEANN-RAG/LEANN-RAG/tree/main/docs
+    about: Read the docs first
+  - name: Discussions
+    url: https://github.com/LEANN-RAG/LEANN-RAG/discussions
+    about: Ask questions and share ideas
.github/ISSUE_TEMPLATE/feature_request.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
+name: Feature Request
+description: Suggest a new feature for LEANN
+labels: ["enhancement"]
+
+body:
+  - type: textarea
+    id: problem
+    attributes:
+      label: What problem does this solve?
+      description: Describe the problem or need
+    validations:
+      required: true
+
+  - type: textarea
+    id: solution
+    attributes:
+      label: Proposed solution
+      description: How would you like this to work?
+    validations:
+      required: true
+
+  - type: textarea
+    id: example
+    attributes:
+      label: Example usage
+      description: Show how the API might look
+      render: python
.github/pull_request_template.md (new file, 13 lines)
@@ -0,0 +1,13 @@
+## What does this PR do?
+
+<!-- Brief description of your changes -->
+
+## Related Issues
+
+Fixes #
+
+## Checklist
+
+- [ ] Tests pass (`uv run pytest`)
+- [ ] Code formatted (`ruff format` and `ruff check`)
+- [ ] Pre-commit hooks pass (`pre-commit run --all-files`)
.github/workflows/build-and-publish.yml (1 change)
@@ -5,6 +5,7 @@ on:
     branches: [ main ]
   pull_request:
     branches: [ main ]
+  workflow_dispatch:
 
 jobs:
   build:
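The added `workflow_dispatch:` trigger makes this pipeline runnable on demand as well as on pushes and pull requests. A minimal sketch of a manual trigger, assuming the GitHub CLI (`gh`) is installed and authenticated against the repository:

```bash
# Kick off the build pipeline manually on the main branch
gh workflow run build-and-publish.yml --ref main
```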
.github/workflows/build-reusable.yml (281 changes)
@@ -10,7 +10,36 @@ on:
         default: ''
 
 jobs:
+  lint:
+    name: Lint and Format Check
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.ref }}
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+
+      - name: Install ruff
+        run: |
+          uv tool install ruff
+
+      - name: Run ruff check
+        run: |
+          ruff check .
+
+      - name: Run ruff format check
+        run: |
+          ruff format --check .
+
   build:
+    needs: lint
     name: Build ${{ matrix.os }} Python ${{ matrix.python }}
     strategy:
       matrix:
@@ -25,20 +54,51 @@ jobs:
           python: '3.12'
         - os: ubuntu-22.04
           python: '3.13'
-        - os: macos-latest
+        # ARM64 Linux builds
+        - os: ubuntu-24.04-arm
           python: '3.9'
-        - os: macos-latest
+        - os: ubuntu-24.04-arm
           python: '3.10'
-        - os: macos-latest
+        - os: ubuntu-24.04-arm
           python: '3.11'
-        - os: macos-latest
+        - os: ubuntu-24.04-arm
           python: '3.12'
-        - os: macos-latest
+        - os: ubuntu-24.04-arm
           python: '3.13'
+        - os: macos-14
+          python: '3.9'
+        - os: macos-14
+          python: '3.10'
+        - os: macos-14
+          python: '3.11'
+        - os: macos-14
+          python: '3.12'
+        - os: macos-14
+          python: '3.13'
+        - os: macos-15
+          python: '3.9'
+        - os: macos-15
+          python: '3.10'
+        - os: macos-15
+          python: '3.11'
+        - os: macos-15
+          python: '3.12'
+        - os: macos-15
+          python: '3.13'
+        - os: macos-13
+          python: '3.9'
+        - os: macos-13
+          python: '3.10'
+        - os: macos-13
+          python: '3.11'
+        - os: macos-13
+          python: '3.12'
+        # Note: macos-13 + Python 3.13 excluded due to PyTorch compatibility
+        # (PyTorch 2.5+ supports Python 3.13 but not Intel Mac x86_64)
     runs-on: ${{ matrix.os }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{ inputs.ref }}
          submodules: recursive
@@ -49,26 +109,62 @@ jobs:
          python-version: ${{ matrix.python }}
 
      - name: Install uv
-       uses: astral-sh/setup-uv@v4
+       uses: astral-sh/setup-uv@v6
 
      - name: Install system dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y libomp-dev libboost-all-dev protobuf-compiler libzmq3-dev \
-           pkg-config libopenblas-dev patchelf libabsl-dev libaio-dev libprotobuf-dev
+           pkg-config libabsl-dev libaio-dev libprotobuf-dev \
+           patchelf
 
-         # Install Intel MKL for DiskANN
+         # Debug: Show system information
+         echo "🔍 System Information:"
+         echo "Architecture: $(uname -m)"
+         echo "OS: $(uname -a)"
+         echo "CPU info: $(lscpu | head -5)"
+
+         # Install math library based on architecture
+         ARCH=$(uname -m)
+         echo "🔍 Setting up math library for architecture: $ARCH"
+
+         if [[ "$ARCH" == "x86_64" ]]; then
+           # Install Intel MKL for DiskANN on x86_64
+           echo "📦 Installing Intel MKL for x86_64..."
           wget -q https://registrationcenter-download.intel.com/akdlm/IRC_NAS/79153e0f-74d7-45af-b8c2-258941adf58a/intel-onemkl-2025.0.0.940.sh
           sudo sh intel-onemkl-2025.0.0.940.sh -a --components intel.oneapi.lin.mkl.devel --action install --eula accept -s
           source /opt/intel/oneapi/setvars.sh
           echo "MKLROOT=/opt/intel/oneapi/mkl/latest" >> $GITHUB_ENV
-          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/mkl/latest/lib/intel64:$LD_LIBRARY_PATH" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/mkl/latest/lib/intel64" >> $GITHUB_ENV
+          echo "✅ Intel MKL installed for x86_64"
+
+          # Debug: Check MKL installation
+          echo "🔍 MKL Installation Check:"
+          ls -la /opt/intel/oneapi/mkl/latest/ || echo "MKL directory not found"
+          ls -la /opt/intel/oneapi/mkl/latest/lib/ || echo "MKL lib directory not found"
+
+        elif [[ "$ARCH" == "aarch64" ]]; then
+          # Use OpenBLAS for ARM64 (MKL installer not compatible with ARM64)
+          echo "📦 Installing OpenBLAS for ARM64..."
+          sudo apt-get install -y libopenblas-dev liblapack-dev liblapacke-dev
+          echo "✅ OpenBLAS installed for ARM64"
+
+          # Debug: Check OpenBLAS installation
+          echo "🔍 OpenBLAS Installation Check:"
+          dpkg -l | grep openblas || echo "OpenBLAS package not found"
+          ls -la /usr/lib/aarch64-linux-gnu/openblas/ || echo "OpenBLAS directory not found"
+        fi
+
+        # Debug: Show final library paths
+        echo "🔍 Final LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
 
      - name: Install system dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
-         brew install llvm libomp boost protobuf zeromq
+         # Don't install LLVM, use system clang for better compatibility
+         brew install libomp boost protobuf zeromq
 
      - name: Install build dependencies
        run: |
@@ -79,39 +175,73 @@ jobs:
           uv pip install --system delocate
         fi
 
+     - name: Set macOS environment variables
+       if: runner.os == 'macOS'
+       run: |
+         # Use brew --prefix to automatically detect Homebrew installation path
+         HOMEBREW_PREFIX=$(brew --prefix)
+         echo "HOMEBREW_PREFIX=${HOMEBREW_PREFIX}" >> $GITHUB_ENV
+         echo "OpenMP_ROOT=${HOMEBREW_PREFIX}/opt/libomp" >> $GITHUB_ENV
+
+         # Set CMAKE_PREFIX_PATH to let CMake find all packages automatically
+         echo "CMAKE_PREFIX_PATH=${HOMEBREW_PREFIX}" >> $GITHUB_ENV
+
+         # Set compiler flags for OpenMP (required for both backends)
+         echo "LDFLAGS=-L${HOMEBREW_PREFIX}/opt/libomp/lib" >> $GITHUB_ENV
+         echo "CPPFLAGS=-I${HOMEBREW_PREFIX}/opt/libomp/include" >> $GITHUB_ENV
+
      - name: Build packages
        run: |
          # Build core (platform independent)
-         if [[ "${{ matrix.os }}" == ubuntu-* ]]; then
          cd packages/leann-core
          uv build
          cd ../..
-         fi
 
          # Build HNSW backend
          cd packages/leann-backend-hnsw
-         if [ "${{ matrix.os }}" == "macos-latest" ]; then
-           CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv build --wheel --python python
+         if [[ "${{ matrix.os }}" == macos-* ]]; then
+           # Use system clang for better compatibility
+           export CC=clang
+           export CXX=clang++
+           # Homebrew libraries on each macOS version require matching minimum version
+           if [[ "${{ matrix.os }}" == "macos-13" ]]; then
+             export MACOSX_DEPLOYMENT_TARGET=13.0
+           elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
+             export MACOSX_DEPLOYMENT_TARGET=14.0
+           elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
+             export MACOSX_DEPLOYMENT_TARGET=15.0
+           fi
+           uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
         else
-           uv build --wheel --python python
+           uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
         fi
         cd ../..
 
         # Build DiskANN backend
         cd packages/leann-backend-diskann
-        if [ "${{ matrix.os }}" == "macos-latest" ]; then
-          CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv build --wheel --python python
+        if [[ "${{ matrix.os }}" == macos-* ]]; then
+          # Use system clang for better compatibility
+          export CC=clang
+          export CXX=clang++
+          # DiskANN requires macOS 13.3+ for sgesdd_ LAPACK function
+          # But Homebrew libraries on each macOS version require matching minimum version
+          if [[ "${{ matrix.os }}" == "macos-13" ]]; then
+            export MACOSX_DEPLOYMENT_TARGET=13.3
+          elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
+            export MACOSX_DEPLOYMENT_TARGET=14.0
+          elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
+            export MACOSX_DEPLOYMENT_TARGET=15.0
+          fi
+          uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
        else
-          uv build --wheel --python python
+          uv build --wheel --python ${{ matrix.python }} --find-links ${GITHUB_WORKSPACE}/packages/leann-core/dist
        fi
        cd ../..
 
        # Build meta package (platform independent)
-       if [[ "${{ matrix.os }}" == ubuntu-* ]]; then
        cd packages/leann
        uv build
        cd ../..
-       fi
 
     - name: Repair wheels (Linux)
       if: runner.os == 'Linux'
@@ -137,10 +267,24 @@ jobs:
     - name: Repair wheels (macOS)
       if: runner.os == 'macOS'
       run: |
+        # Determine deployment target based on runner OS
+        # Must match the Homebrew libraries for each macOS version
+        if [[ "${{ matrix.os }}" == "macos-13" ]]; then
+          HNSW_TARGET="13.0"
+          DISKANN_TARGET="13.3"
+        elif [[ "${{ matrix.os }}" == "macos-14" ]]; then
+          HNSW_TARGET="14.0"
+          DISKANN_TARGET="14.0"
+        elif [[ "${{ matrix.os }}" == "macos-15" ]]; then
+          HNSW_TARGET="15.0"
+          DISKANN_TARGET="15.0"
+        fi
+
        # Repair HNSW wheel
        cd packages/leann-backend-hnsw
        if [ -d dist ]; then
-         delocate-wheel -w dist_repaired -v dist/*.whl
+         export MACOSX_DEPLOYMENT_TARGET=$HNSW_TARGET
+         delocate-wheel -w dist_repaired -v --require-target-macos-version $HNSW_TARGET dist/*.whl
          rm -rf dist
          mv dist_repaired dist
        fi
@@ -149,7 +293,8 @@ jobs:
        # Repair DiskANN wheel
        cd packages/leann-backend-diskann
        if [ -d dist ]; then
-         delocate-wheel -w dist_repaired -v dist/*.whl
+         export MACOSX_DEPLOYMENT_TARGET=$DISKANN_TARGET
+         delocate-wheel -w dist_repaired -v --require-target-macos-version $DISKANN_TARGET dist/*.whl
          rm -rf dist
          mv dist_repaired dist
        fi
@@ -160,8 +305,98 @@
        echo "📦 Built packages:"
        find packages/*/dist -name "*.whl" -o -name "*.tar.gz" | sort
 
+    - name: Install built packages for testing
+      run: |
+        # Create a virtual environment with the correct Python version
+        uv venv --python ${{ matrix.python }}
+        source .venv/bin/activate || source .venv/Scripts/activate
+
+        # Install packages using --find-links to prioritize local builds
+        uv pip install --find-links packages/leann-core/dist --find-links packages/leann-backend-hnsw/dist --find-links packages/leann-backend-diskann/dist packages/leann-core/dist/*.whl || uv pip install --find-links packages/leann-core/dist packages/leann-core/dist/*.tar.gz
+        uv pip install --find-links packages/leann-core/dist packages/leann-backend-hnsw/dist/*.whl
+        uv pip install --find-links packages/leann-core/dist packages/leann-backend-diskann/dist/*.whl
+        uv pip install packages/leann/dist/*.whl || uv pip install packages/leann/dist/*.tar.gz
+
+        # Install test dependencies using extras
+        uv pip install -e ".[test]"
+
+    - name: Run tests with pytest
+      env:
+        CI: true
+        OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+        HF_HUB_DISABLE_SYMLINKS: 1
+        TOKENIZERS_PARALLELISM: false
+        PYTORCH_ENABLE_MPS_FALLBACK: 0
+        OMP_NUM_THREADS: 1
+        MKL_NUM_THREADS: 1
+      run: |
+        source .venv/bin/activate || source .venv/Scripts/activate
+        pytest tests/ -v --tb=short
+
+    - name: Run sanity checks (optional)
+      run: |
+        # Activate virtual environment
+        source .venv/bin/activate || source .venv/Scripts/activate
+
+        # Run distance function tests if available
+        if [ -f test/sanity_checks/test_distance_functions.py ]; then
+          echo "Running distance function sanity checks..."
+          python test/sanity_checks/test_distance_functions.py || echo "⚠️ Distance function test failed, continuing..."
+        fi
+
     - name: Upload artifacts
       uses: actions/upload-artifact@v4
       with:
        name: packages-${{ matrix.os }}-py${{ matrix.python }}
        path: packages/*/dist/
+
+  arch-smoke:
+    name: Arch Linux smoke test (install & import)
+    needs: build
+    runs-on: ubuntu-latest
+    container:
+      image: archlinux:latest
+
+    steps:
+      - name: Prepare system
+        run: |
+          pacman -Syu --noconfirm
+          pacman -S --noconfirm python python-pip gcc git zlib openssl
+
+      - name: Download ALL wheel artifacts from this run
+        uses: actions/download-artifact@v5
+        with:
+          # Don't specify name, download all artifacts
+          path: ./wheels
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
+
+      - name: Create virtual environment and install wheels
+        run: |
+          uv venv
+          source .venv/bin/activate || source .venv/Scripts/activate
+          uv pip install --find-links wheels leann-core
+          uv pip install --find-links wheels leann-backend-hnsw
+          uv pip install --find-links wheels leann-backend-diskann
+          uv pip install --find-links wheels leann
+
+      - name: Import & tiny runtime check
+        env:
+          OMP_NUM_THREADS: 1
+          MKL_NUM_THREADS: 1
+        run: |
+          source .venv/bin/activate || source .venv/Scripts/activate
+          python - <<'PY'
+          import leann
+          import leann_backend_hnsw as h
+          import leann_backend_diskann as d
+          from leann import LeannBuilder, LeannSearcher
+          b = LeannBuilder(backend_name="hnsw")
+          b.add_text("hello arch")
+          b.build_index("arch_demo.leann")
+          s = LeannSearcher("arch_demo.leann")
+          print("search:", s.search("hello", top_k=1))
+          PY
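The new `arch-smoke` job is essentially an install-and-import check against the freshly built wheels. A rough local approximation, assuming the wheel artifacts have been downloaded into a local `wheels/` directory (the directory name here is only an assumption):

```bash
# Install the locally built wheels into a fresh environment and verify they import
uv venv && source .venv/bin/activate
uv pip install --find-links wheels leann-core leann-backend-hnsw leann-backend-diskann leann
python -c "from leann import LeannBuilder, LeannSearcher; print('leann imports OK')"
```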
.github/workflows/link-check.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
+name: Link Check
+
+on:
+  push:
+    branches: [ main, master ]
+  pull_request:
+  schedule:
+    - cron: "0 3 * * 1"
+
+jobs:
+  link-check:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: lycheeverse/lychee-action@v2
+        with:
+          args: --no-progress --insecure --user-agent 'curl/7.68.0' README.md docs/ apps/ examples/ benchmarks/
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
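The same links can be checked before pushing; a minimal sketch, assuming the `lychee` CLI is installed locally:

```bash
# Check the same paths the workflow covers
lychee --no-progress README.md docs/ apps/ examples/ benchmarks/
```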
.github/workflows/release-manual.yml (9 changes)
@@ -22,11 +22,14 @@ jobs:
 
       - name: Validate version
         run: |
-          if ! [[ "${{ inputs.version }}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-            echo "❌ Invalid version format"
+          # Remove 'v' prefix if present for validation
+          VERSION_CLEAN="${{ inputs.version }}"
+          VERSION_CLEAN="${VERSION_CLEAN#v}"
+          if ! [[ "$VERSION_CLEAN" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+            echo "❌ Invalid version format. Expected format: X.Y.Z or vX.Y.Z"
             exit 1
           fi
-          echo "✅ Version format valid"
+          echo "✅ Version format valid: ${{ inputs.version }}"
 
       - name: Update versions and push
         id: push
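The rewritten validation strips an optional leading `v` with shell parameter expansion and then applies the same `X.Y.Z` regex, so both `0.1.11` and `v0.1.11` pass. A standalone sketch of that logic (the `check_version` helper is illustrative only):

```bash
check_version() {
  local v="${1#v}"   # drop a leading "v" if present
  if [[ "$v" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    echo "valid: $1"
  else
    echo "invalid: $1"
  fi
}
check_version v0.1.11   # valid
check_version 0.1.11    # valid
check_version 0.1       # invalid
```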
.gitignore (25 changes)
@@ -18,9 +18,11 @@ demo/experiment_results/**/*.json
 *.eml
 *.emlx
 *.json
+!.vscode/*.json
 *.sh
 *.txt
 !CMakeLists.txt
+!llms.txt
 latency_breakdown*.json
 experiment_results/eval_results/diskann/*.json
 aws/
@@ -34,11 +36,15 @@ build/
 nprobe_logs/
 micro/results
 micro/contriever-INT8
-examples/data/*
-!examples/data/2501.14312v1 (1).pdf
-!examples/data/2506.08276v1.pdf
-!examples/data/PrideandPrejudice.txt
-!examples/data/README.md
+data/*
+!data/2501.14312v1 (1).pdf
+!data/2506.08276v1.pdf
+!data/PrideandPrejudice.txt
+!data/huawei_pangu.md
+!data/ground_truth/
+!data/indices/
+!data/queries/
+!data/.gitattributes
 *.qdstrm
 benchmark_results/
 results/
@@ -86,3 +92,12 @@ packages/leann-backend-diskann/third_party/DiskANN/_deps/
 *.passages.json
 
 batchtest.py
+tests/__pytest_cache__/
+tests/__pycache__/
+paru-bin/
+
+CLAUDE.md
+CLAUDE.local.md
+.claude/*.local.*
+.claude/local/*
+benchmarks/data/
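Whether a given path is caught by the updated ignore rules can be verified with `git check-ignore`; the paths below are only examples:

```bash
# Prints the matching rule when a path is ignored; exits non-zero when it is not
git check-ignore -v data/tmp_results.json
git check-ignore -v data/PrideandPrejudice.txt
```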
.gitmodules (3 changes)
@@ -14,3 +14,6 @@
 [submodule "packages/leann-backend-hnsw/third_party/libzmq"]
 	path = packages/leann-backend-hnsw/third_party/libzmq
 	url = https://github.com/zeromq/libzmq.git
+[submodule "packages/astchunk-leann"]
+	path = packages/astchunk-leann
+	url = https://github.com/yichuan-w/astchunk-leann.git
.pre-commit-config.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
+      - id: check-merge-conflict
+      - id: debug-statements
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.12.7  # Fixed version to match pyproject.toml
+    hooks:
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix]
+      - id: ruff-format
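To pick up this configuration locally, the hooks have to be installed once per clone. A minimal sketch, assuming `pre-commit` is available (for example via `uv tool install pre-commit`):

```bash
pre-commit install            # register the git hook in this clone
pre-commit run --all-files    # run every configured hook once, as the PR checklist asks
```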
.vscode/extensions.json (new file, 5 lines)
@@ -0,0 +1,5 @@
+{
+  "recommendations": [
+    "charliermarsh.ruff",
+  ]
+}
.vscode/settings.json (new file, 22 lines)
@@ -0,0 +1,22 @@
+{
+  "python.defaultInterpreterPath": ".venv/bin/python",
+  "python.terminal.activateEnvironment": true,
+  "[python]": {
+    "editor.defaultFormatter": "charliermarsh.ruff",
+    "editor.formatOnSave": true,
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "explicit",
+      "source.fixAll": "explicit"
+    },
+    "editor.insertSpaces": true,
+    "editor.tabSize": 4
+  },
+  "ruff.enable": true,
+  "files.watcherExclude": {
+    "**/.venv/**": true,
+    "**/__pycache__/**": true,
+    "**/*.egg-info/**": true,
+    "**/build/**": true,
+    "**/dist/**": true
+  }
+}
548
README.md
548
README.md
@@ -3,20 +3,27 @@
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="https://img.shields.io/badge/Python-3.9%2B-blue.svg" alt="Python 3.9+">
|
<img src="https://img.shields.io/badge/Python-3.9%20%7C%203.10%20%7C%203.11%20%7C%203.12%20%7C%203.13-blue.svg" alt="Python Versions">
|
||||||
|
<img src="https://github.com/yichuan-w/LEANN/actions/workflows/build-and-publish.yml/badge.svg" alt="CI Status">
|
||||||
|
<img src="https://img.shields.io/badge/Platform-Ubuntu%20%26%20Arch%20%26%20WSL%20%7C%20macOS%20(ARM64%2FIntel)-lightgrey" alt="Platform">
|
||||||
<img src="https://img.shields.io/badge/License-MIT-green.svg" alt="MIT License">
|
<img src="https://img.shields.io/badge/License-MIT-green.svg" alt="MIT License">
|
||||||
<img src="https://img.shields.io/badge/Platform-Linux%20%7C%20macOS-lightgrey" alt="Platform">
|
<img src="https://img.shields.io/badge/MCP-Native%20Integration-blue" alt="MCP Integration">
|
||||||
|
<a href="https://join.slack.com/t/leann-e2u9779/shared_invite/zt-3ckd2f6w1-OX08~NN4gkWhh10PRVBj1Q"><img src="https://img.shields.io/badge/Slack-Join-4A154B?logo=slack&logoColor=white" alt="Join Slack">
|
||||||
|
<a href="assets/wechat_user_group.JPG" title="Join WeChat group"><img src="https://img.shields.io/badge/WeChat-Join-2DC100?logo=wechat&logoColor=white" alt="Join WeChat group"></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<h2 align="center" tabindex="-1" class="heading-element" dir="auto">
|
<h2 align="center" tabindex="-1" class="heading-element" dir="auto">
|
||||||
The smallest vector index in the world. RAG Everything with LEANN!
|
The smallest vector index in the world. RAG Everything with LEANN!
|
||||||
</h2>
|
</h2>
|
||||||
|
|
||||||
LEANN is a revolutionary vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.
|
LEANN is an innovative vector database that democratizes personal AI. Transform your laptop into a powerful RAG system that can index and search through millions of documents while using **97% less storage** than traditional solutions **without accuracy loss**.
|
||||||
|
|
||||||
LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration Fig →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
|
LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration Fig →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
|
||||||
|
|
||||||
**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)**, or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
|
**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can semantic search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)**, **[codebase](#-claude-code-integration-transform-your-development-workflow)**\* , or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
|
||||||
|
|
||||||
|
|
||||||
|
\* Claude Code only supports basic `grep`-style keyword search. **LEANN** is a drop-in **semantic search MCP service fully compatible with Claude Code**, unlocking intelligent retrieval without changing your workflow. 🔥 Check out [the easy setup →](packages/leann-mcp/README.md)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -26,49 +33,170 @@ LEANN achieves this through *graph-based selective recomputation* with *high-deg
|
|||||||
<img src="assets/effects.png" alt="LEANN vs Traditional Vector DB Storage Comparison" width="70%">
|
<img src="assets/effects.png" alt="LEANN vs Traditional Vector DB Storage Comparison" width="70%">
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
> **The numbers speak for themselves:** Index 60 million Wikipedia chunks in just 6GB instead of 201GB. From emails to browser history, everything fits on your laptop. [See detailed benchmarks for different applications below ↓](#storage-comparison)
|
> **The numbers speak for themselves:** Index 60 million text chunks in just 6GB instead of 201GB. From emails to browser history, everything fits on your laptop. [See detailed benchmarks for different applications below ↓](#-storage-comparison)
|
||||||
|
|
||||||
|
|
||||||
🔒 **Privacy:** Your data never leaves your laptop. No OpenAI, no cloud, no "terms of service".
|
🔒 **Privacy:** Your data never leaves your laptop. No OpenAI, no cloud, no "terms of service".
|
||||||
|
|
||||||
🪶 **Lightweight:** Graph-based recomputation eliminates heavy embedding storage, while smart graph pruning and CSR format minimize graph storage overhead. Always less storage, less memory usage!
|
🪶 **Lightweight:** Graph-based recomputation eliminates heavy embedding storage, while smart graph pruning and CSR format minimize graph storage overhead. Always less storage, less memory usage!
|
||||||
|
|
||||||
|
📦 **Portable:** Transfer your entire knowledge base between devices (even with others) with minimal cost - your personal AI memory travels with you.
|
||||||
|
|
||||||
📈 **Scalability:** Handle messy personal data that would crash traditional vector DBs, easily managing your growing personalized data and agent generated memory!
|
📈 **Scalability:** Handle messy personal data that would crash traditional vector DBs, easily managing your growing personalized data and agent generated memory!
|
||||||
|
|
||||||
✨ **No Accuracy Loss:** Maintain the same search quality as heavyweight solutions while using 97% less storage.
|
✨ **No Accuracy Loss:** Maintain the same search quality as heavyweight solutions while using 97% less storage.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
> `pip leann` coming soon!
|
|
||||||
|
### 📦 Prerequisites: Install uv
|
||||||
|
|
||||||
|
[Install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) first if you don't have it. Typically, you can install it with:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone git@github.com:yichuan-w/LEANN.git leann
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🚀 Quick Install
|
||||||
|
|
||||||
|
Clone the repository to access all examples and try amazing applications,
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/yichuan-w/LEANN.git leann
|
||||||
|
cd leann
|
||||||
|
```
|
||||||
|
|
||||||
|
and install LEANN from [PyPI](https://pypi.org/project/leann/) to run them immediately:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv venv
|
||||||
|
source .venv/bin/activate
|
||||||
|
uv pip install leann
|
||||||
|
```
|
||||||
|
<!--
|
||||||
|
> Low-resource? See “Low-resource setups” in the [Configuration Guide](docs/configuration-guide.md#low-resource-setups). -->
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>
|
||||||
|
<strong>🔧 Build from Source (Recommended for development)</strong>
|
||||||
|
</summary>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/yichuan-w/LEANN.git leann
|
||||||
cd leann
|
cd leann
|
||||||
git submodule update --init --recursive
|
git submodule update --init --recursive
|
||||||
```
|
```
|
||||||
|
|
||||||
**macOS:**
|
**macOS:**
|
||||||
```bash
|
|
||||||
brew install llvm libomp boost protobuf zeromq pkgconf
|
|
||||||
|
|
||||||
# Install with HNSW backend (default, recommended for most users)
|
Note: DiskANN requires MacOS 13.3 or later.
|
||||||
# Install uv first if you don't have it:
|
|
||||||
# curl -LsSf https://astral.sh/uv/install.sh | sh
|
```bash
|
||||||
# See: https://docs.astral.sh/uv/getting-started/installation/#installation-methods
|
brew install libomp boost protobuf zeromq pkgconf
|
||||||
CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv sync
|
uv sync --extra diskann
|
||||||
```
|
```
|
||||||
|
|
||||||
**Linux:**
|
**Linux (Ubuntu/Debian):**
|
||||||
```bash
|
|
||||||
sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
|
|
||||||
|
|
||||||
# Install with HNSW backend (default, recommended for most users)
|
Note: On Ubuntu 20.04, you may need to build a newer Abseil and pin Protobuf (e.g., v3.20.x) for building DiskANN. See [Issue #30](https://github.com/yichuan-w/LEANN/issues/30) for a step-by-step note.
|
||||||
uv sync
|
|
||||||
|
You can manually install [Intel oneAPI MKL](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html) instead of `libmkl-full-dev` for DiskANN. You can also use `libopenblas-dev` for building HNSW only, by removing `--extra diskann` in the command below.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo apt-get update && sudo apt-get install -y \
|
||||||
|
libomp-dev libboost-all-dev protobuf-compiler libzmq3-dev \
|
||||||
|
pkg-config libabsl-dev libaio-dev libprotobuf-dev \
|
||||||
|
libmkl-full-dev
|
||||||
|
|
||||||
|
uv sync --extra diskann
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Linux (Arch Linux):**
|
||||||
|
|
||||||
**Ollama Setup (Recommended for full privacy):**
|
```bash
|
||||||
|
sudo pacman -Syu && sudo pacman -S --needed base-devel cmake pkgconf git gcc \
|
||||||
|
boost boost-libs protobuf abseil-cpp libaio zeromq
|
||||||
|
|
||||||
> *You can skip this installation if you only want to use OpenAI API for generation.*
|
# For MKL in DiskANN
|
||||||
|
sudo pacman -S --needed base-devel git
|
||||||
|
git clone https://aur.archlinux.org/paru-bin.git
|
||||||
|
cd paru-bin && makepkg -si
|
||||||
|
paru -S intel-oneapi-mkl intel-oneapi-compiler
|
||||||
|
source /opt/intel/oneapi/setvars.sh
|
||||||
|
|
||||||
|
uv sync --extra diskann
|
||||||
|
```
|
||||||
|
|
||||||
|
**Linux (RHEL / CentOS Stream / Oracle / Rocky / AlmaLinux):**
|
||||||
|
|
||||||
|
See [Issue #50](https://github.com/yichuan-w/LEANN/issues/50) for more details.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo dnf groupinstall -y "Development Tools"
|
||||||
|
sudo dnf install -y libomp-devel boost-devel protobuf-compiler protobuf-devel \
|
||||||
|
abseil-cpp-devel libaio-devel zeromq-devel pkgconf-pkg-config
|
||||||
|
|
||||||
|
# For MKL in DiskANN
|
||||||
|
sudo dnf install -y intel-oneapi-mkl intel-oneapi-mkl-devel \
|
||||||
|
intel-oneapi-openmp || sudo dnf install -y intel-oneapi-compiler
|
||||||
|
source /opt/intel/oneapi/setvars.sh
|
||||||
|
|
||||||
|
uv sync --extra diskann
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
Our declarative API makes RAG as easy as writing a config file.
|
||||||
|
|
||||||
|
Check out [demo.ipynb](demo.ipynb) or [](https://colab.research.google.com/github/yichuan-w/LEANN/blob/main/demo.ipynb)
|
||||||
|
|
||||||
|
```python
|
||||||
|
from leann import LeannBuilder, LeannSearcher, LeannChat
|
||||||
|
from pathlib import Path
|
||||||
|
INDEX_PATH = str(Path("./").resolve() / "demo.leann")
|
||||||
|
|
||||||
|
# Build an index
|
||||||
|
builder = LeannBuilder(backend_name="hnsw")
|
||||||
|
builder.add_text("LEANN saves 97% storage compared to traditional vector databases.")
|
||||||
|
builder.add_text("Tung Tung Tung Sahur called—they need their banana‑crocodile hybrid back")
|
||||||
|
builder.build_index(INDEX_PATH)
|
||||||
|
|
||||||
|
# Search
|
||||||
|
searcher = LeannSearcher(INDEX_PATH)
|
||||||
|
results = searcher.search("fantastical AI-generated creatures", top_k=1)
|
||||||
|
|
||||||
|
# Chat with your data
|
||||||
|
chat = LeannChat(INDEX_PATH, llm_config={"type": "hf", "model": "Qwen/Qwen3-0.6B"})
|
||||||
|
response = chat.ask("How much storage does LEANN save?", top_k=1)
|
||||||
|
```
|
||||||
|
|
||||||
|
## RAG on Everything!
|
||||||
|
|
||||||
|
LEANN supports RAG on various data sources including documents (`.pdf`, `.txt`, `.md`), Apple Mail, Google Search History, WeChat, and more.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### Generation Model Setup
|
||||||
|
|
||||||
|
LEANN supports multiple LLM providers for text generation (OpenAI API, HuggingFace, Ollama).
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>🔑 OpenAI API Setup (Default)</strong></summary>
|
||||||
|
|
||||||
|
Set your OpenAI API key as an environment variable:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export OPENAI_API_KEY="your-api-key-here"
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>🔧 Ollama Setup (Recommended for full privacy)</strong></summary>
|
||||||
|
|
||||||
**macOS:**
|
**macOS:**
|
||||||
|
|
||||||
@@ -80,6 +208,7 @@ ollama pull llama3.2:1b
|
|||||||
```
|
```
|
||||||
|
|
||||||
**Linux:**
|
**Linux:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install Ollama
|
# Install Ollama
|
||||||
curl -fsSL https://ollama.ai/install.sh | sh
|
curl -fsSL https://ollama.ai/install.sh | sh
|
||||||
@@ -91,45 +220,55 @@ ollama serve &
|
|||||||
ollama pull llama3.2:1b
|
ollama pull llama3.2:1b
|
||||||
```
|
```
|
||||||
|
|
||||||
## Quick Start in 30s
|
</details>
|
||||||
|
|
||||||
Our declarative API makes RAG as easy as writing a config file.
|
|
||||||
[Try in this ipynb file →](demo.ipynb) [](https://colab.research.google.com/github/yichuan-w/LEANN/blob/main/demo.ipynb)
|
|
||||||
|
|
||||||
```python
|
## ⭐ Flexible Configuration
|
||||||
from leann.api import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
|
|
||||||
# 1. Build the index (no embeddings stored!)
|
LEANN provides flexible parameters for embedding models, search strategies, and data processing to fit your specific needs.
|
||||||
builder = LeannBuilder(backend_name="hnsw")
|
|
||||||
builder.add_text("C# is a powerful programming language")
|
|
||||||
builder.add_text("Python is a powerful programming language and it is very popular")
|
|
||||||
builder.add_text("Machine learning transforms industries")
|
|
||||||
builder.add_text("Neural networks process complex data")
|
|
||||||
builder.add_text("Leann is a great storage saving engine for RAG on your MacBook")
|
|
||||||
builder.build_index("knowledge.leann")
|
|
||||||
|
|
||||||
# 2. Search with real-time embeddings
|
📚 **Need configuration best practices?** Check our [Configuration Guide](docs/configuration-guide.md) for detailed optimization tips, model selection advice, and solutions to common issues like slow embeddings or poor search quality.
|
||||||
searcher = LeannSearcher("knowledge.leann")
|
|
||||||
results = searcher.search("programming languages", top_k=2)
|
|
||||||
|
|
||||||
# 3. Chat with LEANN using retrieved results
|
<details>
|
||||||
llm_config = {
|
<summary><strong>📋 Click to expand: Common Parameters (Available in All Examples)</strong></summary>
|
||||||
"type": "ollama",
|
|
||||||
"model": "llama3.2:1b"
|
|
||||||
}
|
|
||||||
|
|
||||||
chat = LeannChat(index_path="knowledge.leann", llm_config=llm_config)
|
All RAG examples share these common parameters. **Interactive mode** is available in all examples - simply run without `--query` to start a continuous Q&A session where you can ask multiple questions. Type 'quit' to exit.
|
||||||
response = chat.ask(
|
|
||||||
"Compare the two retrieved programming languages and say which one is more popular today.",
|
```bash
|
||||||
top_k=2,
|
# Core Parameters (General preprocessing for all examples)
|
||||||
)
|
--index-dir DIR # Directory to store the index (default: current directory)
|
||||||
|
--query "YOUR QUESTION" # Single query mode. Omit for interactive chat (type 'quit' to exit), and now you can play with your index interactively
|
||||||
|
--max-items N # Limit data preprocessing (default: -1, process all data)
|
||||||
|
--force-rebuild # Force rebuild index even if it exists
|
||||||
|
|
||||||
|
# Embedding Parameters
|
||||||
|
--embedding-model MODEL # e.g., facebook/contriever, text-embedding-3-small, mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text
|
||||||
|
--embedding-mode MODE # sentence-transformers, openai, mlx, or ollama
|
||||||
|
|
||||||
|
# LLM Parameters (Text generation models)
|
||||||
|
--llm TYPE # LLM backend: openai, ollama, or hf (default: openai)
|
||||||
|
--llm-model MODEL # Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct
|
||||||
|
--thinking-budget LEVEL # Thinking budget for reasoning models: low/medium/high (supported by o3, o3-mini, GPT-Oss:20b, and other reasoning models)
|
||||||
|
|
||||||
|
# Search Parameters
|
||||||
|
--top-k N # Number of results to retrieve (default: 20)
|
||||||
|
--search-complexity N # Search complexity for graph traversal (default: 32)
|
||||||
|
|
||||||
|
# Chunking Parameters
|
||||||
|
--chunk-size N # Size of text chunks (default varies by source: 256 for most, 192 for WeChat)
|
||||||
|
--chunk-overlap N # Overlap between chunks (default varies: 25-128 depending on source)
|
||||||
|
|
||||||
|
# Index Building Parameters
|
||||||
|
--backend-name NAME # Backend to use: hnsw or diskann (default: hnsw)
|
||||||
|
--graph-degree N # Graph degree for index construction (default: 32)
|
||||||
|
--build-complexity N # Build complexity for index construction (default: 64)
|
||||||
|
--compact / --no-compact # Use compact storage (default: true). Must be `no-compact` for `no-recompute` build.
|
||||||
|
--recompute / --no-recompute # Enable/disable embedding recomputation (default: enabled). Should not do a `no-recompute` search in a `recompute` build.
|
||||||
```
|
```
|
||||||
|
|
||||||
## RAG on Everything!
|
</details>
|
||||||
|
|
||||||
LEANN supports RAG on various data sources including documents (.pdf, .txt, .md), Apple Mail, Google Search History, WeChat, and more.
|
### 📄 Personal Data Manager: Process Any Documents (`.pdf`, `.txt`, `.md`)!
|
||||||
|
|
||||||
### 📄 Personal Data Manager: Process Any Documents (.pdf, .txt, .md)!
|
|
||||||
|
|
||||||
Ask questions directly about your personal PDFs, documents, and any directory containing your files!
|
Ask questions directly about your personal PDFs, documents, and any directory containing your files!
|
||||||
|
|
||||||
@@ -137,51 +276,71 @@ Ask questions directly about your personal PDFs, documents, and any directory co
|
|||||||
<img src="videos/paper_clear.gif" alt="LEANN Document Search Demo" width="600">
|
<img src="videos/paper_clear.gif" alt="LEANN Document Search Demo" width="600">
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
The example below asks a question about summarizing two papers (uses default data in `examples/data`):
|
The example below asks a question about summarizing our paper (uses default data in `data/`, which is a directory with diverse data sources: two papers, Pride and Prejudice, and a Technical report about LLM in Huawei in Chinese), and this is the **easiest example** to run here:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Drop your PDFs, .txt, .md files into examples/data/
|
source .venv/bin/activate # Don't forget to activate the virtual environment
|
||||||
uv run ./examples/main_cli_example.py
|
python -m apps.document_rag --query "What are the main techniques LEANN explores?"
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
<details>
|
||||||
# Or use python directly
|
<summary><strong>📋 Click to expand: Document-Specific Arguments</strong></summary>
|
||||||
source .venv/bin/activate
|
|
||||||
python ./examples/main_cli_example.py
|
#### Parameters
|
||||||
|
```bash
|
||||||
|
--data-dir DIR # Directory containing documents to process (default: data)
|
||||||
|
--file-types .ext .ext # Filter by specific file types (optional - all LlamaIndex supported types if omitted)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Example Commands
|
||||||
|
```bash
|
||||||
|
# Process all documents with larger chunks for academic papers
|
||||||
|
python -m apps.document_rag --data-dir "~/Documents/Papers" --chunk-size 1024
|
||||||
|
|
||||||
|
# Filter only markdown and Python files with smaller chunks
|
||||||
|
python -m apps.document_rag --data-dir "./docs" --chunk-size 256 --file-types .md .py
|
||||||
|
|
||||||
|
# Enable AST-aware chunking for code files
|
||||||
|
python -m apps.document_rag --enable-code-chunking --data-dir "./my_project"
|
||||||
|
|
||||||
|
# Or use the specialized code RAG for better code understanding
|
||||||
|
python -m apps.code_rag --repo-dir "./my_codebase" --query "How does authentication work?"
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
### 📧 Your Personal Email Secretary: RAG on Apple Mail!
|
### 📧 Your Personal Email Secretary: RAG on Apple Mail!
|
||||||
|
|
||||||
|
> **Note:** The examples below currently support macOS only. Windows support coming soon.
|
||||||
|
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="videos/mail_clear.gif" alt="LEANN Email Search Demo" width="600">
|
<img src="videos/mail_clear.gif" alt="LEANN Email Search Demo" width="600">
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
**Note:** You need to grant full disk access to your terminal/VS Code in System Preferences → Privacy & Security → Full Disk Access.
|
Before running the example below, you need to grant full disk access to your terminal/VS Code in System Preferences → Privacy & Security → Full Disk Access.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python examples/mail_reader_leann.py --query "What's the food I ordered by doordash or Uber eat mostly?"
|
python -m apps.email_rag --query "What's the food I ordered by DoorDash or Uber Eats mostly?"
|
||||||
```
|
```
|
||||||
**780K email chunks → 78MB storage** Finally, search your email like you search Google.
|
**780K email chunks → 78MB storage.** Finally, search your email like you search Google.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>📋 Click to expand: Command Examples</strong></summary>
|
<summary><strong>📋 Click to expand: Email-Specific Arguments</strong></summary>
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
```bash
|
```bash
|
||||||
# Use default mail path (works for most macOS setups)
|
--mail-path PATH # Path to specific mail directory (auto-detects if omitted)
|
||||||
python examples/mail_reader_leann.py
|
--include-html # Include HTML content in processing (useful for newsletters)
|
||||||
|
```
|
||||||
|
|
||||||
# Run with custom index directory
|
#### Example Commands
|
||||||
python examples/mail_reader_leann.py --index-dir "./my_mail_index"
|
```bash
|
||||||
|
# Search work emails from a specific account
|
||||||
|
python -m apps.email_rag --mail-path "~/Library/Mail/V10/WORK_ACCOUNT"
|
||||||
|
|
||||||
# Process all emails (may take time but indexes everything)
|
# Find all receipts and order confirmations (includes HTML)
|
||||||
python examples/mail_reader_leann.py --max-emails -1
|
python -m apps.email_rag --query "receipt order confirmation invoice" --include-html
|
||||||
|
|
||||||
# Limit number of emails processed (useful for testing)
|
|
||||||
python examples/mail_reader_leann.py --max-emails 1000
|
|
||||||
|
|
||||||
# Run a single query
|
|
||||||
python examples/mail_reader_leann.py --query "What did my boss say about deadlines?"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
@@ -202,25 +361,25 @@ Once the index is built, you can ask questions like:
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python examples/google_history_reader_leann.py --query "Tell me my browser history about machine learning?"
|
python -m apps.browser_rag --query "Tell me my browser history about machine learning?"
|
||||||
```
|
```
|
||||||
**38K browser entries → 6MB storage.** Your browser history becomes your personal search engine.
|
**38K browser entries → 6MB storage.** Your browser history becomes your personal search engine.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>📋 Click to expand: Command Examples</strong></summary>
|
<summary><strong>📋 Click to expand: Browser-Specific Arguments</strong></summary>
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
```bash
|
```bash
|
||||||
# Use default Chrome profile (auto-finds all profiles)
|
--chrome-profile PATH # Path to Chrome profile directory (auto-detects if omitted)
|
||||||
python examples/google_history_reader_leann.py
|
```
|
||||||
|
|
||||||
# Run with custom index directory
|
#### Example Commands
|
||||||
python examples/google_history_reader_leann.py --index-dir "./my_chrome_index"
|
```bash
|
||||||
|
# Search academic research from your browsing history
|
||||||
|
python -m apps.browser_rag --query "arxiv papers machine learning transformer architecture"
|
||||||
|
|
||||||
# Limit number of history entries processed (useful for testing)
|
# Track competitor analysis across work profile
|
||||||
python examples/google_history_reader_leann.py --max-entries 500
|
python -m apps.browser_rag --chrome-profile "~/Library/Application Support/Google/Chrome/Work Profile" --max-items 5000
|
||||||
|
|
||||||
# Run a single query
|
|
||||||
python examples/google_history_reader_leann.py --query "What websites did I visit about machine learning?"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
@@ -260,7 +419,7 @@ Once the index is built, you can ask questions like:
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python examples/wechat_history_reader_leann.py --query "Show me all group chats about weekend plans"
|
python -m apps.wechat_rag --query "Show me all group chats about weekend plans"
|
||||||
```
|
```
|
||||||
**400K messages → 64MB storage** Search years of chat history in any language.
|
**400K messages → 64MB storage** Search years of chat history in any language.
|
||||||
|
|
||||||
@@ -268,7 +427,13 @@ python examples/wechat_history_reader_leann.py --query "Show me all group chats
|
|||||||
<details>
|
<details>
|
||||||
<summary><strong>🔧 Click to expand: Installation Requirements</strong></summary>
|
<summary><strong>🔧 Click to expand: Installation Requirements</strong></summary>
|
||||||
|
|
||||||
First, you need to install the WeChat exporter:
|
First, you need to install the [WeChat exporter](https://github.com/sunnyyoung/WeChatTweak-CLI),
|
||||||
|
|
||||||
|
```bash
|
||||||
|
brew install sunnyyoung/repo/wechattweak-cli
|
||||||
|
```
|
||||||
|
|
||||||
|
or install it manually (if you have issues with Homebrew):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo packages/wechat-exporter/wechattweak-cli install
|
sudo packages/wechat-exporter/wechattweak-cli install
|
||||||
@@ -277,30 +442,28 @@ sudo packages/wechat-exporter/wechattweak-cli install
|
|||||||
**Troubleshooting:**
|
**Troubleshooting:**
|
||||||
- **Installation issues**: Check the [WeChatTweak-CLI issues page](https://github.com/sunnyyoung/WeChatTweak-CLI/issues/41)
|
- **Installation issues**: Check the [WeChatTweak-CLI issues page](https://github.com/sunnyyoung/WeChatTweak-CLI/issues/41)
|
||||||
- **Export errors**: If you encounter the error below, try restarting WeChat
|
- **Export errors**: If you encounter the error below, try restarting WeChat
|
||||||
```
|
```bash
|
||||||
Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed.
|
Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed.
|
||||||
Failed to find or export WeChat data. Exiting.
|
Failed to find or export WeChat data. Exiting.
|
||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>📋 Click to expand: Command Examples</strong></summary>
|
<summary><strong>📋 Click to expand: WeChat-Specific Arguments</strong></summary>
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
```bash
|
```bash
|
||||||
# Use default settings (recommended for first run)
|
--export-dir DIR # Directory to store exported WeChat data (default: wechat_export_direct)
|
||||||
python examples/wechat_history_reader_leann.py
|
--force-export # Force re-export even if data exists
|
||||||
|
```
|
||||||
|
|
||||||
# Run with a custom export directory; on the first run, LEANN automatically exports all chat history for you
|
#### Example Commands
|
||||||
python examples/wechat_history_reader_leann.py --export-dir "./my_wechat_exports"
|
```bash
|
||||||
|
# Search for travel plans discussed in group chats
|
||||||
|
python -m apps.wechat_rag --query "travel plans" --max-items 10000
|
||||||
|
|
||||||
# Run with custom index directory
|
# Re-export and search recent chats (useful after new messages)
|
||||||
python examples/wechat_history_reader_leann.py --index-dir "./my_wechat_index"
|
python -m apps.wechat_rag --force-export --query "work schedule"
|
||||||
|
|
||||||
# Limit number of chat entries processed (useful for testing)
|
|
||||||
python examples/wechat_history_reader_leann.py --max-entries 1000
|
|
||||||
|
|
||||||
# Run a single query
|
|
||||||
python examples/wechat_history_reader_leann.py --query "Show me conversations about travel plans"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
@@ -314,15 +477,68 @@ Once the index is built, you can ask questions like:
|
|||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
### 🚀 Claude Code Integration: Transform Your Development Workflow!
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>NEW!! AST‑Aware Code Chunking</strong></summary>
|
||||||
|
|
||||||
|
LEANN features intelligent code chunking that preserves semantic boundaries (functions, classes, methods) for Python, Java, C#, and TypeScript, improving code understanding compared to text-based chunking.
|
||||||
|
|
||||||
|
📖 Read the [AST Chunking Guide →](docs/ast_chunking_guide.md)
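
As a quick sketch (using the `apps` entry points added elsewhere in this diff; paths and queries are illustrative):

```bash
# code_rag enables AST-aware chunking by default for the whole repository
python -m apps.code_rag --repo-dir ./my_project --query "How does authentication work?"

# document_rag can opt in to AST chunking for code files mixed into a document folder
python -m apps.document_rag --data-dir ./my_docs --enable-code-chunking --ast-chunk-size 512 --ast-chunk-overlap 64
```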
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
**The future of code assistance is here.** Transform your development workflow with LEANN's native MCP integration for Claude Code. Index your entire codebase and get intelligent code assistance directly in your IDE.
|
||||||
|
|
||||||
|
**Key features:**
|
||||||
|
- 🔍 **Semantic code search** across your entire project, fully local index and lightweight
|
||||||
|
- 🧠 **AST-aware chunking** preserves code structure (functions, classes)
|
||||||
|
- 📚 **Context-aware assistance** for debugging and development
|
||||||
|
- 🚀 **Zero-config setup** with automatic language detection
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install LEANN globally for MCP integration
|
||||||
|
uv tool install leann-core --with leann
|
||||||
|
claude mcp add --scope user leann-server -- leann_mcp
|
||||||
|
# Setup is automatic - just start using Claude Code!
|
||||||
|
```
|
||||||
|
Try our fully agentic pipeline with auto query rewriting, semantic search planning, and more:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
**🔥 Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)
|
||||||
|
|
||||||
## 🖥️ Command Line Interface
|
## 🖥️ Command Line Interface
|
||||||
|
|
||||||
LEANN includes a powerful CLI for document processing and search. Perfect for quick document indexing and interactive chat.
|
LEANN includes a powerful CLI for document processing and search. Perfect for quick document indexing and interactive chat.
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
If you followed the Quick Start, `leann` is already installed in your virtual environment:
|
||||||
```bash
|
```bash
|
||||||
# Build an index from documents
|
source .venv/bin/activate
|
||||||
leann build my-docs --docs ./documents
|
leann --help
|
||||||
|
```
|
||||||
|
|
||||||
|
**To make it globally available:**
|
||||||
|
```bash
|
||||||
|
# Install the LEANN CLI globally using uv tool
|
||||||
|
uv tool install leann-core --with leann
|
||||||
|
|
||||||
|
|
||||||
|
# Now you can use leann from anywhere without activating venv
|
||||||
|
leann --help
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note**: Global installation is required for Claude Code integration. The `leann_mcp` server depends on the globally available `leann` command.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### Usage Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build from a specific directory; my-docs is the index name (you can also build from multiple directories or files)
|
||||||
|
leann build my-docs --docs ./your_documents
|
||||||
|
|
||||||
# Search your documents
|
# Search your documents
|
||||||
leann search my-docs "machine learning concepts"
|
leann search my-docs "machine learning concepts"
|
||||||
@@ -330,23 +546,32 @@ leann search my-docs "machine learning concepts"
|
|||||||
# Interactive chat with your documents
|
# Interactive chat with your documents
|
||||||
leann ask my-docs --interactive
|
leann ask my-docs --interactive
|
||||||
|
|
||||||
|
# Ask a single question (non-interactive)
|
||||||
|
leann ask my-docs "Where are prompts configured?"
|
||||||
|
|
||||||
# List all your indexes
|
# List all your indexes
|
||||||
leann list
|
leann list
|
||||||
|
|
||||||
|
# Remove an index
|
||||||
|
leann remove my-docs
|
||||||
```
|
```
|
||||||
|
|
||||||
**Key CLI features:**
|
**Key CLI features:**
|
||||||
- Auto-detects document formats (PDF, TXT, MD, DOCX)
|
- Auto-detects document formats (PDF, TXT, MD, DOCX, PPTX + code files)
|
||||||
- Smart text chunking with overlap
|
- **🧠 AST-aware chunking** for Python, Java, C#, TypeScript files
|
||||||
|
- Smart text chunking with overlap for all other content
|
||||||
- Multiple LLM providers (Ollama, OpenAI, HuggingFace)
|
- Multiple LLM providers (Ollama, OpenAI, HuggingFace)
|
||||||
- Organized index storage in `~/.leann/indexes/`
|
- Organized index storage in `.leann/indexes/` (project-local)
|
||||||
- Support for advanced search parameters
|
- Support for advanced search parameters
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>📋 Click to expand: Complete CLI Reference</strong></summary>
|
<summary><strong>📋 Click to expand: Complete CLI Reference</strong></summary>
|
||||||
|
|
||||||
|
Run `leann --help`, or any of `leann build --help`, `leann search --help`, `leann ask --help`, `leann list --help`, and `leann remove --help`, to get the complete CLI reference.
|
||||||
|
|
||||||
**Build Command:**
|
**Build Command:**
|
||||||
```bash
|
```bash
|
||||||
leann build INDEX_NAME --docs DIRECTORY [OPTIONS]
|
leann build INDEX_NAME --docs DIRECTORY|FILE [DIRECTORY|FILE ...] [OPTIONS]
|
||||||
|
|
||||||
Options:
|
Options:
|
||||||
--backend {hnsw,diskann} Backend to use (default: hnsw)
|
--backend {hnsw,diskann} Backend to use (default: hnsw)
|
||||||
@@ -354,8 +579,8 @@ Options:
|
|||||||
--graph-degree N Graph degree (default: 32)
|
--graph-degree N Graph degree (default: 32)
|
||||||
--complexity N Build complexity (default: 64)
|
--complexity N Build complexity (default: 64)
|
||||||
--force Force rebuild existing index
|
--force Force rebuild existing index
|
||||||
--compact Use compact storage (default: true)
|
--compact / --no-compact Use compact storage (default: true); use `--no-compact` when building with `--no-recompute`.
|
||||||
--recompute Enable recomputation (default: true)
|
--recompute / --no-recompute Enable recomputation (default: true)
|
||||||
```
|
```
|
||||||
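
For example, a build that combines these options might look like this (paths are illustrative):

```bash
leann build my-docs --docs ./documents ./notes --backend diskann --graph-degree 32 --complexity 64 --force
```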
|
|
||||||
**Search Command:**
|
**Search Command:**
|
||||||
@@ -365,7 +590,7 @@ leann search INDEX_NAME QUERY [OPTIONS]
|
|||||||
Options:
|
Options:
|
||||||
--top-k N Number of results (default: 5)
|
--top-k N Number of results (default: 5)
|
||||||
--complexity N Search complexity (default: 64)
|
--complexity N Search complexity (default: 64)
|
||||||
--recompute-embeddings Use recomputation for highest accuracy
|
--recompute / --no-recompute Enable/disable embedding recomputation (default: enabled); avoid `--no-recompute` searches on an index built with recomputation.
|
||||||
--pruning-strategy {global,local,proportional}
|
--pruning-strategy {global,local,proportional}
|
||||||
```
|
```
|
||||||
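
For example, a tuned search might look like this (the query is illustrative):

```bash
leann search my-docs "graph-based vector search" --top-k 10 --complexity 128 --pruning-strategy proportional
```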
|
|
||||||
@@ -380,8 +605,73 @@ Options:
|
|||||||
--top-k N Retrieval count (default: 20)
|
--top-k N Retrieval count (default: 20)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**List Command:**
|
||||||
|
```bash
|
||||||
|
leann list
|
||||||
|
|
||||||
|
# Lists all indexes across all projects with status indicators:
|
||||||
|
# ✅ - Index is complete and ready to use
|
||||||
|
# ❌ - Index is incomplete or corrupted
|
||||||
|
# 📁 - CLI-created index (in .leann/indexes/)
|
||||||
|
# 📄 - App-created index (*.leann.meta.json files)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Remove Command:**
|
||||||
|
```bash
|
||||||
|
leann remove INDEX_NAME [OPTIONS]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--force, -f Force removal without confirmation
|
||||||
|
|
||||||
|
# Smart removal: automatically finds and safely removes indexes
|
||||||
|
# - Shows all matching indexes across projects
|
||||||
|
# - Requires confirmation for cross-project removal
|
||||||
|
# - Interactive selection when multiple matches found
|
||||||
|
# - Supports both CLI and app-created indexes
|
||||||
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
## 🚀 Advanced Features
|
||||||
|
|
||||||
|
### 🎯 Metadata Filtering
|
||||||
|
|
||||||
|
LEANN supports a simple metadata filtering system to enable sophisticated use cases like document filtering by date/type, code search by file extension, and content management based on custom criteria.
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Add metadata during indexing
|
||||||
|
builder.add_text(
|
||||||
|
"def authenticate_user(token): ...",
|
||||||
|
metadata={"file_extension": ".py", "lines_of_code": 25}
|
||||||
|
)
|
||||||
|
|
||||||
|
# Search with filters
|
||||||
|
results = searcher.search(
|
||||||
|
query="authentication function",
|
||||||
|
metadata_filters={
|
||||||
|
"file_extension": {"==": ".py"},
|
||||||
|
"lines_of_code": {"<": 100}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Supported operators**: `==`, `!=`, `<`, `<=`, `>`, `>=`, `in`, `not_in`, `contains`, `starts_with`, `ends_with`, `is_true`, `is_false`
|
||||||
|
|
||||||
|
📖 **[Complete Metadata filtering guide →](docs/metadata_filtering.md)**
|
||||||
|
|
||||||
|
### 🔍 Grep Search
|
||||||
|
|
||||||
|
For exact text matching instead of semantic search, use the `use_grep` parameter:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Exact text search
|
||||||
|
results = searcher.search("banana‑crocodile", use_grep=True, top_k=1)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Use cases**: Finding specific code patterns, error messages, function names, or exact phrases where semantic similarity isn't needed.
|
||||||
|
|
||||||
|
📖 **[Complete grep search guide →](docs/grep_search.md)**
|
||||||
|
|
||||||
## 🏗️ Architecture & How It Works
|
## 🏗️ Architecture & How It Works
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
@@ -396,13 +686,17 @@ Options:
|
|||||||
- **Dynamic batching:** Efficiently batch embedding computations for GPU utilization
|
- **Dynamic batching:** Efficiently batch embedding computations for GPU utilization
|
||||||
- **Two-level search:** Smart graph traversal that prioritizes promising nodes
|
- **Two-level search:** Smart graph traversal that prioritizes promising nodes
|
||||||
|
|
||||||
**Backends:** DiskANN or HNSW - pick what works for your data size.
|
**Backends:**
|
||||||
|
- **HNSW** (default): Ideal for most datasets with maximum storage savings through full recomputation
|
||||||
|
- **DiskANN**: Advanced option with superior search performance, using PQ-based graph traversal with real-time reranking for the best speed-accuracy trade-off
|
||||||
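
As a minimal sketch of selecting a backend through the Python API (mirroring the defaults used by the `apps` examples in this diff; the text and paths are illustrative):

```python
from leann.api import LeannBuilder

# HNSW (the default) maximizes storage savings via full recomputation;
# switch backend_name to "diskann" for PQ-based traversal with reranking.
builder = LeannBuilder(
    backend_name="hnsw",  # or "diskann"
    embedding_model="facebook/contriever",
    graph_degree=32,
    complexity=64,
    is_compact=True,
    is_recompute=True,
)
builder.add_text("LEANN stores a pruned graph instead of every embedding.")
builder.build_index("./demo_index/demo.leann")
```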
|
|
||||||
## Benchmarks
|
## Benchmarks
|
||||||
|
|
||||||
|
**[DiskANN vs HNSW Performance Comparison →](benchmarks/diskann_vs_hnsw_speed_comparison.py)** - Compare search performance between both backends
|
||||||
|
|
||||||
📊 **[Simple Example: Compare LEANN vs FAISS →](examples/compare_faiss_vs_leann.py)**
|
**[Simple Example: Compare LEANN vs FAISS →](benchmarks/compare_faiss_vs_leann.py)** - See storage savings in action
|
||||||
### Storage Comparison
|
|
||||||
|
### 📊 Storage Comparison
|
||||||
|
|
||||||
| System | DPR (2.1M) | Wiki (60M) | Chat (400K) | Email (780K) | Browser (38K) |
|
| System | DPR (2.1M) | Wiki (60M) | Chat (400K) | Email (780K) | Browser (38K) |
|
||||||
|--------|-------------|------------|-------------|--------------|---------------|
|
|--------|-------------|------------|-------------|--------------|---------------|
|
||||||
@@ -416,8 +710,8 @@ Options:
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
uv pip install -e ".[dev]" # Install dev dependencies
|
uv pip install -e ".[dev]" # Install dev dependencies
|
||||||
python examples/run_evaluation.py data/indices/dpr/dpr_diskann # DPR dataset
|
python benchmarks/run_evaluation.py # Will auto-download evaluation data and run benchmarks
|
||||||
python examples/run_evaluation.py data/indices/rpj_wiki/rpj_wiki.index # Wikipedia
|
python benchmarks/run_evaluation.py benchmarks/data/indices/rpj_wiki/rpj_wiki --num-queries 2000 # After downloading data, you can run the benchmark with our biggest index
|
||||||
```
|
```
|
||||||
|
|
||||||
The evaluation script downloads data automatically on first run. The last three results were tested with partial personal data, and you can reproduce them with your own data!
|
The evaluation script downloads data automatically on first run. The last three results were tested with partial personal data, and you can reproduce them with your own data!
|
||||||
@@ -441,10 +735,10 @@ If you find Leann useful, please cite:
|
|||||||
|
|
||||||
## ✨ [Detailed Features →](docs/features.md)
|
## ✨ [Detailed Features →](docs/features.md)
|
||||||
|
|
||||||
## 🤝 [Contributing →](docs/contributing.md)
|
## 🤝 [CONTRIBUTING →](docs/CONTRIBUTING.md)
|
||||||
|
|
||||||
|
|
||||||
## [FAQ →](docs/faq.md)
|
## ❓ [FAQ →](docs/faq.md)
|
||||||
|
|
||||||
|
|
||||||
## 📈 [Roadmap →](docs/roadmap.md)
|
## 📈 [Roadmap →](docs/roadmap.md)
|
||||||
@@ -455,9 +749,18 @@ MIT License - see [LICENSE](LICENSE) for details.
|
|||||||
|
|
||||||
## 🙏 Acknowledgments
|
## 🙏 Acknowledgments
|
||||||
|
|
||||||
This work is done at [**Berkeley Sky Computing Lab**](https://sky.cs.berkeley.edu/)
|
Core Contributors: [Yichuan Wang](https://yichuan-w.github.io/) & [Zhifei Li](https://github.com/andylizf).
|
||||||
---
|
|
||||||
|
|
||||||
|
Active Contributors: [Gabriel Dehan](https://github.com/gabriel-dehan)
|
||||||
|
|
||||||
|
|
||||||
|
We welcome more contributors! Feel free to open issues or submit PRs.
|
||||||
|
|
||||||
|
This work is done at [**Berkeley Sky Computing Lab**](https://sky.cs.berkeley.edu/).
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
[](https://www.star-history.com/#yichuan-w/LEANN&Date)
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<strong>⭐ Star us on GitHub if Leann is useful for your research or applications!</strong>
|
<strong>⭐ Star us on GitHub if Leann is useful for your research or applications!</strong>
|
||||||
</p>
|
</p>
|
||||||
@@ -465,4 +768,3 @@ This work is done at [**Berkeley Sky Computing Lab**](https://sky.cs.berkeley.e
|
|||||||
<p align="center">
|
<p align="center">
|
||||||
Made with ❤️ by the Leann team
|
Made with ❤️ by the Leann team
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|||||||
0
apps/__init__.py
Normal file
387
apps/base_rag_example.py
Normal file
@@ -0,0 +1,387 @@
|
|||||||
|
"""
|
||||||
|
Base class for unified RAG examples interface.
|
||||||
|
Provides common parameters and functionality for all RAG examples.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import dotenv
|
||||||
|
from leann.api import LeannBuilder, LeannChat
|
||||||
|
from leann.registry import register_project_directory
|
||||||
|
from leann.settings import resolve_ollama_host, resolve_openai_api_key, resolve_openai_base_url
|
||||||
|
|
||||||
|
dotenv.load_dotenv()
|
||||||
|
|
||||||
|
|
||||||
|
class BaseRAGExample(ABC):
|
||||||
|
"""Base class for all RAG examples with unified interface."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
name: str,
|
||||||
|
description: str,
|
||||||
|
default_index_name: str,
|
||||||
|
):
|
||||||
|
self.name = name
|
||||||
|
self.description = description
|
||||||
|
self.default_index_name = default_index_name
|
||||||
|
self.parser = self._create_parser()
|
||||||
|
|
||||||
|
def _create_parser(self) -> argparse.ArgumentParser:
|
||||||
|
"""Create argument parser with common parameters."""
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description=self.description, formatter_class=argparse.RawDescriptionHelpFormatter
|
||||||
|
)
|
||||||
|
|
||||||
|
# Core parameters (all examples share these)
|
||||||
|
core_group = parser.add_argument_group("Core Parameters")
|
||||||
|
core_group.add_argument(
|
||||||
|
"--index-dir",
|
||||||
|
type=str,
|
||||||
|
default=f"./{self.default_index_name}",
|
||||||
|
help=f"Directory to store the index (default: ./{self.default_index_name})",
|
||||||
|
)
|
||||||
|
core_group.add_argument(
|
||||||
|
"--query",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Query to run (if not provided, will run in interactive mode)",
|
||||||
|
)
|
||||||
|
# Allow subclasses to override default max_items
|
||||||
|
max_items_default = getattr(self, "max_items_default", -1)
|
||||||
|
core_group.add_argument(
|
||||||
|
"--max-items",
|
||||||
|
type=int,
|
||||||
|
default=max_items_default,
|
||||||
|
help="Maximum number of items to process -1 for all, means index all documents, and you should set it to a reasonable number if you have a large dataset and try at the first time)",
|
||||||
|
)
|
||||||
|
core_group.add_argument(
|
||||||
|
"--force-rebuild", action="store_true", help="Force rebuild index even if it exists"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Embedding parameters
|
||||||
|
embedding_group = parser.add_argument_group("Embedding Parameters")
|
||||||
|
# Allow subclasses to override default embedding_model
|
||||||
|
embedding_model_default = getattr(self, "embedding_model_default", "facebook/contriever")
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-model",
|
||||||
|
type=str,
|
||||||
|
default=embedding_model_default,
|
||||||
|
help=f"Embedding model to use (default: {embedding_model_default}), we provide facebook/contriever, text-embedding-3-small,mlx-community/Qwen3-Embedding-0.6B-8bit or nomic-embed-text",
|
||||||
|
)
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-mode",
|
||||||
|
type=str,
|
||||||
|
default="sentence-transformers",
|
||||||
|
choices=["sentence-transformers", "openai", "mlx", "ollama"],
|
||||||
|
help="Embedding backend mode (default: sentence-transformers), we provide sentence-transformers, openai, mlx, or ollama",
|
||||||
|
)
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-host",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Override Ollama-compatible embedding host",
|
||||||
|
)
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-api-base",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Base URL for OpenAI-compatible embedding services",
|
||||||
|
)
|
||||||
|
embedding_group.add_argument(
|
||||||
|
"--embedding-api-key",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="API key for embedding service (defaults to OPENAI_API_KEY)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# LLM parameters
|
||||||
|
llm_group = parser.add_argument_group("LLM Parameters")
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm",
|
||||||
|
type=str,
|
||||||
|
default="openai",
|
||||||
|
choices=["openai", "ollama", "hf", "simulated"],
|
||||||
|
help="LLM backend: openai, ollama, or hf (default: openai)",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm-model",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Model name (default: gpt-4o) e.g., gpt-4o-mini, llama3.2:1b, Qwen/Qwen2.5-1.5B-Instruct",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm-host",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Host for Ollama-compatible APIs (defaults to LEANN_OLLAMA_HOST/OLLAMA_HOST)",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--thinking-budget",
|
||||||
|
type=str,
|
||||||
|
choices=["low", "medium", "high"],
|
||||||
|
default=None,
|
||||||
|
help="Thinking budget for reasoning models (low/medium/high). Supported by GPT-Oss:20b and other reasoning models.",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm-api-base",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Base URL for OpenAI-compatible APIs",
|
||||||
|
)
|
||||||
|
llm_group.add_argument(
|
||||||
|
"--llm-api-key",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="API key for OpenAI-compatible APIs (defaults to OPENAI_API_KEY)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# AST Chunking parameters
|
||||||
|
ast_group = parser.add_argument_group("AST Chunking Parameters")
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--use-ast-chunking",
|
||||||
|
action="store_true",
|
||||||
|
help="Enable AST-aware chunking for code files (requires astchunk)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--ast-chunk-size",
|
||||||
|
type=int,
|
||||||
|
default=512,
|
||||||
|
help="Maximum characters per AST chunk (default: 512)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--ast-chunk-overlap",
|
||||||
|
type=int,
|
||||||
|
default=64,
|
||||||
|
help="Overlap between AST chunks (default: 64)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--code-file-extensions",
|
||||||
|
nargs="+",
|
||||||
|
default=None,
|
||||||
|
help="Additional code file extensions to process with AST chunking (e.g., .py .java .cs .ts)",
|
||||||
|
)
|
||||||
|
ast_group.add_argument(
|
||||||
|
"--ast-fallback-traditional",
|
||||||
|
action="store_true",
|
||||||
|
default=True,
|
||||||
|
help="Fall back to traditional chunking if AST chunking fails (default: True)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Search parameters
|
||||||
|
search_group = parser.add_argument_group("Search Parameters")
|
||||||
|
search_group.add_argument(
|
||||||
|
"--top-k", type=int, default=20, help="Number of results to retrieve (default: 20)"
|
||||||
|
)
|
||||||
|
search_group.add_argument(
|
||||||
|
"--search-complexity",
|
||||||
|
type=int,
|
||||||
|
default=32,
|
||||||
|
help="Search complexity for graph traversal (default: 64)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Index building parameters
|
||||||
|
index_group = parser.add_argument_group("Index Building Parameters")
|
||||||
|
index_group.add_argument(
|
||||||
|
"--backend-name",
|
||||||
|
type=str,
|
||||||
|
default="hnsw",
|
||||||
|
choices=["hnsw", "diskann"],
|
||||||
|
help="Backend to use for index (default: hnsw)",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--graph-degree",
|
||||||
|
type=int,
|
||||||
|
default=32,
|
||||||
|
help="Graph degree for index construction (default: 32)",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--build-complexity",
|
||||||
|
type=int,
|
||||||
|
default=64,
|
||||||
|
help="Build complexity for index construction (default: 64)",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--no-compact",
|
||||||
|
action="store_true",
|
||||||
|
help="Disable compact index storage",
|
||||||
|
)
|
||||||
|
index_group.add_argument(
|
||||||
|
"--no-recompute",
|
||||||
|
action="store_true",
|
||||||
|
help="Disable embedding recomputation",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add source-specific parameters
|
||||||
|
self._add_specific_arguments(parser)
|
||||||
|
|
||||||
|
return parser
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def _add_specific_arguments(self, parser: argparse.ArgumentParser):
|
||||||
|
"""Add source-specific arguments. Override in subclasses."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load data from the source. Returns list of text chunks."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_llm_config(self, args) -> dict[str, Any]:
|
||||||
|
"""Get LLM configuration based on arguments."""
|
||||||
|
config = {"type": args.llm}
|
||||||
|
|
||||||
|
if args.llm == "openai":
|
||||||
|
config["model"] = args.llm_model or "gpt-4o"
|
||||||
|
config["base_url"] = resolve_openai_base_url(args.llm_api_base)
|
||||||
|
resolved_key = resolve_openai_api_key(args.llm_api_key)
|
||||||
|
if resolved_key:
|
||||||
|
config["api_key"] = resolved_key
|
||||||
|
elif args.llm == "ollama":
|
||||||
|
config["model"] = args.llm_model or "llama3.2:1b"
|
||||||
|
config["host"] = resolve_ollama_host(args.llm_host)
|
||||||
|
elif args.llm == "hf":
|
||||||
|
config["model"] = args.llm_model or "Qwen/Qwen2.5-1.5B-Instruct"
|
||||||
|
elif args.llm == "simulated":
|
||||||
|
# Simulated LLM doesn't need additional configuration
|
||||||
|
pass
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
async def build_index(self, args, texts: list[str]) -> str:
|
||||||
|
"""Build LEANN index from texts."""
|
||||||
|
index_path = str(Path(args.index_dir) / f"{self.default_index_name}.leann")
|
||||||
|
|
||||||
|
print(f"\n[Building Index] Creating {self.name} index...")
|
||||||
|
print(f"Total text chunks: {len(texts)}")
|
||||||
|
|
||||||
|
embedding_options: dict[str, Any] = {}
|
||||||
|
if args.embedding_mode == "ollama":
|
||||||
|
embedding_options["host"] = resolve_ollama_host(args.embedding_host)
|
||||||
|
elif args.embedding_mode == "openai":
|
||||||
|
embedding_options["base_url"] = resolve_openai_base_url(args.embedding_api_base)
|
||||||
|
resolved_embedding_key = resolve_openai_api_key(args.embedding_api_key)
|
||||||
|
if resolved_embedding_key:
|
||||||
|
embedding_options["api_key"] = resolved_embedding_key
|
||||||
|
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name=args.backend_name,
|
||||||
|
embedding_model=args.embedding_model,
|
||||||
|
embedding_mode=args.embedding_mode,
|
||||||
|
embedding_options=embedding_options or None,
|
||||||
|
graph_degree=args.graph_degree,
|
||||||
|
complexity=args.build_complexity,
|
||||||
|
is_compact=not args.no_compact,
|
||||||
|
is_recompute=not args.no_recompute,
|
||||||
|
num_threads=1, # Force single-threaded mode
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add texts in batches for better progress tracking
|
||||||
|
batch_size = 1000
|
||||||
|
for i in range(0, len(texts), batch_size):
|
||||||
|
batch = texts[i : i + batch_size]
|
||||||
|
for text in batch:
|
||||||
|
builder.add_text(text)
|
||||||
|
print(f"Added {min(i + batch_size, len(texts))}/{len(texts)} texts...")
|
||||||
|
|
||||||
|
print("Building index structure...")
|
||||||
|
builder.build_index(index_path)
|
||||||
|
print(f"Index saved to: {index_path}")
|
||||||
|
|
||||||
|
# Register project directory so leann list can discover this index
|
||||||
|
# The index is saved as args.index_dir/index_name.leann
|
||||||
|
# We want to register the current working directory where the app is run
|
||||||
|
register_project_directory(Path.cwd())
|
||||||
|
|
||||||
|
return index_path
|
||||||
|
|
||||||
|
async def run_interactive_chat(self, args, index_path: str):
|
||||||
|
"""Run interactive chat with the index."""
|
||||||
|
chat = LeannChat(
|
||||||
|
index_path,
|
||||||
|
llm_config=self.get_llm_config(args),
|
||||||
|
system_prompt=f"You are a helpful assistant that answers questions about {self.name} data.",
|
||||||
|
complexity=args.search_complexity,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"\n[Interactive Mode] Chat with your {self.name} data!")
|
||||||
|
print("Type 'quit' or 'exit' to stop.\n")
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
query = input("You: ").strip()
|
||||||
|
if query.lower() in ["quit", "exit", "q"]:
|
||||||
|
print("Goodbye!")
|
||||||
|
break
|
||||||
|
|
||||||
|
if not query:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Prepare LLM kwargs with thinking budget if specified
|
||||||
|
llm_kwargs = {}
|
||||||
|
if hasattr(args, "thinking_budget") and args.thinking_budget:
|
||||||
|
llm_kwargs["thinking_budget"] = args.thinking_budget
|
||||||
|
|
||||||
|
response = chat.ask(
|
||||||
|
query,
|
||||||
|
top_k=args.top_k,
|
||||||
|
complexity=args.search_complexity,
|
||||||
|
llm_kwargs=llm_kwargs,
|
||||||
|
)
|
||||||
|
print(f"\nAssistant: {response}\n")
|
||||||
|
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print("\nGoodbye!")
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error: {e}")
|
||||||
|
|
||||||
|
async def run_single_query(self, args, index_path: str, query: str):
|
||||||
|
"""Run a single query against the index."""
|
||||||
|
chat = LeannChat(
|
||||||
|
index_path,
|
||||||
|
llm_config=self.get_llm_config(args),
|
||||||
|
complexity=args.search_complexity,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"\n[Query]: \033[36m{query}\033[0m")
|
||||||
|
|
||||||
|
# Prepare LLM kwargs with thinking budget if specified
|
||||||
|
llm_kwargs = {}
|
||||||
|
if hasattr(args, "thinking_budget") and args.thinking_budget:
|
||||||
|
llm_kwargs["thinking_budget"] = args.thinking_budget
|
||||||
|
|
||||||
|
response = chat.ask(
|
||||||
|
query, top_k=args.top_k, complexity=args.search_complexity, llm_kwargs=llm_kwargs
|
||||||
|
)
|
||||||
|
print(f"\n[Response]: \033[36m{response}\033[0m")
|
||||||
|
|
||||||
|
async def run(self):
|
||||||
|
"""Main entry point for the example."""
|
||||||
|
args = self.parser.parse_args()
|
||||||
|
|
||||||
|
# Check if index exists
|
||||||
|
index_path = str(Path(args.index_dir) / f"{self.default_index_name}.leann")
|
||||||
|
index_exists = Path(args.index_dir).exists()
|
||||||
|
|
||||||
|
if not index_exists or args.force_rebuild:
|
||||||
|
# Load data and build index
|
||||||
|
print(f"\n{'Rebuilding' if index_exists else 'Building'} index...")
|
||||||
|
texts = await self.load_data(args)
|
||||||
|
|
||||||
|
if not texts:
|
||||||
|
print("No data found to index!")
|
||||||
|
return
|
||||||
|
|
||||||
|
index_path = await self.build_index(args, texts)
|
||||||
|
else:
|
||||||
|
print(f"\nUsing existing index in {args.index_dir}")
|
||||||
|
|
||||||
|
# Run query or interactive mode
|
||||||
|
if args.query:
|
||||||
|
await self.run_single_query(args, index_path, args.query)
|
||||||
|
else:
|
||||||
|
await self.run_interactive_chat(args, index_path)
|
||||||
171
apps/browser_rag.py
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
"""
|
||||||
|
Browser History RAG example using the unified interface.
|
||||||
|
Supports Chrome browser history.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from base_rag_example import BaseRAGExample
|
||||||
|
from chunking import create_text_chunks
|
||||||
|
|
||||||
|
from .history_data.history import ChromeHistoryReader
|
||||||
|
|
||||||
|
|
||||||
|
class BrowserRAG(BaseRAGExample):
|
||||||
|
"""RAG example for Chrome browser history."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
# Set default values BEFORE calling super().__init__
|
||||||
|
self.embedding_model_default = (
|
||||||
|
"sentence-transformers/all-MiniLM-L6-v2" # Fast 384-dim model
|
||||||
|
)
|
||||||
|
|
||||||
|
super().__init__(
|
||||||
|
name="Browser History",
|
||||||
|
description="Process and query Chrome browser history with LEANN",
|
||||||
|
default_index_name="google_history_index",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _add_specific_arguments(self, parser):
|
||||||
|
"""Add browser-specific arguments."""
|
||||||
|
browser_group = parser.add_argument_group("Browser Parameters")
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--chrome-profile",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Path to Chrome profile directory (auto-detected if not specified)",
|
||||||
|
)
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--auto-find-profiles",
|
||||||
|
action="store_true",
|
||||||
|
default=True,
|
||||||
|
help="Automatically find all Chrome profiles (default: True)",
|
||||||
|
)
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--chunk-size", type=int, default=256, help="Text chunk size (default: 256)"
|
||||||
|
)
|
||||||
|
browser_group.add_argument(
|
||||||
|
"--chunk-overlap", type=int, default=128, help="Text chunk overlap (default: 128)"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_chrome_base_path(self) -> Path:
|
||||||
|
"""Get the base Chrome profile path based on OS."""
|
||||||
|
if sys.platform == "darwin":
|
||||||
|
return Path.home() / "Library" / "Application Support" / "Google" / "Chrome"
|
||||||
|
elif sys.platform.startswith("linux"):
|
||||||
|
return Path.home() / ".config" / "google-chrome"
|
||||||
|
elif sys.platform == "win32":
|
||||||
|
return Path(os.environ["LOCALAPPDATA"]) / "Google" / "Chrome" / "User Data"
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported platform: {sys.platform}")
|
||||||
|
|
||||||
|
def _find_chrome_profiles(self) -> list[Path]:
|
||||||
|
"""Auto-detect all Chrome profiles."""
|
||||||
|
base_path = self._get_chrome_base_path()
|
||||||
|
if not base_path.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
profiles = []
|
||||||
|
|
||||||
|
# Check Default profile
|
||||||
|
default_profile = base_path / "Default"
|
||||||
|
if default_profile.exists() and (default_profile / "History").exists():
|
||||||
|
profiles.append(default_profile)
|
||||||
|
|
||||||
|
# Check numbered profiles
|
||||||
|
for item in base_path.iterdir():
|
||||||
|
if item.is_dir() and item.name.startswith("Profile "):
|
||||||
|
if (item / "History").exists():
|
||||||
|
profiles.append(item)
|
||||||
|
|
||||||
|
return profiles
|
||||||
|
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load browser history and convert to text chunks."""
|
||||||
|
# Determine Chrome profiles
|
||||||
|
if args.chrome_profile and not args.auto_find_profiles:
|
||||||
|
profile_dirs = [Path(args.chrome_profile)]
|
||||||
|
else:
|
||||||
|
print("Auto-detecting Chrome profiles...")
|
||||||
|
profile_dirs = self._find_chrome_profiles()
|
||||||
|
|
||||||
|
# If specific profile given, filter to just that one
|
||||||
|
if args.chrome_profile:
|
||||||
|
profile_path = Path(args.chrome_profile)
|
||||||
|
profile_dirs = [p for p in profile_dirs if p == profile_path]
|
||||||
|
|
||||||
|
if not profile_dirs:
|
||||||
|
print("No Chrome profiles found!")
|
||||||
|
print("Please specify --chrome-profile manually")
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"Found {len(profile_dirs)} Chrome profiles")
|
||||||
|
|
||||||
|
# Create reader
|
||||||
|
reader = ChromeHistoryReader()
|
||||||
|
|
||||||
|
# Process each profile
|
||||||
|
all_documents = []
|
||||||
|
total_processed = 0
|
||||||
|
|
||||||
|
for i, profile_dir in enumerate(profile_dirs):
|
||||||
|
print(f"\nProcessing profile {i + 1}/{len(profile_dirs)}: {profile_dir.name}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Apply max_items limit per profile
|
||||||
|
max_per_profile = -1
|
||||||
|
if args.max_items > 0:
|
||||||
|
remaining = args.max_items - total_processed
|
||||||
|
if remaining <= 0:
|
||||||
|
break
|
||||||
|
max_per_profile = remaining
|
||||||
|
|
||||||
|
# Load history
|
||||||
|
documents = reader.load_data(
|
||||||
|
chrome_profile_path=str(profile_dir),
|
||||||
|
max_count=max_per_profile,
|
||||||
|
)
|
||||||
|
|
||||||
|
if documents:
|
||||||
|
all_documents.extend(documents)
|
||||||
|
total_processed += len(documents)
|
||||||
|
print(f"Processed {len(documents)} history entries from this profile")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error processing {profile_dir}: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not all_documents:
|
||||||
|
print("No browser history found to process!")
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"\nTotal history entries processed: {len(all_documents)}")
|
||||||
|
|
||||||
|
# Convert to text chunks
|
||||||
|
all_texts = create_text_chunks(
|
||||||
|
all_documents, chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
|
||||||
|
)
|
||||||
|
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Example queries for browser history RAG
|
||||||
|
print("\n🌐 Browser History RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'What websites did I visit about machine learning?'")
|
||||||
|
print("- 'Find my search history about programming'")
|
||||||
|
print("- 'What YouTube videos did I watch recently?'")
|
||||||
|
print("- 'Show me websites about travel planning'")
|
||||||
|
print("\nNote: Make sure Chrome is closed before running\n")
|
||||||
|
|
||||||
|
rag = BrowserRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
44
apps/chunking/__init__.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
"""Unified chunking utilities facade.
|
||||||
|
|
||||||
|
This module re-exports the packaged utilities from `leann.chunking_utils` so
|
||||||
|
that both repo apps (importing `chunking`) and installed wheels share one
|
||||||
|
single implementation. When running from the repo without installation, it
|
||||||
|
adds the `packages/leann-core/src` directory to `sys.path` as a fallback.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
try:
|
||||||
|
from leann.chunking_utils import (
|
||||||
|
CODE_EXTENSIONS,
|
||||||
|
create_ast_chunks,
|
||||||
|
create_text_chunks,
|
||||||
|
create_traditional_chunks,
|
||||||
|
detect_code_files,
|
||||||
|
get_language_from_extension,
|
||||||
|
)
|
||||||
|
except Exception: # pragma: no cover - best-effort fallback for dev environment
|
||||||
|
repo_root = Path(__file__).resolve().parents[2]
|
||||||
|
leann_src = repo_root / "packages" / "leann-core" / "src"
|
||||||
|
if leann_src.exists():
|
||||||
|
sys.path.insert(0, str(leann_src))
|
||||||
|
from leann.chunking_utils import (
|
||||||
|
CODE_EXTENSIONS,
|
||||||
|
create_ast_chunks,
|
||||||
|
create_text_chunks,
|
||||||
|
create_traditional_chunks,
|
||||||
|
detect_code_files,
|
||||||
|
get_language_from_extension,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"CODE_EXTENSIONS",
|
||||||
|
"create_ast_chunks",
|
||||||
|
"create_text_chunks",
|
||||||
|
"create_traditional_chunks",
|
||||||
|
"detect_code_files",
|
||||||
|
"get_language_from_extension",
|
||||||
|
]
|
||||||
211
apps/code_rag.py
Normal file
@@ -0,0 +1,211 @@
|
|||||||
|
"""
|
||||||
|
Code RAG example using AST-aware chunking for optimal code understanding.
|
||||||
|
Specialized for code repositories with automatic language detection and
|
||||||
|
optimized chunking parameters.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from base_rag_example import BaseRAGExample
|
||||||
|
from chunking import CODE_EXTENSIONS, create_text_chunks
|
||||||
|
from llama_index.core import SimpleDirectoryReader
|
||||||
|
|
||||||
|
|
||||||
|
class CodeRAG(BaseRAGExample):
|
||||||
|
"""Specialized RAG example for code repositories with AST-aware chunking."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
name="Code",
|
||||||
|
description="Process and query code repositories with AST-aware chunking",
|
||||||
|
default_index_name="code_index",
|
||||||
|
)
|
||||||
|
# Override defaults for code-specific usage
|
||||||
|
self.embedding_model_default = "facebook/contriever" # Good for code
|
||||||
|
self.max_items_default = -1 # Process all code files by default
|
||||||
|
|
||||||
|
def _add_specific_arguments(self, parser):
|
||||||
|
"""Add code-specific arguments."""
|
||||||
|
code_group = parser.add_argument_group("Code Repository Parameters")
|
||||||
|
|
||||||
|
code_group.add_argument(
|
||||||
|
"--repo-dir",
|
||||||
|
type=str,
|
||||||
|
default=".",
|
||||||
|
help="Code repository directory to index (default: current directory)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--include-extensions",
|
||||||
|
nargs="+",
|
||||||
|
default=list(CODE_EXTENSIONS.keys()),
|
||||||
|
help="File extensions to include (default: supported code extensions)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--exclude-dirs",
|
||||||
|
nargs="+",
|
||||||
|
default=[
|
||||||
|
".git",
|
||||||
|
"__pycache__",
|
||||||
|
"node_modules",
|
||||||
|
"venv",
|
||||||
|
".venv",
|
||||||
|
"build",
|
||||||
|
"dist",
|
||||||
|
"target",
|
||||||
|
],
|
||||||
|
help="Directories to exclude from indexing",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--max-file-size",
|
||||||
|
type=int,
|
||||||
|
default=1000000, # 1MB
|
||||||
|
help="Maximum file size in bytes to process (default: 1MB)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--include-comments",
|
||||||
|
action="store_true",
|
||||||
|
help="Include comments in chunking (useful for documentation)",
|
||||||
|
)
|
||||||
|
code_group.add_argument(
|
||||||
|
"--preserve-imports",
|
||||||
|
action="store_true",
|
||||||
|
default=True,
|
||||||
|
help="Try to preserve import statements in chunks (default: True)",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load code files and convert to AST-aware chunks."""
|
||||||
|
print(f"🔍 Scanning code repository: {args.repo_dir}")
|
||||||
|
print(f"📁 Including extensions: {args.include_extensions}")
|
||||||
|
print(f"🚫 Excluding directories: {args.exclude_dirs}")
|
||||||
|
|
||||||
|
# Check if repository directory exists
|
||||||
|
repo_path = Path(args.repo_dir)
|
||||||
|
if not repo_path.exists():
|
||||||
|
raise ValueError(f"Repository directory not found: {args.repo_dir}")
|
||||||
|
|
||||||
|
# Load code files with filtering
|
||||||
|
reader_kwargs = {
|
||||||
|
"recursive": True,
|
||||||
|
"encoding": "utf-8",
|
||||||
|
"required_exts": args.include_extensions,
|
||||||
|
"exclude_hidden": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create exclusion filter
|
||||||
|
def file_filter(file_path: str) -> bool:
|
||||||
|
"""Filter out unwanted files and directories."""
|
||||||
|
path = Path(file_path)
|
||||||
|
|
||||||
|
# Check file size
|
||||||
|
try:
|
||||||
|
if path.stat().st_size > args.max_file_size:
|
||||||
|
print(f"⚠️ Skipping large file: {path.name} ({path.stat().st_size} bytes)")
|
||||||
|
return False
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check if in excluded directory
|
||||||
|
for exclude_dir in args.exclude_dirs:
|
||||||
|
if exclude_dir in path.parts:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Load documents with file filtering
|
||||||
|
documents = SimpleDirectoryReader(
|
||||||
|
args.repo_dir,
|
||||||
|
file_extractor=None, # Use default extractors
|
||||||
|
**reader_kwargs,
|
||||||
|
).load_data(show_progress=True)
|
||||||
|
|
||||||
|
# Apply custom filtering
|
||||||
|
filtered_docs = []
|
||||||
|
for doc in documents:
|
||||||
|
file_path = doc.metadata.get("file_path", "")
|
||||||
|
if file_filter(file_path):
|
||||||
|
filtered_docs.append(doc)
|
||||||
|
|
||||||
|
documents = filtered_docs
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Error loading code files: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
if not documents:
|
||||||
|
print(
|
||||||
|
f"❌ No code files found in {args.repo_dir} with extensions {args.include_extensions}"
|
||||||
|
)
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"✅ Loaded {len(documents)} code files")
|
||||||
|
|
||||||
|
# Show breakdown by language/extension
|
||||||
|
ext_counts = {}
|
||||||
|
for doc in documents:
|
||||||
|
file_path = doc.metadata.get("file_path", "")
|
||||||
|
if file_path:
|
||||||
|
ext = Path(file_path).suffix.lower()
|
||||||
|
ext_counts[ext] = ext_counts.get(ext, 0) + 1
|
||||||
|
|
||||||
|
print("📊 Files by extension:")
|
||||||
|
for ext, count in sorted(ext_counts.items()):
|
||||||
|
print(f" {ext}: {count} files")
|
||||||
|
|
||||||
|
# Use AST-aware chunking by default for code
|
||||||
|
print(
|
||||||
|
f"🧠 Using AST-aware chunking (chunk_size: {args.ast_chunk_size}, overlap: {args.ast_chunk_overlap})"
|
||||||
|
)
|
||||||
|
|
||||||
|
all_texts = create_text_chunks(
|
||||||
|
documents,
|
||||||
|
chunk_size=256, # Fallback for non-code files
|
||||||
|
chunk_overlap=64,
|
||||||
|
use_ast_chunking=True, # Always use AST for code RAG
|
||||||
|
ast_chunk_size=args.ast_chunk_size,
|
||||||
|
ast_chunk_overlap=args.ast_chunk_overlap,
|
||||||
|
code_file_extensions=args.include_extensions,
|
||||||
|
ast_fallback_traditional=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Apply max_items limit if specified
|
||||||
|
if args.max_items > 0 and len(all_texts) > args.max_items:
|
||||||
|
print(f"⏳ Limiting to {args.max_items} chunks (from {len(all_texts)})")
|
||||||
|
all_texts = all_texts[: args.max_items]
|
||||||
|
|
||||||
|
print(f"✅ Generated {len(all_texts)} code chunks")
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Example queries for code RAG
|
||||||
|
print("\n💻 Code RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'How does the embedding computation work?'")
|
||||||
|
print("- 'What are the main classes in this codebase?'")
|
||||||
|
print("- 'Show me the search implementation'")
|
||||||
|
print("- 'How is error handling implemented?'")
|
||||||
|
print("- 'What design patterns are used?'")
|
||||||
|
print("- 'Explain the chunking logic'")
|
||||||
|
print("\n🚀 Features:")
|
||||||
|
print("- ✅ AST-aware chunking preserves code structure")
|
||||||
|
print("- ✅ Automatic language detection")
|
||||||
|
print("- ✅ Smart filtering of large files and common excludes")
|
||||||
|
print("- ✅ Optimized for code understanding")
|
||||||
|
print("\nUsage examples:")
|
||||||
|
print(" python -m apps.code_rag --repo-dir ./my_project")
|
||||||
|
print(
|
||||||
|
" python -m apps.code_rag --include-extensions .py .js --query 'How does authentication work?'"
|
||||||
|
)
|
||||||
|
print("\nOr run without --query for interactive mode\n")
|
||||||
|
|
||||||
|
rag = CodeRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
131
apps/document_rag.py
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
"""
|
||||||
|
Document RAG example using the unified interface.
|
||||||
|
Supports PDF, TXT, MD, and other document formats.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
|
|
||||||
|
from base_rag_example import BaseRAGExample
|
||||||
|
from chunking import create_text_chunks
|
||||||
|
from llama_index.core import SimpleDirectoryReader
|
||||||
|
|
||||||
|
|
||||||
|
class DocumentRAG(BaseRAGExample):
|
||||||
|
"""RAG example for document processing (PDF, TXT, MD, etc.)."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
name="Document",
|
||||||
|
description="Process and query documents (PDF, TXT, MD, etc.) with LEANN",
|
||||||
|
default_index_name="test_doc_files",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _add_specific_arguments(self, parser):
|
||||||
|
"""Add document-specific arguments."""
|
||||||
|
doc_group = parser.add_argument_group("Document Parameters")
|
||||||
|
doc_group.add_argument(
|
||||||
|
"--data-dir",
|
||||||
|
type=str,
|
||||||
|
default="data",
|
||||||
|
help="Directory containing documents to index (default: data)",
|
||||||
|
)
|
||||||
|
doc_group.add_argument(
|
||||||
|
"--file-types",
|
||||||
|
nargs="+",
|
||||||
|
default=None,
|
||||||
|
help="Filter by file types (e.g., .pdf .txt .md). If not specified, all supported types are processed",
|
||||||
|
)
|
||||||
|
doc_group.add_argument(
|
||||||
|
"--chunk-size", type=int, default=256, help="Text chunk size (default: 256)"
|
||||||
|
)
|
||||||
|
doc_group.add_argument(
|
||||||
|
"--chunk-overlap", type=int, default=128, help="Text chunk overlap (default: 128)"
|
||||||
|
)
|
||||||
|
doc_group.add_argument(
|
||||||
|
"--enable-code-chunking",
|
||||||
|
action="store_true",
|
||||||
|
help="Enable AST-aware chunking for code files in the data directory",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def load_data(self, args) -> list[str]:
|
||||||
|
"""Load documents and convert to text chunks."""
|
||||||
|
print(f"Loading documents from: {args.data_dir}")
|
||||||
|
if args.file_types:
|
||||||
|
print(f"Filtering by file types: {args.file_types}")
|
||||||
|
else:
|
||||||
|
print("Processing all supported file types")
|
||||||
|
|
||||||
|
# Check if data directory exists
|
||||||
|
data_path = Path(args.data_dir)
|
||||||
|
if not data_path.exists():
|
||||||
|
raise ValueError(f"Data directory not found: {args.data_dir}")
|
||||||
|
|
||||||
|
# Load documents
|
||||||
|
reader_kwargs = {
|
||||||
|
"recursive": True,
|
||||||
|
"encoding": "utf-8",
|
||||||
|
}
|
||||||
|
if args.file_types:
|
||||||
|
reader_kwargs["required_exts"] = args.file_types
|
||||||
|
|
||||||
|
documents = SimpleDirectoryReader(args.data_dir, **reader_kwargs).load_data(
|
||||||
|
show_progress=True
|
||||||
|
)
|
||||||
|
|
||||||
|
if not documents:
|
||||||
|
print(f"No documents found in {args.data_dir} with extensions {args.file_types}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
print(f"Loaded {len(documents)} documents")
|
||||||
|
|
||||||
|
# Determine chunking strategy
|
||||||
|
use_ast = args.enable_code_chunking or getattr(args, "use_ast_chunking", False)
|
||||||
|
|
||||||
|
if use_ast:
|
||||||
|
print("Using AST-aware chunking for code files")
|
||||||
|
|
||||||
|
# Convert to text chunks with optional AST support
|
||||||
|
all_texts = create_text_chunks(
|
||||||
|
documents,
|
||||||
|
chunk_size=args.chunk_size,
|
||||||
|
chunk_overlap=args.chunk_overlap,
|
||||||
|
use_ast_chunking=use_ast,
|
||||||
|
ast_chunk_size=getattr(args, "ast_chunk_size", 512),
|
||||||
|
ast_chunk_overlap=getattr(args, "ast_chunk_overlap", 64),
|
||||||
|
code_file_extensions=getattr(args, "code_file_extensions", None),
|
||||||
|
ast_fallback_traditional=getattr(args, "ast_fallback_traditional", True),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Apply max_items limit if specified
|
||||||
|
if args.max_items > 0 and len(all_texts) > args.max_items:
|
||||||
|
print(f"Limiting to {args.max_items} chunks (from {len(all_texts)})")
|
||||||
|
all_texts = all_texts[: args.max_items]
|
||||||
|
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Example queries for document RAG
|
||||||
|
print("\n📄 Document RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'What are the main techniques LEANN uses?'")
|
||||||
|
print("- 'What is the technique DLPM?'")
|
||||||
|
print("- 'Who does Elizabeth Bennet marry?'")
|
||||||
|
print(
|
||||||
|
"- 'What is the problem of developing pan gu model Huawei meets? (盘古大模型开发中遇到什么问题?)'"
|
||||||
|
)
|
||||||
|
print("\n🚀 NEW: Code-aware chunking available!")
|
||||||
|
print("- Use --enable-code-chunking to enable AST-aware chunking for code files")
|
||||||
|
print("- Supports Python, Java, C#, TypeScript files")
|
||||||
|
print("- Better semantic understanding of code structure")
|
||||||
|
print("\nOr run without --query for interactive mode\n")
|
||||||
|
|
||||||
|
rag = DocumentRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
167
apps/email_data/LEANN_email_reader.py
Normal file
@@ -0,0 +1,167 @@
import email
import os
from pathlib import Path
from typing import Any

from llama_index.core import Document
from llama_index.core.readers.base import BaseReader


def find_all_messages_directories(root: str | None = None) -> list[Path]:
    """
    Recursively find all 'Messages' directories under the given root.
    Returns a list of Path objects.
    """
    if root is None:
        # Auto-detect user's mail path
        home_dir = os.path.expanduser("~")
        root = os.path.join(home_dir, "Library", "Mail")

    messages_dirs = []
    for dirpath, _dirnames, _filenames in os.walk(root):
        if os.path.basename(dirpath) == "Messages":
            messages_dirs.append(Path(dirpath))
    return messages_dirs


class EmlxReader(BaseReader):
    """
    Apple Mail .emlx file reader with embedded metadata.

    Reads individual .emlx files from Apple Mail's storage format.
    """

    def __init__(self, include_html: bool = False) -> None:
        """
        Initialize.

        Args:
            include_html: Whether to include HTML content in the email body (default: False)
        """
        self.include_html = include_html

    def load_data(self, input_dir: str, **load_kwargs: Any) -> list[Document]:
        """
        Load data from the input directory containing .emlx files.

        Args:
            input_dir: Directory containing .emlx files
            **load_kwargs:
                max_count (int): Maximum number of messages to read.
        """
        docs: list[Document] = []
        max_count = load_kwargs.get("max_count", 1000)
        count = 0
        total_files = 0
        successful_files = 0
        failed_files = 0

        print(f"Starting to process directory: {input_dir}")

        # Walk through the directory recursively
        for dirpath, dirnames, filenames in os.walk(input_dir):
            # Skip hidden directories
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]

            for filename in filenames:
                # Check if we've reached the max count (skip if max_count == -1)
                if max_count > 0 and count >= max_count:
                    break

                if filename.endswith(".emlx"):
                    total_files += 1
                    filepath = os.path.join(dirpath, filename)
                    try:
                        # Read the .emlx file
                        with open(filepath, encoding="utf-8", errors="ignore") as f:
                            content = f.read()

                        # .emlx files have a length prefix followed by the email content
                        # The first line contains the length, followed by the email
                        lines = content.split("\n", 1)
                        if len(lines) >= 2:
                            email_content = lines[1]

                            # Parse the email using Python's email module
                            try:
                                msg = email.message_from_string(email_content)

                                # Extract email metadata
                                subject = msg.get("Subject", "No Subject")
                                from_addr = msg.get("From", "Unknown")
                                to_addr = msg.get("To", "Unknown")
                                date = msg.get("Date", "Unknown")

                                # Extract email body
                                body = ""
                                if msg.is_multipart():
                                    for part in msg.walk():
                                        if (
                                            part.get_content_type() == "text/plain"
                                            or part.get_content_type() == "text/html"
                                        ):
                                            if (
                                                part.get_content_type() == "text/html"
                                                and not self.include_html
                                            ):
                                                continue
                                            try:
                                                payload = part.get_payload(decode=True)
                                                if payload:
                                                    body += payload.decode("utf-8", errors="ignore")
                                            except Exception as e:
                                                print(f"Error decoding payload: {e}")
                                                continue
                                else:
                                    try:
                                        payload = msg.get_payload(decode=True)
                                        if payload:
                                            body = payload.decode("utf-8", errors="ignore")
                                    except Exception as e:
                                        print(f"Error decoding single part payload: {e}")
                                        body = ""

                                # Only create document if we have some content
                                if body.strip() or subject != "No Subject":
                                    # Create document content with metadata embedded in text
                                    doc_content = f"""
[File]: {filename}
[From]: {from_addr}
[To]: {to_addr}
[Subject]: {subject}
[Date]: {date}
[EMAIL BODY Start]:
{body}
"""

                                    # No separate metadata - everything is in the text
                                    doc = Document(text=doc_content, metadata={})
                                    docs.append(doc)
                                    count += 1
                                    successful_files += 1

                                    # Print first few successful files for debugging
                                    if successful_files <= 3:
                                        print(
                                            f"Successfully loaded: {filename} - Subject: {subject[:50]}..."
                                        )

                            except Exception as e:
                                failed_files += 1
                                if failed_files <= 5:  # Only print first few errors
                                    print(f"Error parsing email from {filepath}: {e}")
                                continue

                    except Exception as e:
                        failed_files += 1
                        if failed_files <= 5:  # Only print first few errors
                            print(f"Error reading file {filepath}: {e}")
                        continue

        print("Processing summary:")
        print(f"  Total .emlx files found: {total_files}")
        print(f"  Successfully loaded: {successful_files}")
        print(f"  Failed to load: {failed_files}")
        print(f"  Final documents: {len(docs)}")

        return docs
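A quick way to smoke-test the new reader in isolation (a minimal sketch; `EmlxReader` and `find_all_messages_directories` come from the file above, while the import path and document count are illustrative and assume you run from the repo root):

# Minimal EmlxReader smoke test (sketch; import path is illustrative).
from apps.email_data.LEANN_email_reader import EmlxReader, find_all_messages_directories

messages_dirs = find_all_messages_directories()  # scans ~/Library/Mail for 'Messages' dirs
if messages_dirs:
    reader = EmlxReader(include_html=False)
    docs = reader.load_data(input_dir=str(messages_dirs[0]), max_count=10)
    print(f"Loaded {len(docs)} documents")
    if docs:
        print(docs[0].text[:200])  # shows the embedded [File]/[From]/[To]/[Subject]/[Date] header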
@@ -7,9 +7,9 @@ Contains simple parser for mbox files.
 import logging
 from pathlib import Path
-from typing import Any, Dict, List, Optional
-from fsspec import AbstractFileSystem
+from typing import Any

+from fsspec import AbstractFileSystem
 from llama_index.core.readers.base import BaseReader
 from llama_index.core.schema import Document

@@ -27,11 +27,7 @@ class MboxReader(BaseReader):
     """

     DEFAULT_MESSAGE_FORMAT: str = (
-        "Date: {_date}\n"
-        "From: {_from}\n"
-        "To: {_to}\n"
-        "Subject: {_subject}\n"
-        "Content: {_content}"
+        "Date: {_date}\nFrom: {_from}\nTo: {_to}\nSubject: {_subject}\nContent: {_content}"
     )

     def __init__(
@@ -45,9 +41,7 @@ class MboxReader(BaseReader):
         try:
             from bs4 import BeautifulSoup  # noqa
         except ImportError:
-            raise ImportError(
-                "`beautifulsoup4` package not found: `pip install beautifulsoup4`"
-            )
+            raise ImportError("`beautifulsoup4` package not found: `pip install beautifulsoup4`")

         super().__init__(*args, **kwargs)
         self.max_count = max_count
@@ -56,9 +50,9 @@ class MboxReader(BaseReader):
     def load_data(
         self,
         file: Path,
-        extra_info: Optional[Dict] = None,
-        fs: Optional[AbstractFileSystem] = None,
-    ) -> List[Document]:
+        extra_info: dict | None = None,
+        fs: AbstractFileSystem | None = None,
+    ) -> list[Document]:
         """Parse file into string."""
         # Import required libraries
         import mailbox
@@ -74,7 +68,7 @@ class MboxReader(BaseReader):
         )

         i = 0
-        results: List[str] = []
+        results: list[str] = []
         # Load file using mailbox
         bytes_parser = BytesParser(policy=default).parse
         mbox = mailbox.mbox(file, factory=bytes_parser)  # type: ignore
@@ -134,12 +128,12 @@ class EmlxMboxReader(MboxReader):
     def load_data(
         self,
         directory: Path,
-        extra_info: Optional[Dict] = None,
-        fs: Optional[AbstractFileSystem] = None,
-    ) -> List[Document]:
+        extra_info: dict | None = None,
+        fs: AbstractFileSystem | None = None,
+    ) -> list[Document]:
         """Parse .emlx files from directory into strings using MboxReader logic."""
-        import tempfile
         import os
+        import tempfile

         if fs:
             logger.warning(
@@ -156,18 +150,18 @@ class EmlxMboxReader(MboxReader):
             return []

         # Create a temporary mbox file
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.mbox', delete=False) as temp_mbox:
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".mbox", delete=False) as temp_mbox:
             temp_mbox_path = temp_mbox.name

         # Convert .emlx files to mbox format
         for emlx_file in emlx_files:
             try:
                 # Read the .emlx file
-                with open(emlx_file, 'r', encoding='utf-8', errors='ignore') as f:
+                with open(emlx_file, encoding="utf-8", errors="ignore") as f:
                     content = f.read()

                 # .emlx format: first line is length, rest is email content
-                lines = content.split('\n', 1)
+                lines = content.split("\n", 1)
                 if len(lines) >= 2:
                     email_content = lines[1]  # Skip the length line

@@ -188,5 +182,5 @@ class EmlxMboxReader(MboxReader):
         # Clean up temporary file
         try:
             os.unlink(temp_mbox_path)
-        except:
+        except OSError:
             pass
157 apps/email_rag.py Normal file
@@ -0,0 +1,157 @@
"""
Email RAG example using the unified interface.
Supports Apple Mail on macOS.
"""

import sys
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))

from base_rag_example import BaseRAGExample
from chunking import create_text_chunks

from .email_data.LEANN_email_reader import EmlxReader


class EmailRAG(BaseRAGExample):
    """RAG example for Apple Mail processing."""

    def __init__(self):
        # Set default values BEFORE calling super().__init__
        self.max_items_default = -1  # Process all emails by default
        self.embedding_model_default = (
            "sentence-transformers/all-MiniLM-L6-v2"  # Fast 384-dim model
        )

        super().__init__(
            name="Email",
            description="Process and query Apple Mail emails with LEANN",
            default_index_name="mail_index",
        )

    def _add_specific_arguments(self, parser):
        """Add email-specific arguments."""
        email_group = parser.add_argument_group("Email Parameters")
        email_group.add_argument(
            "--mail-path",
            type=str,
            default=None,
            help="Path to Apple Mail directory (auto-detected if not specified)",
        )
        email_group.add_argument(
            "--include-html", action="store_true", help="Include HTML content in email processing"
        )
        email_group.add_argument(
            "--chunk-size", type=int, default=256, help="Text chunk size (default: 256)"
        )
        email_group.add_argument(
            "--chunk-overlap", type=int, default=25, help="Text chunk overlap (default: 25)"
        )

    def _find_mail_directories(self) -> list[Path]:
        """Auto-detect all Apple Mail directories."""
        mail_base = Path.home() / "Library" / "Mail"
        if not mail_base.exists():
            return []

        # Find all Messages directories
        messages_dirs = []
        for item in mail_base.rglob("Messages"):
            if item.is_dir():
                messages_dirs.append(item)

        return messages_dirs

    async def load_data(self, args) -> list[str]:
        """Load emails and convert to text chunks."""
        # Determine mail directories
        if args.mail_path:
            messages_dirs = [Path(args.mail_path)]
        else:
            print("Auto-detecting Apple Mail directories...")
            messages_dirs = self._find_mail_directories()

        if not messages_dirs:
            print("No Apple Mail directories found!")
            print("Please specify --mail-path manually")
            return []

        print(f"Found {len(messages_dirs)} mail directories")

        # Create reader
        reader = EmlxReader(include_html=args.include_html)

        # Process each directory
        all_documents = []
        total_processed = 0

        for i, messages_dir in enumerate(messages_dirs):
            print(f"\nProcessing directory {i + 1}/{len(messages_dirs)}: {messages_dir}")

            try:
                # Count emlx files
                emlx_files = list(messages_dir.glob("*.emlx"))
                print(f"Found {len(emlx_files)} email files")

                # Apply max_items limit per directory
                max_per_dir = -1  # Default to process all
                if args.max_items > 0:
                    remaining = args.max_items - total_processed
                    if remaining <= 0:
                        break
                    max_per_dir = remaining
                # If args.max_items == -1, max_per_dir stays -1 (process all)

                # Load emails - fix the parameter passing
                documents = reader.load_data(
                    input_dir=str(messages_dir),
                    max_count=max_per_dir,
                )

                if documents:
                    all_documents.extend(documents)
                    total_processed += len(documents)
                    print(f"Processed {len(documents)} emails from this directory")

            except Exception as e:
                print(f"Error processing {messages_dir}: {e}")
                continue

        if not all_documents:
            print("No emails found to process!")
            return []

        print(f"\nTotal emails processed: {len(all_documents)}")
        print("now starting to split into text chunks ... this may take some time")

        # Convert to text chunks
        # Email reader uses chunk_overlap=25 as in original
        all_texts = create_text_chunks(
            all_documents, chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
        )

        return all_texts


if __name__ == "__main__":
    import asyncio

    # Check platform
    if sys.platform != "darwin":
        print("\n⚠️ Warning: This example is designed for macOS (Apple Mail)")
        print(" Windows/Linux support coming soon!\n")

    # Example queries for email RAG
    print("\n📧 Email RAG Example")
    print("=" * 50)
    print("\nExample queries you can try:")
    print("- 'What did my boss say about deadlines?'")
    print("- 'Find emails about travel expenses'")
    print("- 'Show me emails from last month about the project'")
    print("- 'What food did I order from DoorDash?'")
    print("\nNote: You may need to grant Full Disk Access to your terminal\n")

    rag = EmailRAG()
    asyncio.run(rag.run())
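For reference, assuming `BaseRAGExample` wires the argparse options above into the async `run()` loop the way the `__main__` block suggests, typical usage would look like this sketch (the import path is illustrative):

# CLI (illustrative):
#   python apps/email_rag.py --query "Find emails about travel expenses"
#   python apps/email_rag.py                 # no --query: interactive mode, see note above
# Programmatic equivalent:
import asyncio

from apps.email_rag import EmailRAG  # import path assumes repo root

asyncio.run(EmailRAG().run())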
@@ -1,3 +1,3 @@
 from .history import ChromeHistoryReader

-__all__ = ['ChromeHistoryReader']
+__all__ = ["ChromeHistoryReader"]
@@ -1,10 +1,12 @@
-import sqlite3
 import os
+import sqlite3
 from pathlib import Path
-from typing import List, Any
+from typing import Any

 from llama_index.core import Document
 from llama_index.core.readers.base import BaseReader


 class ChromeHistoryReader(BaseReader):
     """
     Chrome browser history reader that extracts browsing data from SQLite database.
@@ -17,7 +19,7 @@ class ChromeHistoryReader(BaseReader):
         """Initialize."""
         pass

-    def load_data(self, input_dir: str = None, **load_kwargs: Any) -> List[Document]:
+    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
         """
         Load Chrome history data from the default Chrome profile location.

@@ -27,13 +29,15 @@ class ChromeHistoryReader(BaseReader):
             max_count (int): Maximum amount of history entries to read.
             chrome_profile_path (str): Custom path to Chrome profile directory.
         """
-        docs: List[Document] = []
-        max_count = load_kwargs.get('max_count', 1000)
-        chrome_profile_path = load_kwargs.get('chrome_profile_path', None)
+        docs: list[Document] = []
+        max_count = load_kwargs.get("max_count", 1000)
+        chrome_profile_path = load_kwargs.get("chrome_profile_path", None)

         # Default Chrome profile path on macOS
         if chrome_profile_path is None:
-            chrome_profile_path = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")
+            chrome_profile_path = os.path.expanduser(
+                "~/Library/Application Support/Google/Chrome/Default"
+            )

         history_db_path = os.path.join(chrome_profile_path, "History")

@@ -70,7 +74,7 @@ class ChromeHistoryReader(BaseReader):
                 if count >= max_count and max_count > 0:
                     break

-                last_visit, url, title, visit_count, typed_count, hidden = row
+                last_visit, url, title, visit_count, typed_count, _hidden = row

                 # Create document content with metadata embedded in text
                 doc_content = f"""
@@ -82,7 +86,7 @@ class ChromeHistoryReader(BaseReader):
 """

                 # Create document with embedded metadata
-                doc = Document(text=doc_content, metadata={ "title": title[0:150]})
+                doc = Document(text=doc_content, metadata={"title": title[0:150]})
                 # if len(title) > 150:
                 # print(f"Title is too long: {title}")
                 docs.append(doc)
@@ -93,12 +97,17 @@ class ChromeHistoryReader(BaseReader):

         except Exception as e:
             print(f"Error reading Chrome history: {e}")
+            # add you may need to close your browser to make the database file available
+            # also highlight in red
+            print(
+                "\033[91mYou may need to close your browser to make the database file available\033[0m"
+            )
             return docs

         return docs

     @staticmethod
-    def find_chrome_profiles() -> List[Path]:
+    def find_chrome_profiles() -> list[Path]:
         """
         Find all Chrome profile directories.

@@ -124,7 +133,9 @@ class ChromeHistoryReader(BaseReader):
         return profile_dirs

     @staticmethod
-    def export_history_to_file(output_file: str = "chrome_history_export.txt", max_count: int = 1000):
+    def export_history_to_file(
+        output_file: str = "chrome_history_export.txt", max_count: int = 1000
+    ):
         """
         Export Chrome history to a text file using the same SQL query format.

@@ -132,7 +143,9 @@ class ChromeHistoryReader(BaseReader):
         output_file: Path to the output file
         max_count: Maximum number of entries to export
         """
-        chrome_profile_path = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")
+        chrome_profile_path = os.path.expanduser(
+            "~/Library/Application Support/Google/Chrome/Default"
+        )
         history_db_path = os.path.join(chrome_profile_path, "History")

         if not os.path.exists(history_db_path):
@@ -159,10 +172,12 @@ class ChromeHistoryReader(BaseReader):
         cursor.execute(query, (max_count,))
         rows = cursor.fetchall()

-        with open(output_file, 'w', encoding='utf-8') as f:
+        with open(output_file, "w", encoding="utf-8") as f:
             for row in rows:
                 last_visit, url, title, visit_count, typed_count, hidden = row
-                f.write(f"{last_visit}\t{url}\t{title}\t{visit_count}\t{typed_count}\t{hidden}\n")
+                f.write(
+                    f"{last_visit}\t{url}\t{title}\t{visit_count}\t{typed_count}\t{hidden}\n"
+                )

         conn.close()
         print(f"Exported {len(rows)} history entries to {output_file}")
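The red warning added above exists because Chrome keeps an exclusive lock on the `History` SQLite file while the browser is running. A common workaround, not part of this diff, is to copy the database and query the copy; a minimal sketch (paths follow the macOS default used above):

# Sketch: query Chrome's History DB without closing the browser by reading a copy.
import os
import shutil
import sqlite3
import tempfile

src = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default/History")
with tempfile.NamedTemporaryFile(suffix=".sqlite", delete=False) as tmp:
    temp_path = tmp.name
shutil.copyfile(src, temp_path)  # the copy is not held by Chrome's lock
conn = sqlite3.connect(temp_path)
count = conn.execute("SELECT COUNT(*) FROM urls").fetchone()[0]  # 'urls' is Chrome's history table
conn.close()
os.unlink(temp_path)
print(f"{count} URLs in history")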
@@ -2,13 +2,14 @@ import json
 import os
 import re
 import subprocess
-import sys
 import time
+from datetime import datetime
 from pathlib import Path
-from typing import List, Any, Dict, Optional
+from typing import Any

 from llama_index.core import Document
 from llama_index.core.readers.base import BaseReader
-from datetime import datetime


 class WeChatHistoryReader(BaseReader):
     """
@@ -43,10 +44,16 @@
         wechattweak_path = self.wechat_exporter_dir / "wechattweak-cli"
         if not wechattweak_path.exists():
             print("Downloading WeChatTweak CLI...")
-            subprocess.run([
-                "curl", "-L", "-o", str(wechattweak_path),
-                "https://github.com/JettChenT/WeChatTweak-CLI/releases/latest/download/wechattweak-cli"
-            ], check=True)
+            subprocess.run(
+                [
+                    "curl",
+                    "-L",
+                    "-o",
+                    str(wechattweak_path),
+                    "https://github.com/JettChenT/WeChatTweak-CLI/releases/latest/download/wechattweak-cli",
+                ],
+                check=True,
+            )

             # Make executable
             wechattweak_path.chmod(0o755)
@@ -73,16 +80,16 @@
     def check_api_available(self) -> bool:
         """Check if WeChatTweak API is available."""
         try:
-            result = subprocess.run([
-                "curl", "-s", "http://localhost:48065/wechat/allcontacts"
-            ], capture_output=True, text=True, timeout=5)
+            result = subprocess.run(
+                ["curl", "-s", "http://localhost:48065/wechat/allcontacts"],
+                capture_output=True,
+                text=True,
+                timeout=5,
+            )
             return result.returncode == 0 and result.stdout.strip()
         except Exception:
             return False


     def _extract_readable_text(self, content: str) -> str:
         """
         Extract readable text from message content, removing XML and system messages.
@@ -100,14 +107,14 @@
         if isinstance(content, dict):
             # Extract text from dictionary structure
             text_parts = []
-            if 'title' in content:
-                text_parts.append(str(content['title']))
-            if 'quoted' in content:
-                text_parts.append(str(content['quoted']))
-            if 'content' in content:
-                text_parts.append(str(content['content']))
-            if 'text' in content:
-                text_parts.append(str(content['text']))
+            if "title" in content:
+                text_parts.append(str(content["title"]))
+            if "quoted" in content:
+                text_parts.append(str(content["quoted"]))
+            if "content" in content:
+                text_parts.append(str(content["content"]))
+            if "text" in content:
+                text_parts.append(str(content["text"]))

             if text_parts:
                 return " | ".join(text_parts)
@@ -120,11 +127,11 @@
             return ""

         # Remove common prefixes like "wxid_xxx:\n"
-        clean_content = re.sub(r'^wxid_[^:]+:\s*', '', content)
-        clean_content = re.sub(r'^[^:]+:\s*', '', clean_content)
+        clean_content = re.sub(r"^wxid_[^:]+:\s*", "", content)
+        clean_content = re.sub(r"^[^:]+:\s*", "", clean_content)

         # If it's just XML or system message, return empty
-        if clean_content.strip().startswith('<') or 'recalled a message' in clean_content:
+        if clean_content.strip().startswith("<") or "recalled a message" in clean_content:
             return ""

         return clean_content.strip()
@@ -145,9 +152,9 @@
         # Handle dictionary content
         if isinstance(content, dict):
             # Check if dict has any readable text fields
-            text_fields = ['title', 'quoted', 'content', 'text']
+            text_fields = ["title", "quoted", "content", "text"]
             for field in text_fields:
-                if field in content and content[field]:
+                if content.get(field):
                     return True
             return False

@@ -156,42 +163,47 @@
             return False

         # Skip image messages (contain XML with img tags)
-        if '<img' in content and 'cdnurl' in content:
+        if "<img" in content and "cdnurl" in content:
             return False

         # Skip emoji messages (contain emoji XML tags)
-        if '<emoji' in content and 'productid' in content:
+        if "<emoji" in content and "productid" in content:
             return False

         # Skip voice messages
-        if '<voice' in content:
+        if "<voice" in content:
             return False

         # Skip video messages
-        if '<video' in content:
+        if "<video" in content:
             return False

         # Skip file messages
-        if '<appmsg' in content and 'appid' in content:
+        if "<appmsg" in content and "appid" in content:
             return False

         # Skip system messages (like "recalled a message")
-        if 'recalled a message' in content:
+        if "recalled a message" in content:
             return False

         # Check if there's actual readable text (not just XML or system messages)
         # Remove common prefixes like "wxid_xxx:\n" and check for actual content
-        clean_content = re.sub(r'^wxid_[^:]+:\s*', '', content)
-        clean_content = re.sub(r'^[^:]+:\s*', '', clean_content)
+        clean_content = re.sub(r"^wxid_[^:]+:\s*", "", content)
+        clean_content = re.sub(r"^[^:]+:\s*", "", clean_content)

         # If after cleaning we have meaningful text, consider it readable
-        if len(clean_content.strip()) > 0 and not clean_content.strip().startswith('<'):
+        if len(clean_content.strip()) > 0 and not clean_content.strip().startswith("<"):
             return True

         return False

-    def _concatenate_messages(self, messages: List[Dict], max_length: int = 128,
-                              time_window_minutes: int = 30, overlap_messages: int = 0) -> List[Dict]:
+    def _concatenate_messages(
+        self,
+        messages: list[dict],
+        max_length: int = 128,
+        time_window_minutes: int = 30,
+        overlap_messages: int = 0,
+    ) -> list[dict]:
         """
         Concatenate messages based on length and time rules.

@@ -214,12 +226,12 @@

         for message in messages:
             # Extract message info
-            content = message.get('content', '')
-            message_text = message.get('message', '')
-            create_time = message.get('createTime', 0)
-            from_user = message.get('fromUser', '')
-            to_user = message.get('toUser', '')
-            is_sent_from_self = message.get('isSentFromSelf', False)
+            content = message.get("content", "")
+            message_text = message.get("message", "")
+            create_time = message.get("createTime", 0)
+            message.get("fromUser", "")
+            message.get("toUser", "")
+            message.get("isSentFromSelf", False)

             # Extract readable text
             readable_text = self._extract_readable_text(content)
@@ -236,16 +248,24 @@
             if time_diff_minutes > time_window_minutes:
                 # Time gap too large, start new group
                 if current_group:
-                    concatenated_groups.append({
-                        'messages': current_group,
-                        'total_length': current_length,
-                        'start_time': current_group[0].get('createTime', 0),
-                        'end_time': current_group[-1].get('createTime', 0)
-                    })
+                    concatenated_groups.append(
+                        {
+                            "messages": current_group,
+                            "total_length": current_length,
+                            "start_time": current_group[0].get("createTime", 0),
+                            "end_time": current_group[-1].get("createTime", 0),
+                        }
+                    )
                     # Keep last few messages for overlap
                     if overlap_messages > 0 and len(current_group) > overlap_messages:
                         current_group = current_group[-overlap_messages:]
-                        current_length = sum(len(self._extract_readable_text(msg.get('content', '')) or msg.get('message', '')) for msg in current_group)
+                        current_length = sum(
+                            len(
+                                self._extract_readable_text(msg.get("content", ""))
+                                or msg.get("message", "")
+                            )
+                            for msg in current_group
+                        )
                     else:
                         current_group = []
                         current_length = 0
@@ -254,16 +274,24 @@
             message_length = len(readable_text)
             if max_length != -1 and current_length + message_length > max_length and current_group:
                 # Current group would exceed max length, save it and start new
-                concatenated_groups.append({
-                    'messages': current_group,
-                    'total_length': current_length,
-                    'start_time': current_group[0].get('createTime', 0),
-                    'end_time': current_group[-1].get('createTime', 0)
-                })
+                concatenated_groups.append(
+                    {
+                        "messages": current_group,
+                        "total_length": current_length,
+                        "start_time": current_group[0].get("createTime", 0),
+                        "end_time": current_group[-1].get("createTime", 0),
+                    }
+                )
                 # Keep last few messages for overlap
                 if overlap_messages > 0 and len(current_group) > overlap_messages:
                     current_group = current_group[-overlap_messages:]
-                    current_length = sum(len(self._extract_readable_text(msg.get('content', '')) or msg.get('message', '')) for msg in current_group)
+                    current_length = sum(
+                        len(
+                            self._extract_readable_text(msg.get("content", ""))
+                            or msg.get("message", "")
+                        )
+                        for msg in current_group
+                    )
                 else:
                     current_group = []
                     current_length = 0
@@ -275,16 +303,18 @@

         # Add the last group if it exists
         if current_group:
-            concatenated_groups.append({
-                'messages': current_group,
-                'total_length': current_length,
-                'start_time': current_group[0].get('createTime', 0),
-                'end_time': current_group[-1].get('createTime', 0)
-            })
+            concatenated_groups.append(
+                {
+                    "messages": current_group,
+                    "total_length": current_length,
+                    "start_time": current_group[0].get("createTime", 0),
+                    "end_time": current_group[-1].get("createTime", 0),
+                }
+            )

         return concatenated_groups

-    def _create_concatenated_content(self, message_group: Dict, contact_name: str) -> str:
+    def _create_concatenated_content(self, message_group: dict, contact_name: str) -> str:
         """
         Create concatenated content from a group of messages.

@@ -295,16 +325,16 @@
         Returns:
             Formatted concatenated content
         """
-        messages = message_group['messages']
-        start_time = message_group['start_time']
-        end_time = message_group['end_time']
+        messages = message_group["messages"]
+        start_time = message_group["start_time"]
+        end_time = message_group["end_time"]

         # Format timestamps
         if start_time:
             try:
                 start_timestamp = datetime.fromtimestamp(start_time)
-                start_time_str = start_timestamp.strftime('%Y-%m-%d %H:%M:%S')
-            except:
+                start_time_str = start_timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except (ValueError, OSError):
                 start_time_str = str(start_time)
         else:
             start_time_str = "Unknown"
@@ -312,8 +342,8 @@
         if end_time:
             try:
                 end_timestamp = datetime.fromtimestamp(end_time)
-                end_time_str = end_timestamp.strftime('%Y-%m-%d %H:%M:%S')
-            except:
+                end_time_str = end_timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except (ValueError, OSError):
                 end_time_str = str(end_time)
         else:
             end_time_str = "Unknown"
@@ -321,10 +351,10 @@
         # Build concatenated message content
         message_parts = []
         for message in messages:
-            content = message.get('content', '')
-            message_text = message.get('message', '')
-            create_time = message.get('createTime', 0)
-            is_sent_from_self = message.get('isSentFromSelf', False)
+            content = message.get("content", "")
+            message_text = message.get("message", "")
+            create_time = message.get("createTime", 0)
+            is_sent_from_self = message.get("isSentFromSelf", False)

             # Extract readable text
             readable_text = self._extract_readable_text(content)
@@ -336,8 +366,8 @@
                 try:
                     timestamp = datetime.fromtimestamp(create_time)
                     # change to YYYY-MM-DD HH:MM:SS
-                    time_str = timestamp.strftime('%Y-%m-%d %H:%M:%S')
-                except:
+                    time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+                except (ValueError, OSError):
                     time_str = str(create_time)
             else:
                 time_str = "Unknown"
@@ -351,7 +381,7 @@
         doc_content = f"""
 Contact: {contact_name}
 Time Range: {start_time_str} - {end_time_str}
-Messages ({len(messages)} messages, {message_group['total_length']} chars):
+Messages ({len(messages)} messages, {message_group["total_length"]} chars):

 {concatenated_text}
 """
@@ -361,7 +391,7 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
         """
         return doc_content, contact_name

-    def load_data(self, input_dir: str = None, **load_kwargs: Any) -> List[Document]:
+    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
         """
         Load WeChat chat history data from exported JSON files.

@@ -376,13 +406,13 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
             time_window_minutes (int): Time window in minutes to group messages together (default: 30).
             overlap_messages (int): Number of messages to overlap between consecutive groups (default: 2).
         """
-        docs: List[Document] = []
-        max_count = load_kwargs.get('max_count', 1000)
-        wechat_export_dir = load_kwargs.get('wechat_export_dir', None)
-        include_non_text = load_kwargs.get('include_non_text', False)
-        concatenate_messages = load_kwargs.get('concatenate_messages', False)
-        max_length = load_kwargs.get('max_length', 1000)
-        time_window_minutes = load_kwargs.get('time_window_minutes', 30)
+        docs: list[Document] = []
+        max_count = load_kwargs.get("max_count", 1000)
+        wechat_export_dir = load_kwargs.get("wechat_export_dir", None)
+        include_non_text = load_kwargs.get("include_non_text", False)
+        concatenate_messages = load_kwargs.get("concatenate_messages", False)
+        max_length = load_kwargs.get("max_length", 1000)
+        time_window_minutes = load_kwargs.get("time_window_minutes", 30)

         # Default WeChat export path
         if wechat_export_dir is None:
@@ -403,7 +433,7 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                 break

             try:
-                with open(json_file, 'r', encoding='utf-8') as f:
+                with open(json_file, encoding="utf-8") as f:
                     chat_data = json.load(f)

                 # Extract contact name from filename
@@ -414,7 +444,7 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                 readable_messages = []
                 for message in chat_data:
                     try:
-                        content = message.get('content', '')
+                        content = message.get("content", "")
                         if not include_non_text and not self._is_text_message(content):
                             continue

@@ -430,9 +460,9 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                     # Concatenate messages based on rules
                     message_groups = self._concatenate_messages(
                         readable_messages,
-                        max_length=-1,
-                        time_window_minutes=-1,
-                        overlap_messages=0  # Keep 2 messages overlap between groups
+                        max_length=max_length,
+                        time_window_minutes=time_window_minutes,
+                        overlap_messages=0,  # No overlap between groups
                    )

                     # Create documents from concatenated groups
@@ -440,12 +470,19 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                         if count >= max_count and max_count > 0:
                             break

-                        doc_content, contact_name = self._create_concatenated_content(message_group, contact_name)
-                        doc = Document(text=doc_content, metadata={"contact_name": contact_name})
+                        doc_content, contact_name = self._create_concatenated_content(
+                            message_group, contact_name
+                        )
+                        doc = Document(
+                            text=doc_content,
+                            metadata={"contact_name": contact_name},
+                        )
                         docs.append(doc)
                         count += 1

-                    print(f"Created {len(message_groups)} concatenated message groups for {contact_name}")
+                    print(
+                        f"Created {len(message_groups)} concatenated message groups for {contact_name}"
+                    )

                 else:
                     # Original single-message processing
@@ -454,12 +491,12 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                             break

                         # Extract message information
-                        from_user = message.get('fromUser', '')
-                        to_user = message.get('toUser', '')
-                        content = message.get('content', '')
-                        message_text = message.get('message', '')
-                        create_time = message.get('createTime', 0)
-                        is_sent_from_self = message.get('isSentFromSelf', False)
+                        message.get("fromUser", "")
+                        message.get("toUser", "")
+                        content = message.get("content", "")
+                        message_text = message.get("message", "")
+                        create_time = message.get("createTime", 0)
+                        is_sent_from_self = message.get("isSentFromSelf", False)

                         # Handle content that might be dict or string
                         try:
@@ -480,8 +517,8 @@ Messages ({len(messages)} messages, {message_group['total_length']} chars):
                         if create_time:
                             try:
                                 timestamp = datetime.fromtimestamp(create_time)
-                                time_str = timestamp.strftime('%Y-%m-%d %H:%M:%S')
-                            except:
+                                time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+                            except (ValueError, OSError):
                                 time_str = str(create_time)
                         else:
                             time_str = "Unknown"
@@ -495,7 +532,9 @@ Message: {readable_text if readable_text else message_text}
 """

                         # Create document with embedded metadata
-                        doc = Document(text=doc_content, metadata={})
+                        doc = Document(
+                            text=doc_content, metadata={"contact_name": contact_name}
+                        )
                         docs.append(doc)
                         count += 1

@@ -512,7 +551,7 @@ Message: {readable_text if readable_text else message_text}
         return docs

     @staticmethod
-    def find_wechat_export_dirs() -> List[Path]:
+    def find_wechat_export_dirs() -> list[Path]:
         """
         Find all WeChat export directories.

@@ -523,10 +562,10 @@ Message: {readable_text if readable_text else message_text}

         # Look for common export directory names
         possible_dirs = [
-            Path("./wechat_export_test"),
             Path("./wechat_export"),
+            Path("./wechat_export_direct"),
             Path("./wechat_chat_history"),
-            Path("./chat_export")
+            Path("./chat_export"),
         ]

         for export_dir in possible_dirs:
@@ -534,13 +573,20 @@ Message: {readable_text if readable_text else message_text}
             json_files = list(export_dir.glob("*.json"))
             if json_files:
                 export_dirs.append(export_dir)
-                print(f"Found WeChat export directory: {export_dir} with {len(json_files)} files")
+                print(
+                    f"Found WeChat export directory: {export_dir} with {len(json_files)} files"
+                )

         print(f"Found {len(export_dirs)} WeChat export directories")
         return export_dirs

     @staticmethod
-    def export_chat_to_file(output_file: str = "wechat_chat_export.txt", max_count: int = 1000, export_dir: str = None, include_non_text: bool = False):
+    def export_chat_to_file(
+        output_file: str = "wechat_chat_export.txt",
+        max_count: int = 1000,
+        export_dir: str | None = None,
+        include_non_text: bool = False,
+    ):
         """
         Export WeChat chat history to a text file.

@@ -560,14 +606,14 @@ Message: {readable_text if readable_text else message_text}
         try:
             json_files = list(Path(export_dir).glob("*.json"))

-            with open(output_file, 'w', encoding='utf-8') as f:
+            with open(output_file, "w", encoding="utf-8") as f:
                 count = 0
                 for json_file in json_files:
                     if count >= max_count and max_count > 0:
                         break

                     try:
-                        with open(json_file, 'r', encoding='utf-8') as json_f:
+                        with open(json_file, encoding="utf-8") as json_f:
                             chat_data = json.load(json_f)

                         contact_name = json_file.stem
@@ -577,10 +623,10 @@ Message: {readable_text if readable_text else message_text}
                             if count >= max_count and max_count > 0:
                                 break

-                            from_user = message.get('fromUser', '')
-                            content = message.get('content', '')
-                            message_text = message.get('message', '')
-                            create_time = message.get('createTime', 0)
+                            from_user = message.get("fromUser", "")
+                            content = message.get("content", "")
+                            message_text = message.get("message", "")
+                            create_time = message.get("createTime", 0)

                             # Skip non-text messages unless requested
                             if not include_non_text:
@@ -595,8 +641,8 @@ Message: {readable_text if readable_text else message_text}
                             if create_time:
                                 try:
                                     timestamp = datetime.fromtimestamp(create_time)
-                                    time_str = timestamp.strftime('%Y-%m-%d %H:%M:%S')
-                                except:
+                                    time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+                                except (ValueError, OSError):
                                     time_str = str(create_time)
                             else:
                                 time_str = "Unknown"
@@ -613,7 +659,7 @@ Message: {readable_text if readable_text else message_text}
         except Exception as e:
             print(f"Error exporting WeChat chat history: {e}")

-    def export_wechat_chat_history(self, export_dir: str = "./wechat_export_direct") -> Optional[Path]:
+    def export_wechat_chat_history(self, export_dir: str = "./wechat_export_direct") -> Path | None:
         """
         Export WeChat chat history using wechat-exporter tool.

@@ -642,16 +688,21 @@ Message: {readable_text if readable_text else message_text}
         requirements_file = self.wechat_exporter_dir / "requirements.txt"
         if requirements_file.exists():
             print("Installing wechat-exporter requirements...")
-            subprocess.run([
-                "uv", "pip", "install", "-r", str(requirements_file)
-            ], check=True)
+            subprocess.run(["uv", "pip", "install", "-r", str(requirements_file)], check=True)

         # Run the export command
         print("Running wechat-exporter...")
-        result = subprocess.run([
-            sys.executable, str(self.wechat_exporter_dir / "main.py"),
-            "export-all", str(export_path)
-        ], capture_output=True, text=True, check=True)
+        result = subprocess.run(
+            [
+                sys.executable,
+                str(self.wechat_exporter_dir / "main.py"),
+                "export-all",
+                str(export_path),
+            ],
+            capture_output=True,
+            text=True,
+            check=True,
+        )

         print("Export command output:")
         print(result.stdout)
@@ -662,7 +713,9 @@ Message: {readable_text if readable_text else message_text}
         # Check if export was successful
         if export_path.exists() and any(export_path.glob("*.json")):
             json_files = list(export_path.glob("*.json"))
-            print(f"Successfully exported {len(json_files)} chat history files to {export_path}")
+            print(
+                f"Successfully exported {len(json_files)} chat history files to {export_path}"
+            )
             return export_path
         else:
             print("Export completed but no JSON files found")
@@ -678,7 +731,7 @@ Message: {readable_text if readable_text else message_text}
             print("Please ensure WeChat is running and WeChatTweak is installed.")
             return None

-    def find_or_export_wechat_data(self, export_dir: str = "./wechat_export_direct") -> List[Path]:
+    def find_or_export_wechat_data(self, export_dir: str = "./wechat_export_direct") -> list[Path]:
         """
         Find existing WeChat exports or create new ones.

@@ -697,7 +750,7 @@ Message: {readable_text if readable_text else message_text}
             Path("./wechat_export"),
             Path("./wechat_export_direct"),
             Path("./wechat_chat_history"),
-            Path("./chat_export")
+            Path("./chat_export"),
         ]

         for export_dir_path in possible_export_dirs:
@@ -714,6 +767,8 @@ Message: {readable_text if readable_text else message_text}
             if exported_path:
                 export_dirs = [exported_path]
             else:
-                print("Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed.")
+                print(
+                    "Failed to export WeChat data. Please ensure WeChat is running and WeChatTweak is installed."
+                )

         return export_dirs
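The concatenation parameters fixed in this diff (`max_length=max_length`, `time_window_minutes=time_window_minutes` instead of hard-coded -1) drive `_concatenate_messages`: a group is closed when the time gap exceeds the window or the accumulated text would exceed `max_length`. A simplified skeleton of that rule (a sketch only; overlap handling omitted, field names as in the reader above):

# Simplified sketch of the grouping rule used by _concatenate_messages.
def group_messages(msgs: list[dict], max_length: int = 128, window_min: int = 30) -> list[list[dict]]:
    groups: list[list[dict]] = []
    cur: list[dict] = []
    cur_len = 0
    last_t = None
    for m in msgs:
        text = m.get("message", "")
        t = m.get("createTime", 0)  # epoch seconds
        gap_min = (t - last_t) / 60 if last_t is not None else 0
        too_old = gap_min > window_min
        too_long = max_length != -1 and cur_len + len(text) > max_length
        if cur and (too_old or too_long):
            groups.append(cur)  # close the current group and start a new one
            cur, cur_len = [], 0
        cur.append(m)
        cur_len += len(text)
        last_t = t
    if cur:
        groups.append(cur)  # flush the final group
    return groups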
189 apps/wechat_rag.py Normal file
@@ -0,0 +1,189 @@
"""
WeChat History RAG example using the unified interface.
Supports WeChat chat history export and search.
"""

import subprocess
import sys
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))

from base_rag_example import BaseRAGExample

from .history_data.wechat_history import WeChatHistoryReader


class WeChatRAG(BaseRAGExample):
    """RAG example for WeChat chat history."""

    def __init__(self):
        # Set default values BEFORE calling super().__init__
        self.max_items_default = -1  # Match original default
        self.embedding_model_default = (
            "sentence-transformers/all-MiniLM-L6-v2"  # Fast 384-dim model
        )

        super().__init__(
            name="WeChat History",
            description="Process and query WeChat chat history with LEANN",
            default_index_name="wechat_history_magic_test_11Debug_new",
        )

    def _add_specific_arguments(self, parser):
        """Add WeChat-specific arguments."""
        wechat_group = parser.add_argument_group("WeChat Parameters")
        wechat_group.add_argument(
            "--export-dir",
            type=str,
            default="./wechat_export",
            help="Directory to store WeChat exports (default: ./wechat_export)",
        )
        wechat_group.add_argument(
            "--force-export",
            action="store_true",
            help="Force re-export of WeChat data even if exports exist",
        )
        wechat_group.add_argument(
            "--chunk-size", type=int, default=192, help="Text chunk size (default: 192)"
        )
        wechat_group.add_argument(
            "--chunk-overlap", type=int, default=64, help="Text chunk overlap (default: 64)"
        )

    def _export_wechat_data(self, export_dir: Path) -> bool:
        """Export WeChat data using wechattweak-cli."""
        print("Exporting WeChat data...")

        # Check if WeChat is running
        try:
            result = subprocess.run(["pgrep", "WeChat"], capture_output=True, text=True)
            if result.returncode != 0:
                print("WeChat is not running. Please start WeChat first.")
                return False
        except Exception:
            pass  # pgrep might not be available on all systems

        # Create export directory
        export_dir.mkdir(parents=True, exist_ok=True)

        # Run export command
        cmd = ["packages/wechat-exporter/wechattweak-cli", "export", str(export_dir)]

        try:
            print(f"Running: {' '.join(cmd)}")
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode == 0:
                print("WeChat data exported successfully!")
                return True
            else:
                print(f"Export failed: {result.stderr}")
                return False

        except FileNotFoundError:
            print("\nError: wechattweak-cli not found!")
            print("Please install it first:")
            print("  sudo packages/wechat-exporter/wechattweak-cli install")
            return False
        except Exception as e:
            print(f"Export error: {e}")
            return False

    async def load_data(self, args) -> list[str]:
        """Load WeChat history and convert to text chunks."""
        # Initialize WeChat reader with export capabilities
        reader = WeChatHistoryReader()

        # Find existing exports or create new ones using the centralized method
        export_dirs = reader.find_or_export_wechat_data(args.export_dir)
        if not export_dirs:
            print("Failed to find or export WeChat data. Trying to find any existing exports...")
            # Try to find any existing exports in common locations
            export_dirs = reader.find_wechat_export_dirs()
            if not export_dirs:
                print("No WeChat data found. Please ensure WeChat exports exist.")
                return []

        # Load documents from all found export directories
        all_documents = []
        total_processed = 0

        for i, export_dir in enumerate(export_dirs):
            print(f"\nProcessing WeChat export {i + 1}/{len(export_dirs)}: {export_dir}")

            try:
                # Apply max_items limit per export
                max_per_export = -1
                if args.max_items > 0:
                    remaining = args.max_items - total_processed
                    if remaining <= 0:
                        break
                    max_per_export = remaining

                documents = reader.load_data(
                    wechat_export_dir=str(export_dir),
                    max_count=max_per_export,
                    concatenate_messages=True,  # Enable message concatenation for better context
                )

                if documents:
                    print(f"Loaded {len(documents)} chat documents from {export_dir}")
                    all_documents.extend(documents)
                    total_processed += len(documents)
                else:
                    print(f"No documents loaded from {export_dir}")

            except Exception as e:
                print(f"Error processing {export_dir}: {e}")
                continue

        if not all_documents:
            print("No documents loaded from any source. Exiting.")
            return []

        print(f"\nTotal loaded {len(all_documents)} chat documents from {len(export_dirs)} exports")
        print("now starting to split into text chunks ... this may take some time")

        # Convert to text chunks with contact information
        all_texts = []
        for doc in all_documents:
            # Split the document into chunks
            from llama_index.core.node_parser import SentenceSplitter

            text_splitter = SentenceSplitter(
                chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
            )
            nodes = text_splitter.get_nodes_from_documents([doc])

            for node in nodes:
                # Add contact information to each chunk
|
||||||
|
contact_name = doc.metadata.get("contact_name", "Unknown")
|
||||||
|
text = f"[Contact] means the message is from: {contact_name}\n" + node.get_content()
|
||||||
|
all_texts.append(text)
|
||||||
|
|
||||||
|
print(f"Created {len(all_texts)} text chunks from {len(all_documents)} documents")
|
||||||
|
return all_texts
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
# Check platform
|
||||||
|
if sys.platform != "darwin":
|
||||||
|
print("\n⚠️ Warning: WeChat export is only supported on macOS")
|
||||||
|
print(" You can still query existing exports on other platforms\n")
|
||||||
|
|
||||||
|
# Example queries for WeChat RAG
|
||||||
|
print("\n💬 WeChat History RAG Example")
|
||||||
|
print("=" * 50)
|
||||||
|
print("\nExample queries you can try:")
|
||||||
|
print("- 'Show me conversations about travel plans'")
|
||||||
|
print("- 'Find group chats about weekend activities'")
|
||||||
|
print("- '我想买魔术师约翰逊的球衣,给我一些对应聊天记录?'")
|
||||||
|
print("- 'What did we discuss about the project last month?'")
|
||||||
|
print("\nNote: WeChat must be running for export to work\n")
|
||||||
|
|
||||||
|
rag = WeChatRAG()
|
||||||
|
asyncio.run(rag.run())
|
||||||
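The `load_data()` step above prefixes every chunk with the contact it came from before the chunks are indexed. The following is a minimal sketch of that step in isolation, using `Document` and `SentenceSplitter` from llama_index as the file does; the sample text and contact name are invented for illustration, and the chunk sizes mirror the CLI defaults.

```python
# Minimal sketch of the contact-prefixed chunking used in load_data().
# The sample document and "Alice" are hypothetical; chunk sizes mirror the defaults above.
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter

doc = Document(
    text="Let's plan the weekend trip. I found cheap flights for Friday night...",
    metadata={"contact_name": "Alice"},
)

splitter = SentenceSplitter(chunk_size=192, chunk_overlap=64)
for node in splitter.get_nodes_from_documents([doc]):
    contact_name = doc.metadata.get("contact_name", "Unknown")
    chunk = f"[Contact] means the message is from: {contact_name}\n" + node.get_content()
    print(chunk)
```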
assets/claude_code_leann.png (new binary file, 73 KiB; binary content not shown)
assets/mcp_leann.png (new binary file, 224 KiB; binary content not shown)
assets/wechat_user_group.JPG (new binary file, 152 KiB; binary content not shown)
@@ -1,9 +1,24 @@
-# 🧪 Leann Sanity Checks
+# 🧪 LEANN Benchmarks & Testing

-This directory contains comprehensive sanity checks for the Leann system, ensuring all components work correctly across different configurations.
+This directory contains performance benchmarks and comprehensive tests for the LEANN system, including backend comparisons and sanity checks across different configurations.

 ## 📁 Test Files

+### `diskann_vs_hnsw_speed_comparison.py`
+Performance comparison between DiskANN and HNSW backends:
+- ✅ **Search latency** comparison with both backends using recompute
+- ✅ **Index size** and **build time** measurements
+- ✅ **Score validity** testing (ensures no -inf scores)
+- ✅ **Configurable dataset sizes** for different scales
+
+```bash
+# Quick comparison with 500 docs, 10 queries
+python benchmarks/diskann_vs_hnsw_speed_comparison.py
+
+# Large-scale comparison with 2000 docs, 20 queries
+python benchmarks/diskann_vs_hnsw_speed_comparison.py 2000 20
+```
+
 ### `test_distance_functions.py`
 Tests all supported distance functions across DiskANN backend:
 - ✅ **MIPS** (Maximum Inner Product Search)
@@ -1,10 +1,11 @@
 import time
-import numpy as np
 import matplotlib.pyplot as plt
-import torch
-from sentence_transformers import SentenceTransformer
 import mlx.core as mx
+import numpy as np
+import torch
 from mlx_lm import load
+from sentence_transformers import SentenceTransformer
+
 # --- Configuration ---
 MODEL_NAME_TORCH = "Qwen/Qwen3-Embedding-0.6B"
@@ -18,12 +19,14 @@ DUMMY_SENTENCES = ["This is a test sentence for benchmarking." * 5] * max(BATCH_

 # --- Benchmark Functions ---b
+
+
 def benchmark_torch(model, sentences):
     start_time = time.time()
     model.encode(sentences, convert_to_numpy=True)
     end_time = time.time()
     return (end_time - start_time) * 1000  # Return time in ms


 def benchmark_mlx(model, tokenizer, sentences):
     start_time = time.time()

@@ -63,6 +66,7 @@ def benchmark_mlx(model, tokenizer, sentences):
     end_time = time.time()
     return (end_time - start_time) * 1000  # Return time in ms

+
 # --- Main Execution ---
 def main():
     print("--- Initializing Models ---")
@@ -98,7 +102,9 @@ def main():
         results_torch.append(np.mean(torch_times))

         # Benchmark MLX
-        mlx_times = [benchmark_mlx(model_mlx, tokenizer_mlx, sentences_batch) for _ in range(NUM_RUNS)]
+        mlx_times = [
+            benchmark_mlx(model_mlx, tokenizer_mlx, sentences_batch) for _ in range(NUM_RUNS)
+        ]
         results_mlx.append(np.mean(mlx_times))

     print("\n--- Benchmark Results (Average time per batch in ms) ---")
@@ -109,10 +115,16 @@ def main():
     # --- Plotting ---
     print("\n--- Generating Plot ---")
     plt.figure(figsize=(10, 6))
-    plt.plot(BATCH_SIZES, results_torch, marker='o', linestyle='-', label=f'PyTorch ({device})')
-    plt.plot(BATCH_SIZES, results_mlx, marker='s', linestyle='-', label='MLX')
+    plt.plot(
+        BATCH_SIZES,
+        results_torch,
+        marker="o",
+        linestyle="-",
+        label=f"PyTorch ({device})",
+    )
+    plt.plot(BATCH_SIZES, results_mlx, marker="s", linestyle="-", label="MLX")

-    plt.title(f'Embedding Performance: MLX vs PyTorch\nModel: {MODEL_NAME_TORCH}')
+    plt.title(f"Embedding Performance: MLX vs PyTorch\nModel: {MODEL_NAME_TORCH}")
     plt.xlabel("Batch Size")
     plt.ylabel("Average Time per Batch (ms)")
     plt.xticks(BATCH_SIZES)
@@ -124,5 +136,6 @@ def main():
     plt.savefig(output_filename)
     print(f"Plot saved to {output_filename}")

+
 if __name__ == "__main__":
     main()
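Both benchmark functions in this file follow the same wall-clock pattern: encode one batch, subtract timestamps, report milliseconds, and average over NUM_RUNS in main(). A minimal standalone version of that pattern, assuming only sentence-transformers is installed and using a stand-in model name and an invented sentence batch, looks like this:

```python
# Hedged sketch of the timing pattern used above: wall-clock ms per batch, averaged over runs.
import time

import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # stand-in model
sentences = ["This is a test sentence for benchmarking." * 5] * 16     # one batch of 16


def time_encode_ms(model, sentences):
    start = time.time()
    model.encode(sentences, convert_to_numpy=True)
    return (time.time() - start) * 1000


runs = [time_encode_ms(model, sentences) for _ in range(5)]
print(f"avg per batch: {np.mean(runs):.1f} ms")
```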
benchmarks/benchmark_no_recompute.py (new file, 148 lines)
@@ -0,0 +1,148 @@
import argparse
import os
import time
from pathlib import Path

from leann import LeannBuilder, LeannSearcher


def _meta_exists(index_path: str) -> bool:
    p = Path(index_path)
    return (p.parent / f"{p.stem}.meta.json").exists()


def ensure_index(index_path: str, backend_name: str, num_docs: int, is_recompute: bool) -> None:
    # if _meta_exists(index_path):
    #     return
    kwargs = {}
    if backend_name == "hnsw":
        kwargs["is_compact"] = is_recompute
    builder = LeannBuilder(
        backend_name=backend_name,
        embedding_model=os.getenv("LEANN_EMBED_MODEL", "facebook/contriever"),
        embedding_mode=os.getenv("LEANN_EMBED_MODE", "sentence-transformers"),
        graph_degree=32,
        complexity=64,
        is_recompute=is_recompute,
        num_threads=4,
        **kwargs,
    )
    for i in range(num_docs):
        builder.add_text(
            f"This is a test document number {i}. It contains some repeated text for benchmarking."
        )
    builder.build_index(index_path)


def _bench_group(
    index_path: str,
    recompute: bool,
    query: str,
    repeats: int,
    complexity: int = 32,
    top_k: int = 10,
) -> float:
    # Independent searcher per group; fixed port when recompute
    searcher = LeannSearcher(index_path=index_path)

    # Warm-up once
    _ = searcher.search(
        query,
        top_k=top_k,
        complexity=complexity,
        recompute_embeddings=recompute,
    )

    def _once() -> float:
        t0 = time.time()
        _ = searcher.search(
            query,
            top_k=top_k,
            complexity=complexity,
            recompute_embeddings=recompute,
        )
        return time.time() - t0

    if repeats <= 1:
        t = _once()
    else:
        vals = [_once() for _ in range(repeats)]
        vals.sort()
        t = vals[len(vals) // 2]

    searcher.cleanup()
    return t


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-docs", type=int, default=5000)
    parser.add_argument("--repeats", type=int, default=3)
    parser.add_argument("--complexity", type=int, default=32)
    args = parser.parse_args()

    base = Path.cwd() / ".leann" / "indexes" / f"bench_n{args.num_docs}"
    base.parent.mkdir(parents=True, exist_ok=True)
    # ---------- Build HNSW variants ----------
    hnsw_r = str(base / f"hnsw_recompute_n{args.num_docs}.leann")
    hnsw_nr = str(base / f"hnsw_norecompute_n{args.num_docs}.leann")
    ensure_index(hnsw_r, "hnsw", args.num_docs, True)
    ensure_index(hnsw_nr, "hnsw", args.num_docs, False)

    # ---------- Build DiskANN variants ----------
    diskann_r = str(base / "diskann_r.leann")
    diskann_nr = str(base / "diskann_nr.leann")
    ensure_index(diskann_r, "diskann", args.num_docs, True)
    ensure_index(diskann_nr, "diskann", args.num_docs, False)

    # ---------- Helpers ----------
    def _size_for(prefix: str) -> int:
        p = Path(prefix)
        base_dir = p.parent
        stem = p.stem
        total = 0
        for f in base_dir.iterdir():
            if f.is_file() and f.name.startswith(stem):
                total += f.stat().st_size
        return total

    # ---------- HNSW benchmark ----------
    t_hnsw_r = _bench_group(
        hnsw_r, True, "test document number 42", repeats=args.repeats, complexity=args.complexity
    )
    t_hnsw_nr = _bench_group(
        hnsw_nr, False, "test document number 42", repeats=args.repeats, complexity=args.complexity
    )
    size_hnsw_r = _size_for(hnsw_r)
    size_hnsw_nr = _size_for(hnsw_nr)

    print("Benchmark results (HNSW):")
    print(f"  recompute=True: search_time={t_hnsw_r:.3f}s, size={size_hnsw_r / 1024 / 1024:.1f}MB")
    print(
        f"  recompute=False: search_time={t_hnsw_nr:.3f}s, size={size_hnsw_nr / 1024 / 1024:.1f}MB"
    )
    print("  Expectation: no-recompute should be faster but larger on disk.")

    # ---------- DiskANN benchmark ----------
    t_diskann_r = _bench_group(
        diskann_r, True, "DiskANN R test doc 123", repeats=args.repeats, complexity=args.complexity
    )
    t_diskann_nr = _bench_group(
        diskann_nr,
        False,
        "DiskANN NR test doc 123",
        repeats=args.repeats,
        complexity=args.complexity,
    )
    size_diskann_r = _size_for(diskann_r)
    size_diskann_nr = _size_for(diskann_nr)

    print("\nBenchmark results (DiskANN):")
    print(f"  build(recompute=True, partition): size={size_diskann_r / 1024 / 1024:.1f}MB")
    print(f"  build(recompute=False): size={size_diskann_nr / 1024 / 1024:.1f}MB")
    print(f"  search recompute=True (final rerank): {t_diskann_r:.3f}s")
    print(f"  search recompute=False (PQ only): {t_diskann_nr:.3f}s")


if __name__ == "__main__":
    main()
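The benchmark above exercises the same small LEANN surface for every variant: build an index with LeannBuilder, then time LeannSearcher.search with recompute_embeddings toggled. A stripped-down version of that flow is sketched below; the index path and query are made up, and the keyword arguments are limited to those the script itself already uses.

```python
# Minimal build-and-search sketch using only the LEANN calls exercised by the benchmark above.
from leann import LeannBuilder, LeannSearcher

index_path = "./.leann/indexes/demo/demo.leann"  # hypothetical location

builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="facebook/contriever",
    embedding_mode="sentence-transformers",
    is_recompute=True,  # recompute embeddings at search time (smaller index on disk)
    is_compact=True,
)
for i in range(100):
    builder.add_text(f"This is a test document number {i}.")
builder.build_index(index_path)

searcher = LeannSearcher(index_path=index_path)
results = searcher.search("test document number 42", top_k=5, recompute_embeddings=True)
searcher.cleanup()
```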
@@ -3,14 +3,15 @@
 Memory comparison between Faiss HNSW and LEANN HNSW backend
 """

+import gc
 import logging
 import os
+import subprocess
 import sys
 import time
-import psutil
-import gc
-import subprocess
 from pathlib import Path
+
+import psutil
 from llama_index.core.node_parser import SentenceSplitter

 # Setup logging
@@ -61,7 +62,7 @@ def test_faiss_hnsw():

     try:
         result = subprocess.run(
-            [sys.executable, "examples/faiss_only.py"],
+            [sys.executable, "benchmarks/faiss_only.py"],
             capture_output=True,
             text=True,
             timeout=300,
@@ -83,9 +84,7 @@ def test_faiss_hnsw():

         for line in lines:
             if "Peak Memory:" in line:
-                peak_memory = float(
-                    line.split("Peak Memory:")[1].split("MB")[0].strip()
-                )
+                peak_memory = float(line.split("Peak Memory:")[1].split("MB")[0].strip())

         return {"peak_memory": peak_memory}

@@ -111,13 +110,12 @@ def test_leann_hnsw():

     tracker.checkpoint("After imports")

+    from leann.api import LeannBuilder
     from llama_index.core import SimpleDirectoryReader
-    from leann.api import LeannBuilder, LeannSearcher

     # Load and parse documents
     documents = SimpleDirectoryReader(
-        "examples/data",
+        "data",
         recursive=True,
         encoding="utf-8",
         required_exts=[".pdf", ".txt", ".md"],
@@ -202,11 +200,9 @@ def test_leann_hnsw():
     searcher = LeannSearcher(index_path)
     tracker.checkpoint("After searcher loading")

-
-
     print("Running search queries...")
     queries = [
         "什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发",
         "What is LEANN and how does it work?",
         "华为诺亚方舟实验室的主要研究内容",
     ]
@@ -304,21 +300,15 @@ def main():

         print("\nLEANN vs Faiss Performance:")
         memory_saving = faiss_results["peak_memory"] - leann_results["peak_memory"]
-        print(
-            f"  Search Memory: {memory_ratio:.1f}x less ({memory_saving:.1f} MB saved)"
-        )
+        print(f"  Search Memory: {memory_ratio:.1f}x less ({memory_saving:.1f} MB saved)")

         # Storage comparison
         if leann_storage_size > faiss_storage_size:
             storage_ratio = leann_storage_size / faiss_storage_size
-            print(
-                f"  Storage Size: {storage_ratio:.1f}x larger (LEANN uses more storage)"
-            )
+            print(f"  Storage Size: {storage_ratio:.1f}x larger (LEANN uses more storage)")
         elif faiss_storage_size > leann_storage_size:
             storage_ratio = faiss_storage_size / leann_storage_size
-            print(
-                f"  Storage Size: {storage_ratio:.1f}x smaller (LEANN uses less storage)"
-            )
+            print(f"  Storage Size: {storage_ratio:.1f}x smaller (LEANN uses less storage)")
         else:
             print("  Storage Size: similar")
     else:
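The MemoryTracker these tests call at each checkpoint is not shown in this diff. The sketch below is a minimal stand-in based on psutil (which the script already imports), so the checkpoint lines above have something concrete to map onto; the exact implementation in the repo may differ.

```python
# Minimal stand-in for the MemoryTracker checkpoints used above (assumed implementation).
import psutil


class MemoryTracker:
    def __init__(self, name: str):
        self.name = name
        self.process = psutil.Process()
        self.peak_mb = 0.0

    def checkpoint(self, label: str) -> float:
        # Resident set size of the current process, in MB.
        rss_mb = self.process.memory_info().rss / 1024 / 1024
        self.peak_mb = max(self.peak_mb, rss_mb)
        print(f"[{self.name}] {label}: {rss_mb:.1f} MB (peak {self.peak_mb:.1f} MB)")
        return rss_mb


tracker = MemoryTracker("Faiss HNSW")
tracker.checkpoint("Initial")
```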
data/README.md → benchmarks/data/README.md (renamed; mode changed from normal to executable, 0 content changes)
benchmarks/diskann_vs_hnsw_speed_comparison.py (new file, 286 lines)
@@ -0,0 +1,286 @@
#!/usr/bin/env python3
"""
DiskANN vs HNSW Search Performance Comparison

This benchmark compares search performance between DiskANN and HNSW backends:
- DiskANN: With graph partitioning enabled (is_recompute=True)
- HNSW: With recompute enabled (is_recompute=True)
- Tests performance across different dataset sizes
- Measures search latency, recall, and index size
"""

import gc
import multiprocessing as mp
import tempfile
import time
from pathlib import Path
from typing import Any

import numpy as np

# Prefer 'fork' start method to avoid POSIX semaphore leaks on macOS
try:
    mp.set_start_method("fork", force=True)
except Exception:
    pass


def create_test_texts(n_docs: int) -> list[str]:
    """Create synthetic test documents for benchmarking."""
    np.random.seed(42)
    topics = [
        "machine learning and artificial intelligence",
        "natural language processing and text analysis",
        "computer vision and image recognition",
        "data science and statistical analysis",
        "deep learning and neural networks",
        "information retrieval and search engines",
        "database systems and data management",
        "software engineering and programming",
        "cybersecurity and network protection",
        "cloud computing and distributed systems",
    ]

    texts = []
    for i in range(n_docs):
        topic = topics[i % len(topics)]
        variation = np.random.randint(1, 100)
        text = (
            f"This is document {i} about {topic}. Content variation {variation}. "
            f"Additional information about {topic} with details and examples. "
            f"Technical discussion of {topic} including implementation aspects."
        )
        texts.append(text)

    return texts


def benchmark_backend(
    backend_name: str, texts: list[str], test_queries: list[str], backend_kwargs: dict[str, Any]
) -> dict[str, float]:
    """Benchmark a specific backend with the given configuration."""
    from leann.api import LeannBuilder, LeannSearcher

    print(f"\n🔧 Testing {backend_name.upper()} backend...")

    with tempfile.TemporaryDirectory() as temp_dir:
        index_path = str(Path(temp_dir) / f"benchmark_{backend_name}.leann")

        # Build index
        print(f"📦 Building {backend_name} index with {len(texts)} documents...")
        start_time = time.time()

        builder = LeannBuilder(
            backend_name=backend_name,
            embedding_model="facebook/contriever",
            embedding_mode="sentence-transformers",
            **backend_kwargs,
        )

        for text in texts:
            builder.add_text(text)

        builder.build_index(index_path)
        build_time = time.time() - start_time

        # Measure index size
        index_dir = Path(index_path).parent
        index_files = list(index_dir.glob(f"{Path(index_path).stem}.*"))
        total_size = sum(f.stat().st_size for f in index_files if f.is_file())
        size_mb = total_size / (1024 * 1024)

        print(f"  ✅ Build completed in {build_time:.2f}s, index size: {size_mb:.1f}MB")

        # Search benchmark
        print("🔍 Running search benchmark...")
        searcher = LeannSearcher(index_path)

        search_times = []
        all_results = []

        for query in test_queries:
            start_time = time.time()
            results = searcher.search(query, top_k=5)
            search_time = time.time() - start_time
            search_times.append(search_time)
            all_results.append(results)

        avg_search_time = np.mean(search_times) * 1000  # Convert to ms
        print(f"  ✅ Average search time: {avg_search_time:.1f}ms")

        # Check for valid scores (detect -inf issues)
        all_scores = [
            result.score
            for results in all_results
            for result in results
            if result.score is not None
        ]
        valid_scores = [
            score for score in all_scores if score != float("-inf") and score != float("inf")
        ]
        score_validity_rate = len(valid_scores) / len(all_scores) if all_scores else 0

        # Clean up (ensure embedding server shutdown and object GC)
        try:
            if hasattr(searcher, "cleanup"):
                searcher.cleanup()
            del searcher
            del builder
            gc.collect()
        except Exception as e:
            print(f"⚠️ Warning: Resource cleanup error: {e}")

        return {
            "build_time": build_time,
            "avg_search_time_ms": avg_search_time,
            "index_size_mb": size_mb,
            "score_validity_rate": score_validity_rate,
        }


def run_comparison(n_docs: int = 500, n_queries: int = 10):
    """Run performance comparison between DiskANN and HNSW."""
    print("🚀 Starting DiskANN vs HNSW Performance Comparison")
    print(f"📊 Dataset: {n_docs} documents, {n_queries} test queries")

    # Create test data
    texts = create_test_texts(n_docs)
    test_queries = [
        "machine learning algorithms",
        "natural language processing",
        "computer vision techniques",
        "data analysis methods",
        "neural network architectures",
        "database query optimization",
        "software development practices",
        "security vulnerabilities",
        "cloud infrastructure",
        "distributed computing",
    ][:n_queries]

    # HNSW benchmark
    hnsw_results = benchmark_backend(
        backend_name="hnsw",
        texts=texts,
        test_queries=test_queries,
        backend_kwargs={
            "is_recompute": True,  # Enable recompute for fair comparison
            "M": 16,
            "efConstruction": 200,
        },
    )

    # DiskANN benchmark
    diskann_results = benchmark_backend(
        backend_name="diskann",
        texts=texts,
        test_queries=test_queries,
        backend_kwargs={
            "is_recompute": True,  # Enable graph partitioning
            "num_neighbors": 32,
            "search_list_size": 50,
        },
    )

    # Performance comparison
    print("\n📈 Performance Comparison Results")
    print(f"{'=' * 60}")
    print(f"{'Metric':<25} {'HNSW':<15} {'DiskANN':<15} {'Speedup':<10}")
    print(f"{'-' * 60}")

    # Build time comparison
    build_speedup = hnsw_results["build_time"] / diskann_results["build_time"]
    print(
        f"{'Build Time (s)':<25} {hnsw_results['build_time']:<15.2f} {diskann_results['build_time']:<15.2f} {build_speedup:<10.2f}x"
    )

    # Search time comparison
    search_speedup = hnsw_results["avg_search_time_ms"] / diskann_results["avg_search_time_ms"]
    print(
        f"{'Search Time (ms)':<25} {hnsw_results['avg_search_time_ms']:<15.1f} {diskann_results['avg_search_time_ms']:<15.1f} {search_speedup:<10.2f}x"
    )

    # Index size comparison
    size_ratio = diskann_results["index_size_mb"] / hnsw_results["index_size_mb"]
    print(
        f"{'Index Size (MB)':<25} {hnsw_results['index_size_mb']:<15.1f} {diskann_results['index_size_mb']:<15.1f} {size_ratio:<10.2f}x"
    )

    # Score validity
    print(
        f"{'Score Validity (%)':<25} {hnsw_results['score_validity_rate'] * 100:<15.1f} {diskann_results['score_validity_rate'] * 100:<15.1f}"
    )

    print(f"{'=' * 60}")
    print("\n🎯 Summary:")
    if search_speedup > 1:
        print(f"  DiskANN is {search_speedup:.2f}x faster than HNSW for search")
    else:
        print(f"  HNSW is {1 / search_speedup:.2f}x faster than DiskANN for search")

    if size_ratio > 1:
        print(f"  DiskANN uses {size_ratio:.2f}x more storage than HNSW")
    else:
        print(f"  DiskANN uses {1 / size_ratio:.2f}x less storage than HNSW")

    print(
        f"  Both backends achieved {min(hnsw_results['score_validity_rate'], diskann_results['score_validity_rate']) * 100:.1f}% score validity"
    )


if __name__ == "__main__":
    import sys

    try:
        # Handle help request
        if len(sys.argv) > 1 and sys.argv[1] in ["-h", "--help", "help"]:
            print("DiskANN vs HNSW Performance Comparison")
            print("=" * 50)
            print(f"Usage: python {sys.argv[0]} [n_docs] [n_queries]")
            print()
            print("Arguments:")
            print("  n_docs     Number of documents to index (default: 500)")
            print("  n_queries  Number of test queries to run (default: 10)")
            print()
            print("Examples:")
            print("  python benchmarks/diskann_vs_hnsw_speed_comparison.py")
            print("  python benchmarks/diskann_vs_hnsw_speed_comparison.py 1000")
            print("  python benchmarks/diskann_vs_hnsw_speed_comparison.py 2000 20")
            sys.exit(0)

        # Parse command line arguments
        n_docs = int(sys.argv[1]) if len(sys.argv) > 1 else 500
        n_queries = int(sys.argv[2]) if len(sys.argv) > 2 else 10

        print("DiskANN vs HNSW Performance Comparison")
        print("=" * 50)
        print(f"Dataset: {n_docs} documents, {n_queries} queries")
        print()

        run_comparison(n_docs=n_docs, n_queries=n_queries)

    except KeyboardInterrupt:
        print("\n⚠️ Benchmark interrupted by user")
        sys.exit(130)
    except Exception as e:
        print(f"\n❌ Benchmark failed: {e}")
        sys.exit(1)
    finally:
        # Ensure clean exit (forceful to prevent rare hangs from atexit/threads)
        try:
            gc.collect()
            print("\n🧹 Cleanup completed")
            # Flush stdio to ensure message is visible before hard-exit
            try:
                import sys as _sys

                _sys.stdout.flush()
                _sys.stderr.flush()
            except Exception:
                pass
        except Exception:
            pass
        # Use os._exit to bypass atexit handlers that may hang in rare cases
        import os as _os

        _os._exit(0)
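Besides the CLI entry point above, run_comparison can also be driven directly from Python. The small driver below is a sketch only: it assumes the benchmarks directory is importable (for example by running from the repository root and putting it on sys.path); otherwise the CLI invocations shown in the README remain the supported path.

```python
# Hedged sketch: driving the comparison programmatically instead of via argv.
# Assumes "benchmarks" can be placed on sys.path from the repo root; otherwise use the CLI.
import sys

sys.path.insert(0, "benchmarks")
from diskann_vs_hnsw_speed_comparison import run_comparison

for n_docs in (500, 2000):
    run_comparison(n_docs=n_docs, n_queries=10)
```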
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
 """Test only Faiss HNSW"""

+import os
 import sys
 import time
+
 import psutil
-import gc
-import os


 def get_memory_usage():
@@ -37,20 +37,20 @@ def main():
         import faiss
     except ImportError:
         print("Faiss is not installed.")
-        print("Please install it with `uv pip install faiss-cpu` and you can then run this script again")
+        print(
+            "Please install it with `uv pip install faiss-cpu` and you can then run this script again"
+        )
         sys.exit(1)

     from llama_index.core import (
-        SimpleDirectoryReader,
-        VectorStoreIndex,
-        StorageContext,
         Settings,
-        node_parser,
-        Document,
+        SimpleDirectoryReader,
+        StorageContext,
+        VectorStoreIndex,
     )
     from llama_index.core.node_parser import SentenceSplitter
-    from llama_index.vector_stores.faiss import FaissVectorStore
     from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+    from llama_index.vector_stores.faiss import FaissVectorStore

     tracker = MemoryTracker("Faiss HNSW")
     tracker.checkpoint("Initial")
@@ -65,7 +65,7 @@ def main():
     tracker.checkpoint("After Faiss index creation")

     documents = SimpleDirectoryReader(
-        "examples/data",
+        "data",
         recursive=True,
         encoding="utf-8",
         required_exts=[".pdf", ".txt", ".md"],
@@ -90,8 +90,9 @@ def main():
             vector_store=vector_store, persist_dir="./storage_faiss"
         )
         from llama_index.core import load_index_from_storage
+
         index = load_index_from_storage(storage_context=storage_context)
-        print(f"Index loaded from ./storage_faiss")
+        print("Index loaded from ./storage_faiss")
         tracker.checkpoint("After loading existing index")
         index_loaded = True
     except Exception as e:
@@ -99,6 +100,7 @@ def main():
         print("Cleaning up corrupted index and building new one...")
         # Clean up corrupted index
         import shutil
+
         if os.path.exists("./storage_faiss"):
             shutil.rmtree("./storage_faiss")

@@ -109,9 +111,7 @@ def main():
         vector_store = FaissVectorStore(faiss_index=faiss_index)
         storage_context = StorageContext.from_defaults(vector_store=vector_store)
         index = VectorStoreIndex.from_documents(
-            documents,
-            storage_context=storage_context,
-            transformations=[node_parser]
+            documents, storage_context=storage_context, transformations=[node_parser]
        )
         tracker.checkpoint("After index building")

@@ -127,7 +127,7 @@ def main():

     query_engine = index.as_query_engine(similarity_top_k=20)
     queries = [
         "什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发",
         "What is LEANN and how does it work?",
         "华为诺亚方舟实验室的主要研究内容",
     ]
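These hunks touch both the build path and the reload path of the Faiss baseline. A condensed sketch of the flow they rely on is shown below; the embedding dimension (384) and HNSW fanout (32) are illustrative values, the persist call is standard llama_index usage rather than a line taken from this file, and the reload branch mirrors the try-block above.

```python
# Condensed sketch of the Faiss-backed llama_index flow exercised by this script.
# Dimension 384 and M=32 are illustrative, not taken from the file.
import faiss
from llama_index.core import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.vector_stores.faiss import FaissVectorStore

documents = SimpleDirectoryReader(
    "data", recursive=True, required_exts=[".pdf", ".txt", ".md"]
).load_data()

faiss_index = faiss.IndexHNSWFlat(384, 32)
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
index.storage_context.persist(persist_dir="./storage_faiss")

# On a later run, reload rather than rebuild (mirrors the try-branch above):
storage_context = StorageContext.from_defaults(
    vector_store=vector_store, persist_dir="./storage_faiss"
)
index = load_index_from_storage(storage_context=storage_context)
query_engine = index.as_query_engine(similarity_top_k=20)
```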
@@ -2,20 +2,20 @@

 import argparse
 import time
+from contextlib import contextmanager
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Tuple

 import numpy as np
 import torch
 from torch import nn
-from transformers import AutoModel, BitsAndBytesConfig
 from tqdm import tqdm
-from contextlib import contextmanager
+from transformers import AutoModel, BitsAndBytesConfig


 @dataclass
 class BenchmarkConfig:
     model_path: str
-    batch_sizes: List[int]
+    batch_sizes: list[int]
     seq_length: int
     num_runs: int
     use_fp16: bool = True
@@ -32,13 +32,11 @@ class GraphContainer:
     def __init__(self, model: nn.Module, seq_length: int):
         self.model = model
         self.seq_length = seq_length
-        self.graphs: Dict[int, 'GraphWrapper'] = {}
+        self.graphs: dict[int, GraphWrapper] = {}

-    def get_or_create(self, batch_size: int) -> 'GraphWrapper':
+    def get_or_create(self, batch_size: int) -> "GraphWrapper":
         if batch_size not in self.graphs:
-            self.graphs[batch_size] = GraphWrapper(
-                self.model, batch_size, self.seq_length
-            )
+            self.graphs[batch_size] = GraphWrapper(self.model, batch_size, self.seq_length)
         return self.graphs[batch_size]


@@ -55,13 +53,13 @@ class GraphWrapper:
         self._warmup()

         # Only use CUDA graphs on NVIDIA GPUs
-        if torch.cuda.is_available() and hasattr(torch.cuda, 'CUDAGraph'):
+        if torch.cuda.is_available() and hasattr(torch.cuda, "CUDAGraph"):
             # Capture graph
             self.graph = torch.cuda.CUDAGraph()
             with torch.cuda.graph(self.graph):
                 self.static_output = self.model(
                     input_ids=self.static_input,
-                    attention_mask=self.static_attention_mask
+                    attention_mask=self.static_attention_mask,
                 )
             self.use_cuda_graph = True
         else:
@@ -79,9 +77,7 @@ class GraphWrapper:

     def _create_random_batch(self, batch_size: int, seq_length: int) -> torch.Tensor:
         return torch.randint(
-            0, 1000, (batch_size, seq_length),
-            device=self.device,
-            dtype=torch.long
+            0, 1000, (batch_size, seq_length), device=self.device, dtype=torch.long
         )

     def _warmup(self, num_warmup: int = 3):
@@ -89,7 +85,7 @@ class GraphWrapper:
         for _ in range(num_warmup):
             self.model(
                 input_ids=self.static_input,
-                attention_mask=self.static_attention_mask
+                attention_mask=self.static_attention_mask,
             )

     def __call__(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
@@ -133,8 +129,12 @@ class ModelOptimizer:
             print("- Using FP16 precision")

         # Check if using SDPA (only on CUDA)
-        if torch.cuda.is_available() and torch.version.cuda and float(torch.version.cuda[:3]) >= 11.6:
-            if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
+        if (
+            torch.cuda.is_available()
+            and torch.version.cuda
+            and float(torch.version.cuda[:3]) >= 11.6
+        ):
+            if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
                 print("- Using PyTorch SDPA (scaled_dot_product_attention)")
             else:
                 print("- PyTorch SDPA not available")
@@ -142,7 +142,8 @@ class ModelOptimizer:
         # Flash Attention (only on CUDA)
         if config.use_flash_attention and torch.cuda.is_available():
             try:
-                from flash_attn.flash_attention import FlashAttention
+                from flash_attn.flash_attention import FlashAttention  # noqa: F401
+
                 print("- Flash Attention 2 available")
                 if hasattr(model.config, "attention_mode"):
                     model.config.attention_mode = "flash_attention_2"
@@ -153,8 +154,9 @@ class ModelOptimizer:
         # Memory efficient attention (only on CUDA)
         if torch.cuda.is_available():
             try:
-                from xformers.ops import memory_efficient_attention
-                if hasattr(model, 'enable_xformers_memory_efficient_attention'):
+                from xformers.ops import memory_efficient_attention  # noqa: F401
+
+                if hasattr(model, "enable_xformers_memory_efficient_attention"):
                     model.enable_xformers_memory_efficient_attention()
                     print("- Enabled xformers memory efficient attention")
                 else:
@@ -220,7 +222,7 @@ class Benchmark:
             self.graphs = None
             self.timer = Timer()
         except Exception as e:
-            print(f"ERROR in benchmark initialization: {str(e)}")
+            print(f"ERROR in benchmark initialization: {e!s}")
             raise

     def _load_model(self) -> nn.Module:
@@ -230,15 +232,17 @@ class Benchmark:
|
|||||||
# Int4 quantization using HuggingFace integration
|
# Int4 quantization using HuggingFace integration
|
||||||
if self.config.use_int4:
|
if self.config.use_int4:
|
||||||
import bitsandbytes as bnb
|
import bitsandbytes as bnb
|
||||||
|
|
||||||
print(f"- bitsandbytes version: {bnb.__version__}")
|
print(f"- bitsandbytes version: {bnb.__version__}")
|
||||||
|
|
||||||
# 检查是否使用自定义的8bit量化
|
# Check if using custom 8bit quantization
|
||||||
if hasattr(self.config, 'use_linear8bitlt') and self.config.use_linear8bitlt:
|
if hasattr(self.config, "use_linear8bitlt") and self.config.use_linear8bitlt:
|
||||||
print("- Using custom Linear8bitLt replacement for all linear layers")
|
print("- Using custom Linear8bitLt replacement for all linear layers")
|
||||||
|
|
||||||
# 加载原始模型(不使用量化配置)
|
# Load original model (without quantization config)
|
||||||
import bitsandbytes as bnb
|
import bitsandbytes as bnb
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
# set default to half
|
# set default to half
|
||||||
torch.set_default_dtype(torch.float16)
|
torch.set_default_dtype(torch.float16)
|
||||||
compute_dtype = torch.float16 if self.config.use_fp16 else torch.float32
|
compute_dtype = torch.float16 if self.config.use_fp16 else torch.float32
|
||||||
@@ -247,52 +251,58 @@ class Benchmark:
|
|||||||
torch_dtype=compute_dtype,
|
torch_dtype=compute_dtype,
|
||||||
)
|
)
|
||||||
|
|
||||||
# 定义替换函数
|
# Define replacement function
|
||||||
def replace_linear_with_linear8bitlt(model):
|
def replace_linear_with_linear8bitlt(model):
|
||||||
"""递归地将模型中的所有nn.Linear层替换为Linear8bitLt"""
|
"""Recursively replace all nn.Linear layers with Linear8bitLt"""
|
||||||
for name, module in list(model.named_children()):
|
for name, module in list(model.named_children()):
|
||||||
if isinstance(module, nn.Linear):
|
if isinstance(module, nn.Linear):
|
||||||
# 获取原始线性层的参数
|
# Get original linear layer parameters
|
||||||
in_features = module.in_features
|
in_features = module.in_features
|
||||||
out_features = module.out_features
|
out_features = module.out_features
|
||||||
bias = module.bias is not None
|
bias = module.bias is not None
|
||||||
|
|
||||||
# 创建8bit线性层
|
# Create 8bit linear layer
|
||||||
# print size
|
# print size
|
||||||
print(f"in_features: {in_features}, out_features: {out_features}")
|
print(f"in_features: {in_features}, out_features: {out_features}")
|
||||||
new_module = bnb.nn.Linear8bitLt(
|
new_module = bnb.nn.Linear8bitLt(
|
||||||
in_features,
|
in_features,
|
||||||
out_features,
|
out_features,
|
||||||
bias=bias,
|
bias=bias,
|
||||||
has_fp16_weights=False
|
has_fp16_weights=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
# 复制权重和偏置
|
# Copy weights and bias
|
||||||
new_module.weight.data = module.weight.data
|
new_module.weight.data = module.weight.data
|
||||||
if bias:
|
if bias:
|
||||||
new_module.bias.data = module.bias.data
|
new_module.bias.data = module.bias.data
|
||||||
|
|
||||||
# 替换模块
|
# Replace module
|
||||||
setattr(model, name, new_module)
|
setattr(model, name, new_module)
|
||||||
else:
|
else:
|
||||||
# 递归处理子模块
|
# Process child modules recursively
|
||||||
replace_linear_with_linear8bitlt(module)
|
replace_linear_with_linear8bitlt(module)
|
||||||
|
|
||||||
return model
|
return model
|
||||||
|
|
||||||
# 替换所有线性层
|
# Replace all linear layers
|
||||||
model = replace_linear_with_linear8bitlt(model)
|
model = replace_linear_with_linear8bitlt(model)
|
||||||
# add torch compile
|
# add torch compile
|
||||||
model = torch.compile(model)
|
model = torch.compile(model)
|
||||||
|
|
||||||
# 将模型移到GPU(量化发生在这里)
|
# Move model to GPU (quantization happens here)
|
||||||
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
|
device = (
|
||||||
|
"cuda"
|
||||||
|
if torch.cuda.is_available()
|
||||||
|
else "mps"
|
||||||
|
if torch.backends.mps.is_available()
|
||||||
|
else "cpu"
|
||||||
|
)
|
||||||
model = model.to(device)
|
model = model.to(device)
|
||||||
|
|
||||||
print("- All linear layers replaced with Linear8bitLt")
|
print("- All linear layers replaced with Linear8bitLt")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# 使用原来的Int4量化方法
|
# Use original Int4 quantization method
|
||||||
print("- Using bitsandbytes for Int4 quantization")
|
print("- Using bitsandbytes for Int4 quantization")
|
||||||
|
|
||||||
# Create quantization config
|
# Create quantization config
|
||||||
@@ -302,7 +312,7 @@ class Benchmark:
|
|||||||
load_in_4bit=True,
|
load_in_4bit=True,
|
||||||
bnb_4bit_compute_dtype=compute_dtype,
|
bnb_4bit_compute_dtype=compute_dtype,
|
||||||
bnb_4bit_use_double_quant=True,
|
bnb_4bit_use_double_quant=True,
|
||||||
bnb_4bit_quant_type="nf4"
|
bnb_4bit_quant_type="nf4",
|
||||||
)
|
)
|
||||||
|
|
||||||
print("- Quantization config:", quantization_config)
|
print("- Quantization config:", quantization_config)
|
||||||
@@ -312,7 +322,7 @@ class Benchmark:
|
|||||||
self.config.model_path,
|
self.config.model_path,
|
||||||
quantization_config=quantization_config,
|
quantization_config=quantization_config,
|
||||||
torch_dtype=compute_dtype,
|
torch_dtype=compute_dtype,
|
||||||
device_map="auto" # Let HF decide on device mapping
|
device_map="auto", # Let HF decide on device mapping
|
||||||
)
|
)
|
||||||
|
|
||||||
# Check if model loaded successfully
|
# Check if model loaded successfully
|
||||||
@@ -324,7 +334,7 @@ class Benchmark:
|
|||||||
# Apply optimizations directly here
|
# Apply optimizations directly here
|
||||||
print("\nApplying model optimizations:")
|
print("\nApplying model optimizations:")
|
||||||
|
|
||||||
if hasattr(self.config, 'use_linear8bitlt') and self.config.use_linear8bitlt:
|
if hasattr(self.config, "use_linear8bitlt") and self.config.use_linear8bitlt:
|
||||||
print("- Model moved to GPU with Linear8bitLt quantization")
|
print("- Model moved to GPU with Linear8bitLt quantization")
|
||||||
else:
|
else:
|
||||||
# Skip moving to GPU since device_map="auto" already did that
|
# Skip moving to GPU since device_map="auto" already did that
|
||||||
@@ -334,8 +344,12 @@ class Benchmark:
|
|||||||
print(f"- Using {compute_dtype} for compute dtype")
|
print(f"- Using {compute_dtype} for compute dtype")
|
||||||
|
|
||||||
# Check CUDA and SDPA
|
# Check CUDA and SDPA
|
||||||
if torch.cuda.is_available() and torch.version.cuda and float(torch.version.cuda[:3]) >= 11.6:
|
if (
|
||||||
if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
|
torch.cuda.is_available()
|
||||||
|
and torch.version.cuda
|
||||||
|
and float(torch.version.cuda[:3]) >= 11.6
|
||||||
|
):
|
||||||
|
if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
|
||||||
print("- Using PyTorch SDPA (scaled_dot_product_attention)")
|
print("- Using PyTorch SDPA (scaled_dot_product_attention)")
|
||||||
else:
|
else:
|
||||||
print("- PyTorch SDPA not available")
|
print("- PyTorch SDPA not available")
|
||||||
@@ -343,8 +357,7 @@ class Benchmark:
|
|||||||
# Try xformers if available (only on CUDA)
|
# Try xformers if available (only on CUDA)
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
try:
|
try:
|
||||||
from xformers.ops import memory_efficient_attention
|
if hasattr(model, "enable_xformers_memory_efficient_attention"):
|
||||||
if hasattr(model, 'enable_xformers_memory_efficient_attention'):
|
|
||||||
model.enable_xformers_memory_efficient_attention()
|
model.enable_xformers_memory_efficient_attention()
|
||||||
print("- Enabled xformers memory efficient attention")
|
print("- Enabled xformers memory efficient attention")
|
||||||
else:
|
else:
|
||||||
@@ -370,7 +383,7 @@ class Benchmark:
|
|||||||
self.config.model_path,
|
self.config.model_path,
|
||||||
quantization_config=quantization_config,
|
quantization_config=quantization_config,
|
||||||
torch_dtype=compute_dtype,
|
torch_dtype=compute_dtype,
|
||||||
device_map="auto"
|
device_map="auto",
|
||||||
)
|
)
|
||||||
|
|
||||||
if model is None:
|
if model is None:
|
||||||
@@ -389,6 +402,7 @@ class Benchmark:
|
|||||||
# Apply standard optimizations
|
# Apply standard optimizations
|
||||||
# set default to half
|
# set default to half
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
torch.set_default_dtype(torch.bfloat16)
|
torch.set_default_dtype(torch.bfloat16)
|
||||||
model = ModelOptimizer.optimize(model, self.config)
|
model = ModelOptimizer.optimize(model, self.config)
|
||||||
model = model.half()
|
model = model.half()
|
||||||
@@ -403,25 +417,31 @@ class Benchmark:
|
|||||||
return model
|
return model
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"ERROR loading model: {str(e)}")
|
print(f"ERROR loading model: {e!s}")
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def _create_random_batch(self, batch_size: int) -> torch.Tensor:
|
def _create_random_batch(self, batch_size: int) -> torch.Tensor:
|
||||||
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
|
device = (
|
||||||
|
"cuda"
|
||||||
|
if torch.cuda.is_available()
|
||||||
|
else "mps"
|
||||||
|
if torch.backends.mps.is_available()
|
||||||
|
else "cpu"
|
||||||
|
)
|
||||||
return torch.randint(
|
return torch.randint(
|
||||||
0, 1000,
|
0,
|
||||||
|
1000,
|
||||||
(batch_size, self.config.seq_length),
|
(batch_size, self.config.seq_length),
|
||||||
device=device,
|
device=device,
|
||||||
dtype=torch.long
|
dtype=torch.long,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _run_inference(
|
def _run_inference(
|
||||||
self,
|
self, input_ids: torch.Tensor, graph_wrapper: GraphWrapper | None = None
|
||||||
input_ids: torch.Tensor,
|
) -> tuple[float, torch.Tensor]:
|
||||||
graph_wrapper: Optional[GraphWrapper] = None
|
|
||||||
) -> Tuple[float, torch.Tensor]:
|
|
||||||
attention_mask = torch.ones_like(input_ids)
|
attention_mask = torch.ones_like(input_ids)
|
||||||
|
|
||||||
with torch.no_grad(), self.timer.timing():
|
with torch.no_grad(), self.timer.timing():
|
||||||
@@ -432,7 +452,7 @@ class Benchmark:
|
|||||||
|
|
||||||
return self.timer.elapsed_time(), output
|
return self.timer.elapsed_time(), output
|
||||||
|
|
||||||
def run(self) -> Dict[int, Dict[str, float]]:
|
def run(self) -> dict[int, dict[str, float]]:
|
||||||
results = {}
|
results = {}
|
||||||
|
|
||||||
# Reset peak memory stats
|
# Reset peak memory stats
|
||||||
@@ -450,9 +470,7 @@ class Benchmark:
|
|||||||
|
|
||||||
# Get or create graph for this batch size
|
# Get or create graph for this batch size
|
||||||
graph_wrapper = (
|
graph_wrapper = (
|
||||||
self.graphs.get_or_create(batch_size)
|
self.graphs.get_or_create(batch_size) if self.graphs is not None else None
|
||||||
if self.graphs is not None
|
|
||||||
else None
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Pre-allocate input tensor
|
# Pre-allocate input tensor
|
||||||
@@ -490,7 +508,7 @@ class Benchmark:
|
|||||||
|
|
||||||
# Log memory usage
|
# Log memory usage
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
peak_memory_gb = torch.cuda.max_memory_allocated() / (1024 ** 3)
|
peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)
|
||||||
elif torch.backends.mps.is_available():
|
elif torch.backends.mps.is_available():
|
||||||
# MPS doesn't have max_memory_allocated, use 0
|
# MPS doesn't have max_memory_allocated, use 0
|
||||||
peak_memory_gb = 0.0
|
peak_memory_gb = 0.0
|
||||||
@@ -604,7 +622,15 @@ def main():
|
|||||||
os.makedirs("results", exist_ok=True)
|
os.makedirs("results", exist_ok=True)
|
||||||
|
|
||||||
# Generate filename based on configuration
|
# Generate filename based on configuration
|
||||||
precision_type = "int4" if config.use_int4 else "int8" if config.use_int8 else "fp16" if config.use_fp16 else "fp32"
|
precision_type = (
|
||||||
|
"int4"
|
||||||
|
if config.use_int4
|
||||||
|
else "int8"
|
||||||
|
if config.use_int8
|
||||||
|
else "fp16"
|
||||||
|
if config.use_fp16
|
||||||
|
else "fp32"
|
||||||
|
)
|
||||||
model_name = os.path.basename(config.model_path)
|
model_name = os.path.basename(config.model_path)
|
||||||
output_file = f"results/benchmark_{model_name}_{precision_type}.json"
|
output_file = f"results/benchmark_{model_name}_{precision_type}.json"
|
||||||
|
|
||||||
@@ -612,17 +638,20 @@ def main():
         with open(output_file, "w") as f:
             json.dump(
                 {
-                    "config": {k: str(v) if isinstance(v, list) else v for k, v in vars(config).items()},
-                    "results": {str(k): v for k, v in results.items()}
+                    "config": {
+                        k: str(v) if isinstance(v, list) else v for k, v in vars(config).items()
+                    },
+                    "results": {str(k): v for k, v in results.items()},
                 },
                 f,
-                indent=2
+                indent=2,
             )
         print(f"Results saved to {output_file}")

     except Exception as e:
         print(f"Benchmark failed: {e}")
         import traceback

         traceback.print_exc()
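Not part of the diff: a small, hedged sketch of reading back the JSON written in the hunk above. The filename used here is hypothetical; real names follow the `results/benchmark_{model_name}_{precision_type}.json` pattern, and the per-batch keys are whatever `Benchmark.run()` records.

```python
import json

# Hypothetical filename following results/benchmark_{model_name}_{precision_type}.json
with open("results/benchmark_contriever_fp16.json") as f:
    report = json.load(f)

print(report["config"])  # serialized BenchmarkConfig
for batch_size, metrics in report["results"].items():
    print(batch_size, metrics)  # per-batch-size numbers recorded by run()
```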
@@ -5,24 +5,21 @@ It correctly compares results by fetching the text content for both the new search
 results and the golden standard results, making the comparison robust to ID changes.
 """

-import json
 import argparse
+import json
+import sys
 import time
 from pathlib import Path
-import sys
-import numpy as np
-from typing import List

-from leann.api import LeannSearcher, LeannBuilder
+import numpy as np
+from leann.api import LeannBuilder, LeannChat, LeannSearcher


 def download_data_if_needed(data_root: Path, download_embeddings: bool = False):
     """Checks if the data directory exists, and if not, downloads it from HF Hub."""
     if not data_root.exists():
         print(f"Data directory '{data_root}' not found.")
-        print(
-            "Downloading evaluation data from Hugging Face Hub... (this may take a moment)"
-        )
+        print("Downloading evaluation data from Hugging Face Hub... (this may take a moment)")
         try:
             from huggingface_hub import snapshot_download
@@ -63,7 +60,7 @@ def download_data_if_needed(data_root: Path, download_embeddings: bool = False):
         sys.exit(1)


-def download_embeddings_if_needed(data_root: Path, dataset_type: str = None):
+def download_embeddings_if_needed(data_root: Path, dataset_type: str | None = None):
     """Download embeddings files specifically."""
     embeddings_dir = data_root / "embeddings"
@@ -101,7 +98,7 @@ def download_embeddings_if_needed(data_root: Path, dataset_type: str | None = None):


 # --- Helper Function to get Golden Passages ---
-def get_golden_texts(searcher: LeannSearcher, golden_ids: List[int]) -> set:
+def get_golden_texts(searcher: LeannSearcher, golden_ids: list[int]) -> set:
     """
     Retrieves the text for golden passage IDs directly from the LeannSearcher's
     passage manager.
@@ -113,24 +110,20 @@ def get_golden_texts(searcher: LeannSearcher, golden_ids: list[int]) -> set:
             passage_data = searcher.passage_manager.get_passage(str(gid))
             golden_texts.add(passage_data["text"])
         except KeyError:
-            print(
-                f"Warning: Golden passage ID '{gid}' not found in the index's passage data."
-            )
+            print(f"Warning: Golden passage ID '{gid}' not found in the index's passage data.")
     return golden_texts


-def load_queries(file_path: Path) -> List[str]:
+def load_queries(file_path: Path) -> list[str]:
     queries = []
-    with open(file_path, "r", encoding="utf-8") as f:
+    with open(file_path, encoding="utf-8") as f:
         for line in f:
             data = json.loads(line)
             queries.append(data["query"])
     return queries


-def build_index_from_embeddings(
-    embeddings_file: str, output_path: str, backend: str = "hnsw"
-):
+def build_index_from_embeddings(embeddings_file: str, output_path: str, backend: str = "hnsw"):
     """
     Build a LEANN index from pre-computed embeddings.
|
|||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(description="Run recall evaluation on a LEANN index.")
|
||||||
description="Run recall evaluation on a LEANN index."
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"index_path",
|
"index_path",
|
||||||
type=str,
|
type=str,
|
||||||
@@ -202,26 +193,41 @@ def main():
     parser.add_argument(
         "--num-queries", type=int, default=10, help="Number of queries to evaluate."
     )
-    parser.add_argument(
-        "--top-k", type=int, default=3, help="The 'k' value for recall@k."
-    )
+    parser.add_argument("--top-k", type=int, default=3, help="The 'k' value for recall@k.")
     parser.add_argument(
         "--ef-search", type=int, default=120, help="The 'efSearch' parameter for HNSW."
     )
+    parser.add_argument(
+        "--batch-size",
+        type=int,
+        default=0,
+        help="Batch size for HNSW batched search (0 disables batching)",
+    )
+    parser.add_argument(
+        "--llm-type",
+        type=str,
+        choices=["ollama", "hf", "openai", "gemini", "simulated"],
+        default="ollama",
+        help="LLM backend type to optionally query during evaluation (default: ollama)",
+    )
+    parser.add_argument(
+        "--llm-model",
+        type=str,
+        default="qwen3:1.7b",
+        help="LLM model identifier for the chosen backend (default: qwen3:1.7b)",
+    )
     args = parser.parse_args()

     # --- Path Configuration ---
-    # Assumes a project structure where the script is in 'examples/'
-    # and data is in 'data/' at the project root.
-    project_root = Path(__file__).resolve().parent.parent
-    data_root = project_root / "data"
+    # Assumes a project structure where the script is in 'benchmarks/'
+    # and evaluation data is in 'benchmarks/data/'.
+    script_dir = Path(__file__).resolve().parent
+    data_root = script_dir / "data"

     # Download data based on mode
     if args.mode == "build":
         # For building mode, we need embeddings
-        download_data_if_needed(
-            data_root, download_embeddings=False
-        )  # Basic data first
+        download_data_if_needed(data_root, download_embeddings=False)  # Basic data first

         # Auto-detect dataset type and download embeddings
         if args.embeddings_file:
@@ -262,9 +268,7 @@ def main():
         print(f"Index built successfully: {built_index_path}")

         # Ask if user wants to run evaluation
-        eval_response = (
-            input("Run evaluation on the built index? (y/n): ").strip().lower()
-        )
+        eval_response = input("Run evaluation on the built index? (y/n): ").strip().lower()
         if eval_response != "y":
             print("Index building complete. Exiting.")
             return
@@ -293,11 +297,9 @@ def main():
                 break

         if not args.index_path:
+            print("No indices found. The data download should have included pre-built indices.")
             print(
-                "No indices found. The data download should have included pre-built indices."
-            )
-            print(
-                "Please check the data/indices/ directory or provide --index-path manually."
+                "Please check the benchmarks/data/indices/ directory or provide --index-path manually."
             )
             sys.exit(1)
@@ -310,14 +312,10 @@ def main():
     else:
         # Fallback: try to infer from the index directory name
         dataset_type = Path(args.index_path).name
-        print(
-            f"WARNING: Could not detect dataset type from path, inferred '{dataset_type}'."
-        )
+        print(f"WARNING: Could not detect dataset type from path, inferred '{dataset_type}'.")

     queries_file = data_root / "queries" / "nq_open.jsonl"
-    golden_results_file = (
-        data_root / "ground_truth" / dataset_type / "flat_results_nq_k3.json"
-    )
+    golden_results_file = data_root / "ground_truth" / dataset_type / "flat_results_nq_k3.json"

     print(f"INFO: Detected dataset type: {dataset_type}")
     print(f"INFO: Using queries file: {queries_file}")
@@ -327,7 +325,7 @@ def main():
     searcher = LeannSearcher(args.index_path)
     queries = load_queries(queries_file)

-    with open(golden_results_file, "r") as f:
+    with open(golden_results_file) as f:
         golden_results_data = json.load(f)

     num_eval_queries = min(args.num_queries, len(queries))
@@ -340,10 +338,23 @@ def main():
     for i in range(num_eval_queries):
         start_time = time.time()
         new_results = searcher.search(
-            queries[i], top_k=args.top_k, ef=args.ef_search
+            queries[i],
+            top_k=args.top_k,
+            complexity=args.ef_search,
+            batch_size=args.batch_size,
         )
         search_times.append(time.time() - start_time)

+        # Optional: also call the LLM with configurable backend/model (does not affect recall)
+        llm_config = {"type": args.llm_type, "model": args.llm_model}
+        chat = LeannChat(args.index_path, llm_config=llm_config, searcher=searcher)
+        answer = chat.ask(
+            queries[i],
+            top_k=args.top_k,
+            complexity=args.ef_search,
+            batch_size=args.batch_size,
+        )
+        print(f"Answer: {answer}")
         # Correct Recall Calculation: Based on TEXT content
         new_texts = {result.text for result in new_results}
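Not part of the diff: a hedged sketch of the text-based recall idea this script relies on, where retrieved passage texts are compared against golden passage texts so that ID remapping cannot distort recall. The helper below is illustrative, not the script's exact code.

```python
def recall_at_k(retrieved_texts: set[str], golden_texts: set[str]) -> float:
    """Fraction of golden passages whose exact text appears among the retrieved texts."""
    if not golden_texts:
        return 0.0
    return len(retrieved_texts & golden_texts) / len(golden_texts)

# Example: 1 of 2 golden passages retrieved -> recall@k = 0.5
print(recall_at_k({"passage A", "passage B"}, {"passage A", "passage C"}))
```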
@@ -1,26 +1,27 @@
 import time
 from dataclasses import dataclass
-from typing import Dict, List

 import numpy as np
 import torch
 from torch import nn
-from transformers import AutoModel, BitsAndBytesConfig
 from tqdm import tqdm
+from transformers import AutoModel

 # Add MLX imports
 try:
     import mlx.core as mx
     from mlx_lm.utils import load

     MLX_AVAILABLE = True
-except ImportError as e:
+except ImportError:
     print("MLX not available. Install with: uv pip install mlx mlx-lm")
     MLX_AVAILABLE = False


 @dataclass
 class BenchmarkConfig:
-    model_path: str = "facebook/contriever"
-    batch_sizes: List[int] = None
+    model_path: str = "facebook/contriever-msmarco"
+    batch_sizes: list[int] = None
     seq_length: int = 256
     num_runs: int = 5
     use_fp16: bool = True
@@ -33,7 +34,8 @@ class BenchmarkConfig:

     def __post_init__(self):
         if self.batch_sizes is None:
-            self.batch_sizes = [1, 2, 4, 8, 16, 32, 64]
+            self.batch_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]


 class MLXBenchmark:
     """MLX-specific benchmark for embedding models"""
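Not part of the diff: a hedged sketch of how this config is typically constructed, using only fields visible in the hunks above. The module name in the import is hypothetical, and any fields not shown in the diff are assumptions.

```python
from embedding_benchmark import Benchmark, BenchmarkConfig  # hypothetical module name

# Sweep a smaller set of batch sizes than the new default list above.
config = BenchmarkConfig(
    model_path="facebook/contriever-msmarco",
    batch_sizes=[1, 8, 64],
    seq_length=256,
    num_runs=5,
    use_fp16=True,
)
results = Benchmark(config).run()  # {batch_size: {"throughput": ..., ...}}
```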
@@ -55,11 +57,7 @@ class MLXBenchmark:

     def _create_random_batch(self, batch_size: int):
         """Create random input batches for MLX testing - same as PyTorch"""
-        return torch.randint(
-            0, 1000,
-            (batch_size, self.config.seq_length),
-            dtype=torch.long
-        )
+        return torch.randint(0, 1000, (batch_size, self.config.seq_length), dtype=torch.long)

     def _run_inference(self, input_ids: torch.Tensor) -> float:
         """Run MLX inference with same input as PyTorch"""
@@ -82,12 +80,12 @@ class MLXBenchmark:

         except Exception as e:
             print(f"MLX inference error: {e}")
-            return float('inf')
+            return float("inf")
         end_time = time.time()

         return end_time - start_time

-    def run(self) -> Dict[int, Dict[str, float]]:
+    def run(self) -> dict[int, dict[str, float]]:
         """Run the MLX benchmark across all batch sizes"""
         results = {}
@@ -111,10 +109,10 @@ class MLXBenchmark:
                 break

             # Run benchmark
-            for i in tqdm(range(self.config.num_runs), desc=f"MLX Batch size {batch_size}"):
+            for _i in tqdm(range(self.config.num_runs), desc=f"MLX Batch size {batch_size}"):
                 try:
                     elapsed_time = self._run_inference(input_ids)
-                    if elapsed_time != float('inf'):
+                    if elapsed_time != float("inf"):
                         times.append(elapsed_time)
                 except Exception as e:
                     print(f"Error during MLX inference: {e}")
@@ -145,16 +143,22 @@ class MLXBenchmark:

         return results


 class Benchmark:
     def __init__(self, config: BenchmarkConfig):
         self.config = config
-        self.device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+        self.device = (
+            "cuda"
+            if torch.cuda.is_available()
+            else "mps"
+            if torch.backends.mps.is_available()
+            else "cpu"
+        )
         self.model = self._load_model()

     def _load_model(self) -> nn.Module:
         print(f"Loading model from {self.config.model_path}...")

         model = AutoModel.from_pretrained(self.config.model_path)
         if self.config.use_fp16:
             model = model.half()
@@ -166,23 +170,30 @@ class Benchmark:

     def _create_random_batch(self, batch_size: int) -> torch.Tensor:
         return torch.randint(
-            0, 1000,
+            0,
+            1000,
             (batch_size, self.config.seq_length),
             device=self.device,
-            dtype=torch.long
+            dtype=torch.long,
         )

     def _run_inference(self, input_ids: torch.Tensor) -> float:
         attention_mask = torch.ones_like(input_ids)
+        # print shape of input_ids and attention_mask
+        print(f"input_ids shape: {input_ids.shape}")
+        print(f"attention_mask shape: {attention_mask.shape}")
         start_time = time.time()
         with torch.no_grad():
-            output = self.model(input_ids=input_ids, attention_mask=attention_mask)
+            self.model(input_ids=input_ids, attention_mask=attention_mask)
+        if torch.cuda.is_available():
+            torch.cuda.synchronize()
+        if torch.backends.mps.is_available():
+            torch.mps.synchronize()
         end_time = time.time()

         return end_time - start_time

-    def run(self) -> Dict[int, Dict[str, float]]:
+    def run(self) -> dict[int, dict[str, float]]:
         results = {}

         if torch.cuda.is_available():
@@ -194,7 +205,7 @@ class Benchmark:

             input_ids = self._create_random_batch(batch_size)

-            for i in tqdm(range(self.config.num_runs), desc=f"Batch size {batch_size}"):
+            for _i in tqdm(range(self.config.num_runs), desc=f"Batch size {batch_size}"):
                 try:
                     elapsed_time = self._run_inference(input_ids)
                     times.append(elapsed_time)
@@ -219,7 +230,7 @@ class Benchmark:
             print(f"Throughput: {throughput:.2f} sequences/second")

             if torch.cuda.is_available():
-                peak_memory_gb = torch.cuda.max_memory_allocated() / (1024 ** 3)
+                peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)
             else:
                 peak_memory_gb = 0.0
|
|||||||
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
|
||||||
def run_benchmark():
|
def run_benchmark():
|
||||||
"""Main function to run the benchmark with optimized parameters."""
|
"""Main function to run the benchmark with optimized parameters."""
|
||||||
config = BenchmarkConfig()
|
config = BenchmarkConfig()
|
||||||
@@ -242,16 +254,13 @@ def run_benchmark():
         return {
             "max_throughput": max_throughput,
             "avg_throughput": avg_throughput,
-            "results": results
+            "results": results,
         }

     except Exception as e:
         print(f"Benchmark failed: {e}")
-        return {
-            "max_throughput": 0.0,
-            "avg_throughput": 0.0,
-            "error": str(e)
-        }
+        return {"max_throughput": 0.0, "avg_throughput": 0.0, "error": str(e)}


 def run_mlx_benchmark():
     """Run MLX-specific benchmark"""
@@ -260,13 +269,10 @@ def run_mlx_benchmark():
         return {
             "max_throughput": 0.0,
             "avg_throughput": 0.0,
-            "error": "MLX not available"
+            "error": "MLX not available",
         }

-    config = BenchmarkConfig(
-        model_path="mlx-community/all-MiniLM-L6-v2-4bit",
-        use_mlx=True
-    )
+    config = BenchmarkConfig(model_path="mlx-community/all-MiniLM-L6-v2-4bit", use_mlx=True)

     try:
         benchmark = MLXBenchmark(config)
@@ -276,7 +282,7 @@ def run_mlx_benchmark():
         return {
             "max_throughput": 0.0,
             "avg_throughput": 0.0,
-            "error": "No valid results"
+            "error": "No valid results",
         }

     max_throughput = max(results[batch_size]["throughput"] for batch_size in results)
@@ -285,16 +291,13 @@ def run_mlx_benchmark():
     return {
         "max_throughput": max_throughput,
         "avg_throughput": avg_throughput,
-        "results": results
+        "results": results,
     }

     except Exception as e:
         print(f"MLX benchmark failed: {e}")
-        return {
-            "max_throughput": 0.0,
-            "avg_throughput": 0.0,
-            "error": str(e)
-        }
+        return {"max_throughput": 0.0, "avg_throughput": 0.0, "error": str(e)}


 if __name__ == "__main__":
     print("=== PyTorch Benchmark ===")
@@ -308,7 +311,7 @@ if __name__ == "__main__":
     print(f"MLX Average throughput: {mlx_result['avg_throughput']:.2f} sequences/second")

     # Compare results
-    if pytorch_result['max_throughput'] > 0 and mlx_result['max_throughput'] > 0:
+    if pytorch_result["max_throughput"] > 0 and mlx_result["max_throughput"] > 0:
-        speedup = mlx_result['max_throughput'] / pytorch_result['max_throughput']
+        speedup = mlx_result["max_throughput"] / pytorch_result["max_throughput"]
-        print(f"\n=== Comparison ===")
+        print("\n=== Comparison ===")
         print(f"MLX is {speedup:.2f}x {'faster' if speedup > 1 else 'slower'} than PyTorch")
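Not part of the diff: the hunks above add device-aware synchronization before the timer is read. A minimal sketch of that pattern in isolation, assuming a CPU/CUDA/MPS setup like the benchmark's; the model and batch shape are placeholders.

```python
import time

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("facebook/contriever-msmarco").eval()
input_ids = torch.randint(0, 1000, (8, 256), dtype=torch.long)
attention_mask = torch.ones_like(input_ids)

start = time.time()
with torch.no_grad():
    model(input_ids=input_ids, attention_mask=attention_mask)
# Wait for asynchronous kernels before reading the clock, otherwise timings look too fast.
if torch.cuda.is_available():
    torch.cuda.synchronize()
elif torch.backends.mps.is_available():
    torch.mps.synchronize()
print(f"forward pass took {time.time() - start:.4f}s")
```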
82  data/.gitattributes  vendored
@@ -1,82 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mds filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
-# Video files - compressed
-*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text
-ground_truth/dpr/id_map.json filter=lfs diff=lfs merge=lfs -text
-indices/dpr/dpr_diskann.passages.idx filter=lfs diff=lfs merge=lfs -text
-indices/dpr/dpr_diskann.passages.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/dpr/dpr_diskann_disk.index filter=lfs diff=lfs merge=lfs -text
-indices/dpr/leann.labels.map filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/leann.labels.map filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.index filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.0.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.0.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.1.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.1.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.2.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.2.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.3.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.3.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.4.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.4.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.5.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.5.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.6.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.6.jsonl filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.7.idx filter=lfs diff=lfs merge=lfs -text
-indices/rpj_wiki/rpj_wiki.passages.7.jsonl filter=lfs diff=lfs merge=lfs -text
@@ -14903,5 +14903,3 @@ This website includes information about Project Gutenberg™,
 including how to make donations to the Project Gutenberg Literary
 Archive Foundation, how to help produce our new eBooks, and how to
 subscribe to our email newsletter to hear about new eBooks.
-
-
277  demo.ipynb
@@ -4,7 +4,11 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "# Quick Start in 30s"
+    "# Quick Start \n",
+    "\n",
+    "**Home GitHub Repository:** [LEANN on GitHub](https://github.com/yichuan-w/LEANN)\n",
+    "\n",
+    "**Important for Colab users:** Set your runtime type to T4 GPU for optimal performance. Go to Runtime → Change runtime type → Hardware accelerator → T4 GPU."
   ]
  },
 {
@@ -13,8 +17,25 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# install this if you areusing colab\n",
-    "! pip install leann"
+    "# install this if you are using colab\n",
+    "! uv pip install leann-core leann-backend-hnsw --no-deps\n",
+    "! uv pip install leann --no-deps\n",
+    "# For Colab environment, we need to set some environment variables\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"LEANN_LOG_LEVEL\"] = \"INFO\"  # Enable more detailed logging"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pathlib import Path\n",
+    "\n",
+    "INDEX_DIR = Path(\"./\").resolve()\n",
+    "INDEX_PATH = str(INDEX_DIR / \"demo.leann\")"
   ]
  },
 {
@@ -26,91 +47,21 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    ... (removed: stale stdout/stderr from a previous local run — backend registration, sentence-transformers/tqdm warnings, and the HNSW→CSR conversion trace) ...
-   ],
+   "outputs": [],
   "source": [
    "from leann.api import LeannBuilder\n",
    "\n",
    "builder = LeannBuilder(backend_name=\"hnsw\")\n",
    "builder.add_text(\"C# is a powerful programming language and it is good at game development\")\n",
-    "builder.add_text(\"Python is a powerful programming language and it is good at machine learning tasks\")\n",
+    "builder.add_text(\n",
+    "    \"Python is a powerful programming language and it is good at machine learning tasks\"\n",
+    ")\n",
    "builder.add_text(\"Machine learning transforms industries\")\n",
    "builder.add_text(\"Neural networks process complex data\")\n",
    "builder.add_text(\"Leann is a great storage saving engine for RAG on your MacBook\")\n",
-    "builder.build_index(\"knowledge.leann\")"
+    "builder.build_index(INDEX_PATH)"
   ]
  },
 {
@@ -122,97 +73,13 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    ... (removed: stale stdout/stderr from a previous local run — LeannSearcher.search() INFO logs, embedding-server startup logs, the HNSW CSR reader trace, and the printed SearchResult list) ...
-   ],
+   "outputs": [],
   "source": [
    "from leann.api import LeannSearcher\n",
    "\n",
-    "searcher = LeannSearcher(\"knowledge.leann\")\n",
+    "searcher = LeannSearcher(INDEX_PATH)\n",
    "results = searcher.search(\"programming languages\", top_k=2)\n",
    "results"
   ]
@@ -228,79 +95,7 @@
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    ... (removed: stale stdout/stderr from a previous local run — HFChat initialization logs, the HNSW CSR reader trace, LeannSearcher.search() INFO logs, and the model's generated comparison of C# and Python) ...
-   ],
+   "outputs": [],
   "source": [
    "from leann.api import LeannChat\n",
    "\n",
@@ -309,11 +104,11 @@
    " \"model\": \"Qwen/Qwen3-0.6B\",\n",
    "}\n",
    "\n",
-    "chat = LeannChat(index_path=\"knowledge.leann\", llm_config=llm_config)\n",
+    "chat = LeannChat(index_path=INDEX_PATH, llm_config=llm_config)\n",
    "response = chat.ask(\n",
    "    \"Compare the two retrieved programming languages and tell me their advantages.\",\n",
    "    top_k=2,\n",
-    "    llm_kwargs={\"max_tokens\": 128}\n",
+    "    llm_kwargs={\"max_tokens\": 128},\n",
    ")\n",
    "response"
   ]
220  docs/CONTRIBUTING.md  Normal file
@@ -0,0 +1,220 @@
# 🤝 Contributing

We welcome contributions! Leann is built by the community, for the community.

## Ways to Contribute

- 🐛 **Bug Reports**: Found an issue? Let us know!
- 💡 **Feature Requests**: Have an idea? We'd love to hear it!
- 🔧 **Code Contributions**: PRs welcome for all skill levels
- 📖 **Documentation**: Help make Leann more accessible
- 🧪 **Benchmarks**: Share your performance results

## 🚀 Development Setup

### Prerequisites

1. **Install uv** (fast Python package installer):
   ```bash
   curl -LsSf https://astral.sh/uv/install.sh | sh
   ```

2. **Clone the repository**:
   ```bash
   git clone https://github.com/LEANN-RAG/LEANN-RAG.git
   cd LEANN-RAG
   ```

3. **Install system dependencies**:

   **macOS:**
   ```bash
   brew install llvm libomp boost protobuf zeromq pkgconf
   ```

   **Ubuntu/Debian:**
   ```bash
   sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler \
     libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
   ```

4. **Build from source**:
   ```bash
   # macOS
   CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv sync

   # Ubuntu/Debian
   uv sync
   ```

## 🔨 Pre-commit Hooks

We use pre-commit hooks to ensure code quality and consistency. This runs automatically before each commit.

### Setup Pre-commit

1. **Install pre-commit** (already included when you run `uv sync`):
   ```bash
   uv pip install pre-commit
   ```

2. **Install the git hooks**:
   ```bash
   pre-commit install
   ```

3. **Run pre-commit manually** (optional):
   ```bash
   pre-commit run --all-files
   ```

### Pre-commit Checks

Our pre-commit configuration includes:
- **Trailing whitespace removal**
- **End-of-file fixing**
- **YAML validation**
- **Large file prevention**
- **Merge conflict detection**
- **Debug statement detection**
- **Code formatting with ruff**
- **Code linting with ruff**

## 🧪 Testing

### Running Tests

```bash
# Run all tests
uv run pytest

# Run specific test file
uv run pytest test/test_filename.py

# Run with coverage
uv run pytest --cov=leann
```

### Writing Tests

- Place tests in the `test/` directory
- Follow the naming convention `test_*.py`
- Use descriptive test names that explain what's being tested
- Include both positive and negative test cases

## 📝 Code Style

We use `ruff` for both linting and formatting to ensure consistent code style.

### Format Your Code

```bash
# Format all files
ruff format

# Check formatting without changing files
ruff format --check
```

### Lint Your Code

```bash
# Run linter with auto-fix
ruff check --fix

# Just check without fixing
ruff check
```

### Style Guidelines

- Follow PEP 8 conventions
- Use descriptive variable names
- Add type hints where appropriate
- Write docstrings for all public functions and classes
- Keep functions focused and single-purpose

## 🚦 CI/CD

Our CI pipeline runs automatically on all pull requests. It includes:

1. **Linting and Formatting**: Ensures code follows our style guidelines
2. **Multi-platform builds**: Tests on Ubuntu and macOS
3. **Python version matrix**: Tests on Python 3.9-3.13
4. **Wheel building**: Ensures packages can be built and distributed

### CI Commands

The CI uses the same commands as pre-commit to ensure consistency:
```bash
# Linting
ruff check .

# Format checking
ruff format --check .
```

Make sure your code passes these checks locally before pushing!

## 🔄 Pull Request Process

1. **Fork the repository** and create your branch from `main`:
   ```bash
   git checkout -b feature/your-feature-name
   ```

2. **Make your changes**:
   - Write clean, documented code
   - Add tests for new functionality
   - Update documentation as needed

3. **Run pre-commit checks**:
   ```bash
   pre-commit run --all-files
   ```

4. **Test your changes**:
   ```bash
   uv run pytest
   ```

5. **Commit with descriptive messages**:
   ```bash
   git commit -m "feat: add new search algorithm"
   ```

   Follow [Conventional Commits](https://www.conventionalcommits.org/):
   - `feat:` for new features
   - `fix:` for bug fixes
   - `docs:` for documentation changes
   - `test:` for test additions/changes
   - `refactor:` for code refactoring
   - `perf:` for performance improvements

6. **Push and create a pull request**:
   - Provide a clear description of your changes
   - Reference any related issues
   - Include examples or screenshots if applicable

## 📚 Documentation

When adding new features or making significant changes:

1. Update relevant documentation in `/docs`
2. Add docstrings to new functions/classes
3. Update README.md if needed
4. Include usage examples

## 🤔 Getting Help

- **Discord**: Join our community for discussions
- **Issues**: Check existing issues or create a new one
- **Discussions**: For general questions and ideas

## 📄 License

By contributing, you agree that your contributions will be licensed under the same license as the project (MIT).

---

Thank you for contributing to LEANN! Every contribution, no matter how small, helps make the project better for everyone. 🌟
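Not part of the new file: a minimal sketch of the test conventions described in the "Writing Tests" section above. The file name, test names, and the assumption that an unknown backend raises an exception are illustrative only.

```python
# test/test_builder_example.py  (hypothetical file)
import pytest

from leann.api import LeannBuilder


def test_add_text_accepts_a_passage():
    builder = LeannBuilder(backend_name="hnsw")
    builder.add_text("LEANN keeps index storage small")  # positive case


def test_unknown_backend_is_rejected():
    # negative case; the exact exception type is an assumption
    with pytest.raises(Exception):
        LeannBuilder(backend_name="no-such-backend")
```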
123  docs/THINKING_BUDGET_FEATURE.md  Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
# Thinking Budget Feature Implementation
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This document describes the implementation of the **thinking budget** feature for LEANN, which allows users to control the computational effort for reasoning models like GPT-Oss:20b.
|
||||||
|
|
||||||
|
## Feature Description
|
||||||
|
|
||||||
|
The thinking budget feature provides three levels of computational effort for reasoning models:
|
||||||
|
- **`low`**: Fast responses, basic reasoning (default for simple queries)
|
||||||
|
- **`medium`**: Balanced speed and reasoning depth
|
||||||
|
- **`high`**: Maximum reasoning effort, best for complex analytical questions
|
||||||
|
|
||||||
|
## Implementation Details
|
||||||
|
|
||||||
|
### 1. Command Line Interface
|
||||||
|
|
||||||
|
Added `--thinking-budget` parameter to both CLI and RAG examples:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# LEANN CLI
|
||||||
|
leann ask my-index --llm ollama --model gpt-oss:20b --thinking-budget high
|
||||||
|
|
||||||
|
# RAG Examples
|
||||||
|
python apps/email_rag.py --llm ollama --llm-model gpt-oss:20b --thinking-budget high
|
||||||
|
python apps/document_rag.py --llm openai --llm-model o3 --thinking-budget medium
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. LLM Backend Support
|
||||||
|
|
||||||
|
#### Ollama Backend (`packages/leann-core/src/leann/chat.py`)
|
||||||
|
|
||||||
|
```python
|
||||||
|
def ask(self, prompt: str, **kwargs) -> str:
|
||||||
|
# Handle thinking budget for reasoning models
|
||||||
|
options = kwargs.copy()
|
||||||
|
thinking_budget = kwargs.get("thinking_budget")
|
||||||
|
if thinking_budget:
|
||||||
|
options.pop("thinking_budget", None)
|
||||||
|
if thinking_budget in ["low", "medium", "high"]:
|
||||||
|
options["reasoning"] = {"effort": thinking_budget, "exclude": False}
|
||||||
|
```
|
||||||
|
|
||||||
|
**API Format**: Uses Ollama's `reasoning` parameter with `effort` and `exclude` fields.
|
||||||
|
|
||||||
|
#### OpenAI Backend (`packages/leann-core/src/leann/chat.py`)
|
||||||
|
|
||||||
|
```python
|
||||||
|
def ask(self, prompt: str, **kwargs) -> str:
|
||||||
|
# Handle thinking budget for reasoning models
|
||||||
|
thinking_budget = kwargs.get("thinking_budget")
|
||||||
|
if thinking_budget and thinking_budget in ["low", "medium", "high"]:
|
||||||
|
# Check if this is an o-series model
|
||||||
|
o_series_models = ["o3", "o3-mini", "o4-mini", "o1", "o3-pro", "o3-deep-research"]
|
||||||
|
if any(model in self.model for model in o_series_models):
|
||||||
|
params["reasoning_effort"] = thinking_budget
|
||||||
|
```
|
||||||
|
|
||||||
|
**API Format**: Uses OpenAI's `reasoning_effort` parameter for o-series models.
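To make the wiring concrete, here is a minimal sketch of how this maps onto an OpenAI chat completion call. The helper name and client setup are illustrative assumptions; only the `reasoning_effort` parameter comes from the snippet above, and it is only accepted by o-series models.

```python
# Illustrative sketch (not LEANN's exact code): forwarding thinking_budget
# to the OpenAI client as reasoning_effort for o-series models.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def ask_with_budget(prompt: str, model: str = "o3-mini", thinking_budget: str | None = None) -> str:
    params = {"model": model, "messages": [{"role": "user", "content": prompt}]}
    if thinking_budget in ("low", "medium", "high") and model.startswith(("o1", "o3", "o4")):
        params["reasoning_effort"] = thinking_budget  # maps --thinking-budget onto the API
    response = client.chat.completions.create(**params)
    return response.choices[0].message.content
```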
|
||||||
|
|
||||||
|
### 3. Parameter Propagation
|
||||||
|
|
||||||
|
The thinking budget parameter is propagated through the LEANN architecture as follows (a minimal sketch appears after the list):
|
||||||
|
|
||||||
|
1. **CLI** (`packages/leann-core/src/leann/cli.py`): Captures `--thinking-budget` argument
|
||||||
|
2. **Base RAG** (`apps/base_rag_example.py`): Adds parameter to argument parser
|
||||||
|
3. **LeannChat** (`packages/leann-core/src/leann/api.py`): Passes `llm_kwargs` to LLM
|
||||||
|
4. **LLM Interface**: Handles the parameter in backend-specific implementations
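A minimal, illustrative sketch of that flow is shown below; the argument name matches the docs above, but the surrounding wiring is an assumption rather than LEANN's exact code.

```python
# Illustrative only: how --thinking-budget could travel from argparse to the LLM call.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--thinking-budget", choices=["low", "medium", "high"], default=None)
args = parser.parse_args()

llm_kwargs = {}
if args.thinking_budget:
    llm_kwargs["thinking_budget"] = args.thinking_budget

# Downstream, the chat backend receives the kwarg, e.g.:
# answer = llm.ask(prompt, **llm_kwargs)
```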
|
||||||
|
|
||||||
|
## Files Modified
|
||||||
|
|
||||||
|
### Core Implementation
|
||||||
|
- `packages/leann-core/src/leann/chat.py`: Added thinking budget support to OllamaChat and OpenAIChat
|
||||||
|
- `packages/leann-core/src/leann/cli.py`: Added `--thinking-budget` argument
|
||||||
|
- `apps/base_rag_example.py`: Added thinking budget parameter to RAG examples
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
- `README.md`: Added thinking budget parameter to usage examples
|
||||||
|
- `docs/configuration-guide.md`: Added detailed documentation and usage guidelines
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
- `examples/thinking_budget_demo.py`: Comprehensive demo script with usage examples
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
```bash
|
||||||
|
# High reasoning effort for complex questions
|
||||||
|
leann ask my-index --llm ollama --model gpt-oss:20b --thinking-budget high
|
||||||
|
|
||||||
|
# Medium reasoning for balanced performance
|
||||||
|
leann ask my-index --llm openai --model gpt-4o --thinking-budget medium
|
||||||
|
|
||||||
|
# Low reasoning for fast responses
|
||||||
|
leann ask my-index --llm ollama --model gpt-oss:20b --thinking-budget low
|
||||||
|
```
|
||||||
|
|
||||||
|
### RAG Examples
|
||||||
|
```bash
|
||||||
|
# Email RAG with high reasoning
|
||||||
|
python apps/email_rag.py --llm ollama --llm-model gpt-oss:20b --thinking-budget high
|
||||||
|
|
||||||
|
# Document RAG with medium reasoning
|
||||||
|
python apps/document_rag.py --llm openai --llm-model gpt-4o --thinking-budget medium
|
||||||
|
```
|
||||||
|
|
||||||
|
## Supported Models
|
||||||
|
|
||||||
|
### Ollama Models
|
||||||
|
- **GPT-Oss:20b**: Primary target model with reasoning capabilities
|
||||||
|
- **Other reasoning models**: Any Ollama model that supports the `reasoning` parameter
|
||||||
|
|
||||||
|
### OpenAI Models
|
||||||
|
- **o3, o3-mini, o4-mini, o1**: o-series reasoning models with `reasoning_effort` parameter
|
||||||
|
- **GPT-OSS models**: Models that support reasoning capabilities
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
The implementation includes comprehensive testing:
|
||||||
|
- Parameter handling verification
|
||||||
|
- Backend-specific API format validation
|
||||||
|
- CLI argument parsing tests
|
||||||
|
- Integration with existing LEANN architecture
|
||||||
143
docs/ast_chunking_guide.md
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
# AST-Aware Code Chunking Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This guide covers best practices for using AST-aware code chunking in LEANN. AST chunking provides better semantic understanding of code structure compared to traditional text-based chunking.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable AST chunking for mixed content (code + docs)
|
||||||
|
python -m apps.document_rag --enable-code-chunking --data-dir ./my_project
|
||||||
|
|
||||||
|
# Specialized code repository indexing
|
||||||
|
python -m apps.code_rag --repo-dir ./my_codebase
|
||||||
|
|
||||||
|
# Global CLI with AST support
|
||||||
|
leann build my-code-index --docs ./src --use-ast-chunking
|
||||||
|
```
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install LEANN with AST chunking support
|
||||||
|
uv pip install -e "."
|
||||||
|
```
|
||||||
|
|
||||||
|
#### For normal users (PyPI install)
|
||||||
|
- Use `pip install leann` or `uv pip install leann`.
|
||||||
|
- `astchunk` is pulled automatically from PyPI as a dependency; no extra steps.
|
||||||
|
|
||||||
|
#### For developers (from source, editable)
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/yichuan-w/LEANN.git leann
|
||||||
|
cd leann
|
||||||
|
git submodule update --init --recursive
|
||||||
|
uv sync
|
||||||
|
```
|
||||||
|
- This repo vendors `astchunk` as a git submodule at `packages/astchunk-leann` (our fork).
|
||||||
|
- `[tool.uv.sources]` maps the `astchunk` package to that path in editable mode.
|
||||||
|
- You can edit code under `packages/astchunk-leann` and Python will use your changes immediately (no separate `pip install astchunk` needed).
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### When to Use AST Chunking
|
||||||
|
|
||||||
|
✅ **Recommended for:**
|
||||||
|
- Code repositories with multiple languages
|
||||||
|
- Mixed documentation and code content
|
||||||
|
- Complex codebases with deep function/class hierarchies
|
||||||
|
- When working with Claude Code for code assistance
|
||||||
|
|
||||||
|
❌ **Not recommended for:**
|
||||||
|
- Pure text documents
|
||||||
|
- Very large files (>1MB)
|
||||||
|
- Languages not supported by tree-sitter
|
||||||
|
|
||||||
|
### Optimal Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Recommended settings for most codebases
|
||||||
|
python -m apps.code_rag \
|
||||||
|
--repo-dir ./src \
|
||||||
|
--ast-chunk-size 768 \
|
||||||
|
--ast-chunk-overlap 96 \
|
||||||
|
--exclude-dirs .git __pycache__ node_modules build dist
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supported Languages
|
||||||
|
|
||||||
|
| Extension | Language | Status |
|
||||||
|
|-----------|----------|--------|
|
||||||
|
| `.py` | Python | ✅ Full support |
|
||||||
|
| `.java` | Java | ✅ Full support |
|
||||||
|
| `.cs` | C# | ✅ Full support |
|
||||||
|
| `.ts`, `.tsx` | TypeScript | ✅ Full support |
|
||||||
|
| `.js`, `.jsx` | JavaScript | ✅ Via TypeScript parser |
|
||||||
|
|
||||||
|
## Integration Examples
|
||||||
|
|
||||||
|
### Document RAG with Code Support
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable code chunking in document RAG
|
||||||
|
python -m apps.document_rag \
|
||||||
|
--enable-code-chunking \
|
||||||
|
--data-dir ./project \
|
||||||
|
--query "How does authentication work in the codebase?"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Claude Code Integration
|
||||||
|
|
||||||
|
When using with Claude Code MCP server, AST chunking provides better context for:
|
||||||
|
- Code completion and suggestions
|
||||||
|
- Bug analysis and debugging
|
||||||
|
- Architecture understanding
|
||||||
|
- Refactoring assistance
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
1. **Fallback to Traditional Chunking**
|
||||||
|
- Normal behavior for unsupported languages
|
||||||
|
- Check logs for specific language support
|
||||||
|
|
||||||
|
2. **Performance with Large Files**
|
||||||
|
- Adjust `--max-file-size` parameter
|
||||||
|
- Use `--exclude-dirs` to skip unnecessary directories
|
||||||
|
|
||||||
|
3. **Quality Issues**
|
||||||
|
- Try different `--ast-chunk-size` values (512, 768, 1024)
|
||||||
|
- Adjust overlap for better context preservation
|
||||||
|
|
||||||
|
### Debug Mode
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LEANN_LOG_LEVEL=DEBUG
|
||||||
|
python -m apps.code_rag --repo-dir ./my_code
|
||||||
|
```
|
||||||
|
|
||||||
|
## Migration from Traditional Chunking
|
||||||
|
|
||||||
|
Existing workflows continue to work without changes. To enable AST chunking:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Before
|
||||||
|
python -m apps.document_rag --chunk-size 256
|
||||||
|
|
||||||
|
# After (maintains traditional chunking for non-code files)
|
||||||
|
python -m apps.document_rag --enable-code-chunking --chunk-size 256 --ast-chunk-size 768
|
||||||
|
```
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [astchunk GitHub Repository](https://github.com/yilinjz/astchunk)
|
||||||
|
- [LEANN MCP Integration](../packages/leann-mcp/README.md)
|
||||||
|
- [Research Paper](https://arxiv.org/html/2506.15655v1)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Note**: AST chunking maintains full backward compatibility while enhancing code understanding capabilities.
|
||||||
98
docs/code/embedding_model_compare.py
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
"""
|
||||||
|
Comparison between Sentence Transformers and OpenAI embeddings
|
||||||
|
|
||||||
|
This example shows how different embedding models handle complex queries
|
||||||
|
and demonstrates the differences between local and API-based embeddings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from leann.embedding_compute import compute_embeddings
|
||||||
|
|
||||||
|
# OpenAI API key should be set as environment variable
|
||||||
|
# export OPENAI_API_KEY="your-api-key-here"
|
||||||
|
|
||||||
|
# Test data
|
||||||
|
conference_text = "[Title]: COLING 2025 Conference\n[URL]: https://coling2025.org/"
|
||||||
|
browser_text = "[Title]: Browser Use Tool\n[URL]: https://github.com/browser-use"
|
||||||
|
|
||||||
|
# Two queries with same intent but different wording
|
||||||
|
query1 = "Tell me my browser history about some conference i often visit"
|
||||||
|
query2 = "browser history about conference I often visit"
|
||||||
|
|
||||||
|
texts = [query1, query2, conference_text, browser_text]
|
||||||
|
|
||||||
|
|
||||||
|
def cosine_similarity(a, b):
|
||||||
|
return np.dot(a, b) # Already normalized
|
||||||
|
|
||||||
|
|
||||||
|
def analyze_embeddings(embeddings, model_name):
|
||||||
|
print(f"\n=== {model_name} Results ===")
|
||||||
|
|
||||||
|
# Results for Query 1
|
||||||
|
sim1_conf = cosine_similarity(embeddings[0], embeddings[2])
|
||||||
|
sim1_browser = cosine_similarity(embeddings[0], embeddings[3])
|
||||||
|
|
||||||
|
print(f"Query 1: '{query1}'")
|
||||||
|
print(f" → Conference similarity: {sim1_conf:.4f} {'✓' if sim1_conf > sim1_browser else ''}")
|
||||||
|
print(
|
||||||
|
f" → Browser similarity: {sim1_browser:.4f} {'✓' if sim1_browser > sim1_conf else ''}"
|
||||||
|
)
|
||||||
|
print(f" Winner: {'Conference' if sim1_conf > sim1_browser else 'Browser'}")
|
||||||
|
|
||||||
|
# Results for Query 2
|
||||||
|
sim2_conf = cosine_similarity(embeddings[1], embeddings[2])
|
||||||
|
sim2_browser = cosine_similarity(embeddings[1], embeddings[3])
|
||||||
|
|
||||||
|
print(f"\nQuery 2: '{query2}'")
|
||||||
|
print(f" → Conference similarity: {sim2_conf:.4f} {'✓' if sim2_conf > sim2_browser else ''}")
|
||||||
|
print(
|
||||||
|
f" → Browser similarity: {sim2_browser:.4f} {'✓' if sim2_browser > sim2_conf else ''}"
|
||||||
|
)
|
||||||
|
print(f" Winner: {'Conference' if sim2_conf > sim2_browser else 'Browser'}")
|
||||||
|
|
||||||
|
# Show the impact
|
||||||
|
print("\n=== Impact Analysis ===")
|
||||||
|
print(f"Conference similarity change: {sim2_conf - sim1_conf:+.4f}")
|
||||||
|
print(f"Browser similarity change: {sim2_browser - sim1_browser:+.4f}")
|
||||||
|
|
||||||
|
if sim1_conf > sim1_browser and sim2_browser > sim2_conf:
|
||||||
|
print("❌ FLIP: Adding 'browser history' flips winner from Conference to Browser!")
|
||||||
|
elif sim1_conf > sim1_browser and sim2_conf > sim2_browser:
|
||||||
|
print("✅ STABLE: Conference remains winner in both queries")
|
||||||
|
elif sim1_browser > sim1_conf and sim2_browser > sim2_conf:
|
||||||
|
print("✅ STABLE: Browser remains winner in both queries")
|
||||||
|
else:
|
||||||
|
print("🔄 MIXED: Results vary between queries")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"query1_conf": sim1_conf,
|
||||||
|
"query1_browser": sim1_browser,
|
||||||
|
"query2_conf": sim2_conf,
|
||||||
|
"query2_browser": sim2_browser,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Test Sentence Transformers
|
||||||
|
print("Testing Sentence Transformers (facebook/contriever)...")
|
||||||
|
try:
|
||||||
|
st_embeddings = compute_embeddings(texts, "facebook/contriever", mode="sentence-transformers")
|
||||||
|
st_results = analyze_embeddings(st_embeddings, "Sentence Transformers (facebook/contriever)")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Sentence Transformers failed: {e}")
|
||||||
|
st_results = None
|
||||||
|
|
||||||
|
# Test OpenAI
|
||||||
|
print("\n" + "=" * 60)
|
||||||
|
print("Testing OpenAI (text-embedding-3-small)...")
|
||||||
|
try:
|
||||||
|
openai_embeddings = compute_embeddings(texts, "text-embedding-3-small", mode="openai")
|
||||||
|
openai_results = analyze_embeddings(openai_embeddings, "OpenAI (text-embedding-3-small)")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ OpenAI failed: {e}")
|
||||||
|
openai_results = None
|
||||||
|
|
||||||
|
# Compare results
|
||||||
|
if st_results and openai_results:
|
||||||
|
print("\n" + "=" * 60)
|
||||||
|
print("=== COMPARISON SUMMARY ===")
|
||||||
459
docs/configuration-guide.md
Normal file
@@ -0,0 +1,459 @@
|
|||||||
|
# LEANN Configuration Guide
|
||||||
|
|
||||||
|
This guide helps you optimize LEANN for different use cases and understand the trade-offs between various configuration options.
|
||||||
|
|
||||||
|
## Getting Started: Simple is Better
|
||||||
|
|
||||||
|
When first trying LEANN, start with a small dataset to quickly validate your approach:
|
||||||
|
|
||||||
|
**For document RAG**: The default `data/` directory works perfectly: it includes two AI research papers, the novel Pride and Prejudice, and a technical report
|
||||||
|
```bash
|
||||||
|
python -m apps.document_rag --query "What techniques does LEANN use?"
|
||||||
|
```
|
||||||
|
|
||||||
|
**For other data sources**: Limit the dataset size for quick testing
|
||||||
|
```bash
|
||||||
|
# WeChat: Test with recent messages only
|
||||||
|
python -m apps.wechat_rag --max-items 100 --query "What did we discuss about the project timeline?"
|
||||||
|
|
||||||
|
# Browser history: Last few days
|
||||||
|
python -m apps.browser_rag --max-items 500 --query "Find documentation about vector databases"
|
||||||
|
|
||||||
|
# Email: Recent inbox
|
||||||
|
python -m apps.email_rag --max-items 200 --query "Who sent updates about the deployment status?"
|
||||||
|
```
|
||||||
|
|
||||||
|
Once validated, scale up gradually:
|
||||||
|
- 100 documents → 1,000 → 10,000 → full dataset (`--max-items -1`)
|
||||||
|
- This helps identify issues early before committing to long processing times
|
||||||
|
|
||||||
|
## Embedding Model Selection: Understanding the Trade-offs
|
||||||
|
|
||||||
|
Based on our experience developing LEANN, embedding models fall into three categories:
|
||||||
|
|
||||||
|
### Small Models (< 100M parameters)
|
||||||
|
**Example**: `sentence-transformers/all-MiniLM-L6-v2` (22M params)
|
||||||
|
- **Pros**: Lightweight, fast for both indexing and inference
|
||||||
|
- **Cons**: Lower semantic understanding, may miss nuanced relationships
|
||||||
|
- **Use when**: Speed is critical, handling simple queries, interactive mode, or just experimenting with LEANN. If time is not a constraint, consider using a larger/better embedding model
|
||||||
|
|
||||||
|
### Medium Models (100M-500M parameters)
|
||||||
|
**Example**: `facebook/contriever` (110M params), `BAAI/bge-base-en-v1.5` (110M params)
|
||||||
|
- **Pros**: Balanced performance, good multilingual support, reasonable speed
|
||||||
|
- **Cons**: Requires more compute than small models
|
||||||
|
- **Use when**: Need quality results without extreme compute requirements, general-purpose RAG applications
|
||||||
|
|
||||||
|
### Large Models (500M+ parameters)
|
||||||
|
**Example**: `Qwen/Qwen3-Embedding-0.6B` (600M params), `intfloat/multilingual-e5-large` (560M params)
|
||||||
|
- **Pros**: Best semantic understanding, captures complex relationships, excellent multilingual support. **Qwen3-Embedding-0.6B achieves nearly OpenAI API performance!**
|
||||||
|
- **Cons**: Slower inference, longer index build times
|
||||||
|
- **Use when**: Quality is paramount and you have sufficient compute resources. **Highly recommended** for production use
|
||||||
|
|
||||||
|
### Quick Start: Cloud and Local Embedding Options
|
||||||
|
|
||||||
|
**OpenAI Embeddings (Fastest Setup)**
|
||||||
|
For immediate testing without local model downloads (also a good choice if you [do not have a GPU](https://github.com/yichuan-w/LEANN/issues/43) and are not concerned about your documents leaving your machine; embeddings are computed and recomputed via the OpenAI API):
|
||||||
|
```bash
|
||||||
|
# Set OpenAI embeddings (requires OPENAI_API_KEY)
|
||||||
|
--embedding-mode openai --embedding-model text-embedding-3-small
|
||||||
|
```
|
||||||
|
|
||||||
|
**Ollama Embeddings (Privacy-Focused)**
|
||||||
|
For local embeddings with complete privacy:
|
||||||
|
```bash
|
||||||
|
# First, pull an embedding model
|
||||||
|
ollama pull nomic-embed-text
|
||||||
|
|
||||||
|
# Use Ollama embeddings
|
||||||
|
--embedding-mode ollama --embedding-model nomic-embed-text
|
||||||
|
```
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary><strong>Cloud vs Local Trade-offs</strong></summary>
|
||||||
|
|
||||||
|
**OpenAI Embeddings** (`text-embedding-3-small/large`)
|
||||||
|
- **Pros**: No local compute needed, consistently fast, high quality
|
||||||
|
- **Cons**: Requires API key, costs money, data leaves your system, [known limitations with certain languages](https://yichuan-w.github.io/blog/lessons_learned_in_dev_leann/)
|
||||||
|
- **When to use**: Prototyping, non-sensitive data, need immediate results
|
||||||
|
|
||||||
|
**Local Embeddings**
|
||||||
|
- **Pros**: Complete privacy, no ongoing costs, full control, can sometimes outperform OpenAI embeddings
|
||||||
|
- **Cons**: Slower than cloud APIs, requires local compute resources
|
||||||
|
- **When to use**: Production systems, sensitive data, cost-sensitive applications
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## Local & Remote Inference Endpoints
|
||||||
|
|
||||||
|
> Applies to both LLMs (`leann ask`) and embeddings (`leann build`).
|
||||||
|
|
||||||
|
LEANN now treats Ollama, LM Studio, and other OpenAI-compatible runtimes as first-class providers. You can point LEANN at any compatible endpoint – either on the same machine or across the network – with a couple of flags or environment variables.
|
||||||
|
|
||||||
|
### One-Time Environment Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Works for OpenAI-compatible runtimes such as LM Studio, vLLM, SGLang, llamafile, etc.
|
||||||
|
export OPENAI_API_KEY="your-key" # or leave unset for local servers that do not check keys
|
||||||
|
export OPENAI_BASE_URL="http://localhost:1234/v1"
|
||||||
|
|
||||||
|
# Ollama-compatible runtimes (Ollama, Ollama on another host, llamacpp-server, etc.)
|
||||||
|
export LEANN_OLLAMA_HOST="http://localhost:11434" # falls back to OLLAMA_HOST or LOCAL_LLM_ENDPOINT
|
||||||
|
```
|
||||||
|
|
||||||
|
LEANN also recognises `LEANN_LOCAL_LLM_HOST` (highest priority), `LEANN_OPENAI_BASE_URL`, and `LOCAL_OPENAI_BASE_URL`, so existing scripts continue to work.
|
||||||
|
|
||||||
|
### Passing Hosts Per Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build an index with a remote embedding server
|
||||||
|
leann build my-notes \
|
||||||
|
--docs ./notes \
|
||||||
|
--embedding-mode openai \
|
||||||
|
--embedding-model text-embedding-qwen3-embedding-0.6b \
|
||||||
|
--embedding-api-base http://192.168.1.50:1234/v1 \
|
||||||
|
--embedding-api-key local-dev-key
|
||||||
|
|
||||||
|
# Query using a local LM Studio instance via OpenAI-compatible API
|
||||||
|
leann ask my-notes \
|
||||||
|
--llm openai \
|
||||||
|
--llm-model qwen3-8b \
|
||||||
|
--api-base http://localhost:1234/v1 \
|
||||||
|
--api-key local-dev-key
|
||||||
|
|
||||||
|
# Query an Ollama instance running on another box
|
||||||
|
leann ask my-notes \
|
||||||
|
--llm ollama \
|
||||||
|
--llm-model qwen3:14b \
|
||||||
|
--host http://192.168.1.101:11434
|
||||||
|
```
|
||||||
|
|
||||||
|
⚠️ **Make sure the endpoint is reachable**: when your inference server runs on a home/workstation and the index/search job runs in the cloud, the server must be able to reach the host you configured. Typical options include:
|
||||||
|
|
||||||
|
- Expose a public IP (and open the relevant port) on the machine that hosts LM Studio/Ollama.
|
||||||
|
- Configure router or cloud provider port forwarding.
|
||||||
|
- Tunnel traffic through tools like `tailscale`, `cloudflared`, or `ssh -R`.
|
||||||
|
|
||||||
|
When you set these options while building an index, LEANN stores them in `meta.json`. Any subsequent `leann ask` or searcher process automatically reuses the same provider settings – even when we spawn background embedding servers. This makes the “server without GPU talking to my local workstation” workflow from [issue #80](https://github.com/yichuan-w/LEANN/issues/80#issuecomment-2287230548) work out-of-the-box.
|
||||||
|
|
||||||
|
**Tip:** If your runtime does not require an API key (many local stacks don’t), leave `--api-key` unset. LEANN will skip injecting credentials.
|
||||||
|
|
||||||
|
### Python API Usage
|
||||||
|
|
||||||
|
You can pass the same configuration from Python:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from leann.api import LeannBuilder
|
||||||
|
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name="hnsw",
|
||||||
|
embedding_mode="openai",
|
||||||
|
embedding_model="text-embedding-qwen3-embedding-0.6b",
|
||||||
|
embedding_options={
|
||||||
|
"base_url": "http://192.168.1.50:1234/v1",
|
||||||
|
"api_key": "local-dev-key",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
builder.build_index("./indexes/my-notes", chunks)
|
||||||
|
```
|
||||||
|
|
||||||
|
`embedding_options` is persisted to the index `meta.json`, so subsequent `LeannSearcher` or `LeannChat` sessions automatically reuse the same provider settings (the embedding server manager forwards them to the provider for you).
|
||||||
|
|
||||||
|
## Index Selection: Matching Your Scale
|
||||||
|
|
||||||
|
### HNSW (Hierarchical Navigable Small World)
|
||||||
|
**Best for**: Small to medium datasets (< 10M vectors) - **Default and recommended for extreme low storage**
|
||||||
|
- Full recomputation required
|
||||||
|
- High memory usage during build phase
|
||||||
|
- Excellent recall (95%+)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Optimal for most use cases
|
||||||
|
--backend-name hnsw --graph-degree 32 --build-complexity 64
|
||||||
|
```
|
||||||
|
|
||||||
|
### DiskANN
|
||||||
|
**Best for**: Large datasets, especially when you want `recompute=True`.
|
||||||
|
|
||||||
|
**Key advantages:**
|
||||||
|
- **Faster search** on large datasets (3x+ speedup vs HNSW in many cases)
|
||||||
|
- **Smart storage**: `recompute=True` enables automatic graph partitioning for smaller indexes
|
||||||
|
- **Better scaling**: Designed for 100k+ documents
|
||||||
|
|
||||||
|
**Recompute behavior:**
|
||||||
|
- `recompute=True` (recommended): Pure PQ traversal + final reranking - faster and enables partitioning
|
||||||
|
- `recompute=False`: PQ + partial real distances during traversal - slower but higher accuracy
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Recommended for most use cases
|
||||||
|
--backend-name diskann --graph-degree 32 --build-complexity 64
|
||||||
|
```
|
||||||
|
|
||||||
|
**Performance Benchmark**: Run `uv run benchmarks/diskann_vs_hnsw_speed_comparison.py` to compare DiskANN and HNSW on your system.
|
||||||
|
|
||||||
|
## LLM Selection: Engine and Model Comparison
|
||||||
|
|
||||||
|
### LLM Engines
|
||||||
|
|
||||||
|
**OpenAI** (`--llm openai`)
|
||||||
|
- **Pros**: Best quality, consistent performance, no local resources needed
|
||||||
|
- **Cons**: Costs money ($0.15-2.5 per million tokens), requires internet, data privacy concerns
|
||||||
|
- **Models**: `gpt-4o-mini` (fast, cheap), `gpt-4o` (best quality), `o3` (reasoning), `o3-mini` (reasoning, cheaper)
|
||||||
|
- **Thinking Budget**: Use `--thinking-budget low/medium/high` for o-series reasoning models (o3, o3-mini, o4-mini)
|
||||||
|
- **Note**: Our current default, but we recommend switching to Ollama for most use cases
|
||||||
|
|
||||||
|
**Ollama** (`--llm ollama`)
|
||||||
|
- **Pros**: Fully local, free, privacy-preserving, good model variety
|
||||||
|
- **Cons**: Requires local GPU/CPU resources, slower than cloud APIs, and you need to install the separate [Ollama app](https://github.com/ollama/ollama?tab=readme-ov-file#ollama) and pre-download models with `ollama pull`
|
||||||
|
- **Models**: `qwen3:0.6b` (ultra-fast), `qwen3:1.7b` (balanced), `qwen3:4b` (good quality), `qwen3:7b` (high quality), `deepseek-r1:1.5b` (reasoning)
|
||||||
|
- **Thinking Budget**: Use `--thinking-budget low/medium/high` for reasoning models like GPT-Oss:20b
|
||||||
|
|
||||||
|
**HuggingFace** (`--llm hf`)
|
||||||
|
- **Pros**: Free tier available, huge model selection, direct model loading (vs Ollama's server-based approach)
|
||||||
|
- **Cons**: More complex initial setup
|
||||||
|
- **Models**: `Qwen/Qwen3-1.7B-FP8`
|
||||||
|
|
||||||
|
## Parameter Tuning Guide
|
||||||
|
|
||||||
|
### Search Complexity Parameters
|
||||||
|
|
||||||
|
**`--build-complexity`** (index building)
|
||||||
|
- Controls thoroughness during index construction
|
||||||
|
- Higher = better recall but slower build
|
||||||
|
- Recommendations:
|
||||||
|
- 32: Quick prototyping
|
||||||
|
- 64: Balanced (default)
|
||||||
|
- 128: Production systems
|
||||||
|
- 256: Maximum quality
|
||||||
|
|
||||||
|
**`--search-complexity`** (query time)
|
||||||
|
- Controls search thoroughness
|
||||||
|
- Higher = better results but slower
|
||||||
|
- Recommendations:
|
||||||
|
- 16: Fast/Interactive search
|
||||||
|
- 32: High quality with diversity
|
||||||
|
- 64+: Maximum accuracy
|
||||||
|
|
||||||
|
### Top-K Selection
|
||||||
|
|
||||||
|
**`--top-k`** (number of retrieved chunks)
|
||||||
|
- More chunks = better context but slower LLM processing
|
||||||
|
- Should always be smaller than `--search-complexity`
|
||||||
|
- Guidelines:
|
||||||
|
- 10-20: General questions (default: 20)
|
||||||
|
- 30+: Complex multi-hop reasoning requiring comprehensive context
|
||||||
|
|
||||||
|
**Trade-off formula**:
|
||||||
|
- Retrieval time ∝ log(n) × search_complexity
|
||||||
|
- LLM processing time ∝ top_k × chunk_size
|
||||||
|
- Total context = top_k × chunk_size tokens
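As a quick illustration of the last relationship (the numbers below are illustrative, not required defaults):

```python
# Rough context-size estimate
top_k = 20
chunk_size = 256           # tokens per retrieved chunk
print(top_k * chunk_size)  # 5120 tokens of retrieved context fed to the LLM
```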
|
||||||
|
|
||||||
|
### Thinking Budget for Reasoning Models
|
||||||
|
|
||||||
|
**`--thinking-budget`** (reasoning effort level)
|
||||||
|
- Controls the computational effort for reasoning models
|
||||||
|
- Options: `low`, `medium`, `high`
|
||||||
|
- Guidelines:
|
||||||
|
- `low`: Fast responses, basic reasoning (default for simple queries)
|
||||||
|
- `medium`: Balanced speed and reasoning depth
|
||||||
|
- `high`: Maximum reasoning effort, best for complex analytical questions
|
||||||
|
- **Supported Models**:
|
||||||
|
- **Ollama**: `gpt-oss:20b`, `gpt-oss:120b`
|
||||||
|
- **OpenAI**: `o3`, `o3-mini`, `o4-mini`, `o1` (o-series reasoning models)
|
||||||
|
- **Note**: Models without reasoning support will show a warning and proceed without reasoning parameters
|
||||||
|
- **Example**: `--thinking-budget high` for complex analytical questions
|
||||||
|
|
||||||
|
**📖 For detailed usage examples and implementation details, check out [Thinking Budget Documentation](THINKING_BUDGET_FEATURE.md)**
|
||||||
|
|
||||||
|
**💡 Quick Examples:**
|
||||||
|
```bash
|
||||||
|
# OpenAI o-series reasoning model
|
||||||
|
python apps/document_rag.py --query "What are the main techniques LEANN explores?" \
|
||||||
|
--index-dir hnswbuild --backend hnsw \
|
||||||
|
--llm openai --llm-model o3 --thinking-budget medium
|
||||||
|
|
||||||
|
# Ollama reasoning model
|
||||||
|
python apps/document_rag.py --query "What are the main techniques LEANN explores?" \
|
||||||
|
--index-dir hnswbuild --backend hnsw \
|
||||||
|
--llm ollama --llm-model gpt-oss:20b --thinking-budget high
|
||||||
|
```
|
||||||
|
|
||||||
|
### Graph Degree (HNSW/DiskANN)
|
||||||
|
|
||||||
|
**`--graph-degree`**
|
||||||
|
- Number of connections per node in the graph
|
||||||
|
- Higher = better recall but more memory
|
||||||
|
- HNSW: 16-32 (default: 32)
|
||||||
|
- DiskANN: 32-128 (default: 64)
|
||||||
|
|
||||||
|
|
||||||
|
## Performance Optimization Checklist
|
||||||
|
|
||||||
|
### If Embedding is Too Slow
|
||||||
|
|
||||||
|
1. **Switch to smaller model**:
|
||||||
|
```bash
|
||||||
|
# From large model
|
||||||
|
--embedding-model Qwen/Qwen3-Embedding-0.6B
|
||||||
|
# To small model
|
||||||
|
--embedding-model sentence-transformers/all-MiniLM-L6-v2
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Limit dataset size for testing**:
|
||||||
|
```bash
|
||||||
|
--max-items 1000 # Process first 1k items only
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Use MLX on Apple Silicon** (optional optimization):
|
||||||
|
```bash
|
||||||
|
--embedding-mode mlx --embedding-model mlx-community/Qwen3-Embedding-0.6B-8bit
|
||||||
|
```
|
||||||
|
MLX may not be the best choice: in our tests it offered only about a 1.3x speedup over Hugging Face, so Ollama is often a better option for embedding generation
|
||||||
|
|
||||||
|
4. **Use Ollama**
|
||||||
|
```bash
|
||||||
|
--embedding-mode ollama --embedding-model nomic-embed-text
|
||||||
|
```
|
||||||
|
To discover additional embedding models in Ollama, check out https://ollama.com/search?c=embedding or read more about embedding models at https://ollama.com/blog/embedding-models. Be sure to pick a model size that works well for your hardware.
|
||||||
|
### If Search Quality is Poor
|
||||||
|
|
||||||
|
1. **Increase retrieval count**:
|
||||||
|
```bash
|
||||||
|
--top-k 30 # Retrieve more candidates
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Upgrade embedding model**:
|
||||||
|
```bash
|
||||||
|
# For English
|
||||||
|
--embedding-model BAAI/bge-base-en-v1.5
|
||||||
|
# For multilingual
|
||||||
|
--embedding-model intfloat/multilingual-e5-large
|
||||||
|
```
|
||||||
|
|
||||||
|
## Understanding the Trade-offs
|
||||||
|
|
||||||
|
Every configuration choice involves trade-offs:
|
||||||
|
|
||||||
|
| Factor | Small/Fast | Large/Quality |
|
||||||
|
|--------|------------|---------------|
|
||||||
|
| Embedding Model | `all-MiniLM-L6-v2` | `Qwen/Qwen3-Embedding-0.6B` |
|
||||||
|
| Chunk Size | 512 tokens | 128 tokens |
|
||||||
|
| Index Type | HNSW | DiskANN |
|
||||||
|
| LLM | `qwen3:1.7b` | `gpt-4o` |
|
||||||
|
|
||||||
|
The key is finding the right balance for your specific use case. Start small and simple, measure performance, then scale up only where needed.
|
||||||
|
|
||||||
|
## Low-resource setups
|
||||||
|
|
||||||
|
If you don’t have a local GPU or builds/searches are too slow, use one or more of the options below.
|
||||||
|
|
||||||
|
### 1) Use OpenAI embeddings (no local compute)
|
||||||
|
|
||||||
|
Fastest path with zero local GPU requirements. Set your API key and use OpenAI embeddings during build and search:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export OPENAI_API_KEY=sk-...
|
||||||
|
|
||||||
|
# Build with OpenAI embeddings
|
||||||
|
leann build my-index \
|
||||||
|
--embedding-mode openai \
|
||||||
|
--embedding-model text-embedding-3-small
|
||||||
|
|
||||||
|
# Search with OpenAI embeddings (recompute at query time)
|
||||||
|
leann search my-index "your query" \
|
||||||
|
--recompute
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2) Run remote builds with SkyPilot (cloud GPU)
|
||||||
|
|
||||||
|
Offload embedding generation and index building to a GPU VM using [SkyPilot](https://skypilot.readthedocs.io/en/latest/). A template is provided at `sky/leann-build.yaml`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# One-time: install and configure SkyPilot
|
||||||
|
pip install skypilot
|
||||||
|
|
||||||
|
# Launch with defaults (L4:1) and mount ./data to ~/leann-data; the build runs automatically
|
||||||
|
sky launch -c leann-gpu sky/leann-build.yaml
|
||||||
|
|
||||||
|
# Override parameters via -e key=value (optional)
|
||||||
|
sky launch -c leann-gpu sky/leann-build.yaml \
|
||||||
|
-e index_name=my-index \
|
||||||
|
-e backend=hnsw \
|
||||||
|
-e embedding_mode=sentence-transformers \
|
||||||
|
-e embedding_model=Qwen/Qwen3-Embedding-0.6B
|
||||||
|
|
||||||
|
# Copy the built index back to your local .leann (use rsync)
|
||||||
|
rsync -Pavz leann-gpu:~/.leann/indexes/my-index ./.leann/indexes/
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3) Disable recomputation to trade storage for speed
|
||||||
|
|
||||||
|
If you need lower latency and have more storage/memory, disable recomputation. This stores full embeddings and avoids recomputing at search time.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build without recomputation (HNSW requires non-compact in this mode)
|
||||||
|
leann build my-index --no-recompute --no-compact
|
||||||
|
|
||||||
|
# Search without recomputation
|
||||||
|
leann search my-index "your query" --no-recompute
|
||||||
|
```
|
||||||
|
|
||||||
|
When to use:
|
||||||
|
- Extreme low latency requirements (high QPS, interactive assistants)
|
||||||
|
- Read-heavy workloads where storage is cheaper than latency
|
||||||
|
- No always-available GPU
|
||||||
|
|
||||||
|
Constraints:
|
||||||
|
- HNSW: when `--no-recompute` is set, LEANN automatically disables compact mode during build
|
||||||
|
- DiskANN: supported; `--no-recompute` skips selective recompute during search
|
||||||
|
|
||||||
|
Storage impact:
|
||||||
|
- Storing N embeddings of dimension D with float32 requires approximately N × D × 4 bytes
|
||||||
|
- Example: 1,000,000 chunks × 768 dims × 4 bytes ≈ 2.86 GB (plus graph/metadata)
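A quick back-of-the-envelope check of that example:

```python
# Storage estimate for stored float32 embeddings (excluding graph/metadata)
num_chunks = 1_000_000
dims = 768
bytes_total = num_chunks * dims * 4
print(f"{bytes_total / 1024**3:.2f} GiB")  # ~2.86 GiB
```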
|
||||||
|
|
||||||
|
Converting an existing index (rebuild required):
|
||||||
|
```bash
|
||||||
|
# Rebuild in-place (ensure you still have original docs or can regenerate chunks)
|
||||||
|
leann build my-index --force --no-recompute --no-compact
|
||||||
|
```
|
||||||
|
|
||||||
|
Python API usage:
|
||||||
|
```python
|
||||||
|
from leann import LeannSearcher
|
||||||
|
|
||||||
|
searcher = LeannSearcher("/path/to/my-index.leann")
|
||||||
|
results = searcher.search("your query", top_k=10, recompute_embeddings=False)
|
||||||
|
```
|
||||||
|
|
||||||
|
Trade-offs:
|
||||||
|
- Lower latency and fewer network hops at query time
|
||||||
|
- Significantly higher storage (10–100× vs selective recomputation)
|
||||||
|
- Slightly larger memory footprint during build and search
|
||||||
|
|
||||||
|
Quick benchmark results (`benchmarks/benchmark_no_recompute.py` with 5k texts, complexity=32):
|
||||||
|
|
||||||
|
- HNSW
|
||||||
|
|
||||||
|
```text
|
||||||
|
recompute=True: search_time=0.818s, size=1.1MB
|
||||||
|
recompute=False: search_time=0.012s, size=16.6MB
|
||||||
|
```
|
||||||
|
|
||||||
|
- DiskANN
|
||||||
|
|
||||||
|
```text
|
||||||
|
recompute=True: search_time=0.041s, size=5.9MB
|
||||||
|
recompute=False: search_time=0.013s, size=24.6MB
|
||||||
|
```
|
||||||
|
|
||||||
|
Conclusion:
|
||||||
|
- **HNSW**: `no-recompute` is significantly faster (no embedding recomputation) but requires much more storage (stores all embeddings)
|
||||||
|
- **DiskANN**: `no-recompute` uses PQ + partial real distances during traversal (slower but higher accuracy), while `recompute=True` uses pure PQ traversal + final reranking (faster traversal, enables build-time partitioning for smaller storage)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Further Reading
|
||||||
|
|
||||||
|
- [Lessons Learned Developing LEANN](https://yichuan-w.github.io/blog/lessons_learned_in_dev_leann/)
|
||||||
|
- [LEANN Technical Paper](https://arxiv.org/abs/2506.08276)
|
||||||
|
- [DiskANN Original Paper](https://papers.nips.cc/paper/2019/file/09853c7fb1d3f8ee67a61b6bf4a7f8e6-Paper.pdf)
|
||||||
|
- [SSD-based Graph Partitioning](https://github.com/SonglinLife/SSD_BASED_PLAN)
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
# 🤝 Contributing
|
|
||||||
|
|
||||||
We welcome contributions! Leann is built by the community, for the community.
|
|
||||||
|
|
||||||
## Ways to Contribute
|
|
||||||
|
|
||||||
- 🐛 **Bug Reports**: Found an issue? Let us know!
|
|
||||||
- 💡 **Feature Requests**: Have an idea? We'd love to hear it!
|
|
||||||
- 🔧 **Code Contributions**: PRs welcome for all skill levels
|
|
||||||
- 📖 **Documentation**: Help make Leann more accessible
|
|
||||||
- 🧪 **Benchmarks**: Share your performance results
|
|
||||||
@@ -3,9 +3,10 @@
|
|||||||
## 🔥 Core Features
|
## 🔥 Core Features
|
||||||
|
|
||||||
- **🔄 Real-time Embeddings** - Eliminate heavy embedding storage with dynamic computation using optimized ZMQ servers and highly optimized search paradigm (overlapping and batching) with highly optimized embedding engine
|
- **🔄 Real-time Embeddings** - Eliminate heavy embedding storage with dynamic computation using optimized ZMQ servers and highly optimized search paradigm (overlapping and batching) with highly optimized embedding engine
|
||||||
|
- **🧠 AST-Aware Code Chunking** - Intelligent code chunking that preserves semantic boundaries (functions, classes, methods) for Python, Java, C#, and TypeScript files
|
||||||
- **📈 Scalable Architecture** - Handles millions of documents on consumer hardware; the larger your dataset, the more LEANN can save
|
- **📈 Scalable Architecture** - Handles millions of documents on consumer hardware; the larger your dataset, the more LEANN can save
|
||||||
- **🎯 Graph Pruning** - Advanced techniques to minimize the storage overhead of vector search to a limited footprint
|
- **🎯 Graph Pruning** - Advanced techniques to minimize the storage overhead of vector search to a limited footprint
|
||||||
- **🏗️ Pluggable Backends** - DiskANN, HNSW/FAISS with unified API
|
- **🏗️ Pluggable Backends** - HNSW/FAISS (default), with optional DiskANN for large-scale deployments
|
||||||
|
|
||||||
## 🛠️ Technical Highlights
|
## 🛠️ Technical Highlights
|
||||||
- **🔄 Recompute Mode** - Highest accuracy scenarios while eliminating vector storage overhead
|
- **🔄 Recompute Mode** - Highest accuracy scenarios while eliminating vector storage overhead
|
||||||
@@ -13,7 +14,7 @@
|
|||||||
- **🚀 High-throughput Embedding Pipeline** - Optimized batched processing for maximum efficiency
|
- **🚀 High-throughput Embedding Pipeline** - Optimized batched processing for maximum efficiency
|
||||||
- **🎯 Two-level Search** - Novel coarse-to-fine search overlap for accelerated query processing (optional)
|
- **🎯 Two-level Search** - Novel coarse-to-fine search overlap for accelerated query processing (optional)
|
||||||
- **💾 Memory-mapped Indices** - Fast startup with raw text mapping to reduce memory overhead
|
- **💾 Memory-mapped Indices** - Fast startup with raw text mapping to reduce memory overhead
|
||||||
- **🚀 MLX Support** - Ultra-fast recompute/build with quantized embedding models, accelerating building and search ([minimal example](test/build_mlx_index.py))
|
- **🚀 MLX Support** - Ultra-fast recompute/build with quantized embedding models, accelerating building and search ([minimal example](../examples/mlx_demo.py))
|
||||||
|
|
||||||
## 🎨 Developer Experience
|
## 🎨 Developer Experience
|
||||||
|
|
||||||
|
|||||||
149
docs/grep_search.md
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
# LEANN Grep Search Usage Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
LEANN's grep search functionality provides exact text matching for finding specific code patterns, error messages, function names, or exact phrases in your indexed documents.
|
||||||
|
|
||||||
|
## Basic Usage
|
||||||
|
|
||||||
|
### Simple Grep Search
|
||||||
|
|
||||||
|
```python
|
||||||
|
from leann.api import LeannSearcher
|
||||||
|
|
||||||
|
searcher = LeannSearcher("your_index_path")
|
||||||
|
|
||||||
|
# Exact text search
|
||||||
|
results = searcher.search("def authenticate_user", use_grep=True, top_k=5)
|
||||||
|
|
||||||
|
for result in results:
|
||||||
|
print(f"Score: {result.score}")
|
||||||
|
print(f"Text: {result.text[:100]}...")
|
||||||
|
print("-" * 40)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Comparison: Semantic vs Grep Search
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Semantic search - finds conceptually similar content
|
||||||
|
semantic_results = searcher.search("machine learning algorithms", top_k=3)
|
||||||
|
|
||||||
|
# Grep search - finds exact text matches
|
||||||
|
grep_results = searcher.search("def train_model", use_grep=True, top_k=3)
|
||||||
|
```
|
||||||
|
|
||||||
|
## When to Use Grep Search
|
||||||
|
|
||||||
|
### Use Cases
|
||||||
|
|
||||||
|
- **Code Search**: Finding specific function definitions, class names, or variable references
|
||||||
|
- **Error Debugging**: Locating exact error messages or stack traces
|
||||||
|
- **Documentation**: Finding specific API endpoints or exact terminology
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Find function definitions
|
||||||
|
functions = searcher.search("def __init__", use_grep=True)
|
||||||
|
|
||||||
|
# Find import statements
|
||||||
|
imports = searcher.search("from sklearn import", use_grep=True)
|
||||||
|
|
||||||
|
# Find specific error types
|
||||||
|
errors = searcher.search("FileNotFoundError", use_grep=True)
|
||||||
|
|
||||||
|
# Find TODO comments
|
||||||
|
todos = searcher.search("TODO:", use_grep=True)
|
||||||
|
|
||||||
|
# Find configuration entries
|
||||||
|
configs = searcher.search("server_port=", use_grep=True)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Technical Details
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
1. **File Location**: Grep search operates on the raw text stored in `.jsonl` files
|
||||||
|
2. **Command Execution**: Uses the system `grep` command with case-insensitive search
|
||||||
|
3. **Result Processing**: Parses JSON lines and extracts text and metadata
|
||||||
|
4. **Scoring**: Simple frequency-based scoring based on query term occurrences
|
||||||
|
|
||||||
|
### Search Process
|
||||||
|
|
||||||
|
```
|
||||||
|
Query: "def train_model"
|
||||||
|
↓
|
||||||
|
grep -i -n "def train_model" documents.leann.passages.jsonl
|
||||||
|
↓
|
||||||
|
Parse matching JSON lines
|
||||||
|
↓
|
||||||
|
Calculate scores based on term frequency
|
||||||
|
↓
|
||||||
|
Return top_k results
|
||||||
|
```
|
||||||
|
|
||||||
|
### Scoring Algorithm
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Term frequency in document
|
||||||
|
score = text.lower().count(query.lower())
|
||||||
|
```
|
||||||
|
|
||||||
|
Results are ranked by score (highest first), with higher scores indicating more occurrences of the search term.
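The whole pipeline is small enough to sketch end to end. The snippet below is an illustrative reimplementation of the process described above; the passages path and result shape are assumptions, not LEANN's internal API.

```python
# Illustrative grep-search pipeline: grep the passages JSONL, parse matches,
# score by term frequency, and return the top_k results.
import json
import subprocess

def grep_search(passages_path: str, query: str, top_k: int = 5) -> list[dict]:
    proc = subprocess.run(
        ["grep", "-i", "-n", query, passages_path],
        capture_output=True, text=True,
    )
    results = []
    for line in proc.stdout.splitlines():
        _, _, raw = line.partition(":")  # each match looks like "<line no>:<json passage>"
        try:
            passage = json.loads(raw)
        except json.JSONDecodeError:
            continue
        text = passage.get("text", "")
        score = text.lower().count(query.lower())  # simple term-frequency score
        results.append({"text": text, "metadata": passage.get("metadata", {}), "score": score})
    results.sort(key=lambda r: r["score"], reverse=True)
    return results[:top_k]
```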
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
#### Grep Command Not Found
|
||||||
|
```
|
||||||
|
RuntimeError: grep command not found. Please install grep or use semantic search.
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution**: Install grep on your system:
|
||||||
|
- **Ubuntu/Debian**: `sudo apt-get install grep`
|
||||||
|
- **macOS**: grep is pre-installed
|
||||||
|
- **Windows**: Use WSL or install grep via Git Bash/MSYS2
|
||||||
|
|
||||||
|
#### No Results Found
|
||||||
|
```python
|
||||||
|
# Check if your query exists in the raw data
|
||||||
|
results = searcher.search("your_query", use_grep=True)
|
||||||
|
if not results:
|
||||||
|
print("No exact matches found. Try:")
|
||||||
|
print("1. Check spelling and case")
|
||||||
|
print("2. Use partial terms")
|
||||||
|
print("3. Switch to semantic search")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Complete Example
|
||||||
|
|
||||||
|
```python
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Grep Search Example
|
||||||
|
Demonstrates grep search for exact text matching.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from leann.api import LeannSearcher
|
||||||
|
|
||||||
|
def demonstrate_grep_search():
|
||||||
|
# Initialize searcher
|
||||||
|
searcher = LeannSearcher("my_index")
|
||||||
|
|
||||||
|
print("=== Function Search ===")
|
||||||
|
functions = searcher.search("def __init__", use_grep=True, top_k=5)
|
||||||
|
for i, result in enumerate(functions, 1):
|
||||||
|
print(f"{i}. Score: {result.score}")
|
||||||
|
print(f" Preview: {result.text[:60]}...")
|
||||||
|
print()
|
||||||
|
|
||||||
|
print("=== Error Search ===")
|
||||||
|
errors = searcher.search("FileNotFoundError", use_grep=True, top_k=3)
|
||||||
|
for result in errors:
|
||||||
|
print(f"Content: {result.text.strip()}")
|
||||||
|
print("-" * 40)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
demonstrate_grep_search()
|
||||||
|
```
|
||||||
300
docs/metadata_filtering.md
Normal file
@@ -0,0 +1,300 @@
|
|||||||
|
# LEANN Metadata Filtering Usage Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
LEANN provides metadata filtering that lets you filter search results by arbitrary metadata fields set during chunking. This enables use cases such as spoiler-free book search, document filtering by date or type, code search by file type, and much more.
|
||||||
|
|
||||||
|
## Basic Usage
|
||||||
|
|
||||||
|
### Adding Metadata to Your Documents
|
||||||
|
|
||||||
|
When building your index, add metadata to each text chunk:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from leann.api import LeannBuilder
|
||||||
|
|
||||||
|
builder = LeannBuilder("hnsw")
|
||||||
|
|
||||||
|
# Add text with metadata
|
||||||
|
builder.add_text(
|
||||||
|
text="Chapter 1: Alice falls down the rabbit hole",
|
||||||
|
metadata={
|
||||||
|
"chapter": 1,
|
||||||
|
"character": "Alice",
|
||||||
|
"themes": ["adventure", "curiosity"],
|
||||||
|
"word_count": 150
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
builder.build_index("alice_in_wonderland_index")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Searching with Metadata Filters
|
||||||
|
|
||||||
|
Use the `metadata_filters` parameter in search calls:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from leann.api import LeannSearcher
|
||||||
|
|
||||||
|
searcher = LeannSearcher("alice_in_wonderland_index")
|
||||||
|
|
||||||
|
# Search with filters
|
||||||
|
results = searcher.search(
|
||||||
|
query="What happens to Alice?",
|
||||||
|
top_k=10,
|
||||||
|
metadata_filters={
|
||||||
|
"chapter": {"<=": 5}, # Only chapters 1-5
|
||||||
|
"spoiler_level": {"!=": "high"} # No high spoilers
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Filter Syntax
|
||||||
|
|
||||||
|
### Basic Structure
|
||||||
|
|
||||||
|
```python
|
||||||
|
metadata_filters = {
|
||||||
|
"field_name": {"operator": value},
|
||||||
|
"another_field": {"operator": value}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supported Operators
|
||||||
|
|
||||||
|
#### Comparison Operators
|
||||||
|
- `"=="`: Equal to
|
||||||
|
- `"!="`: Not equal to
|
||||||
|
- `"<"`: Less than
|
||||||
|
- `"<="`: Less than or equal
|
||||||
|
- `">"`: Greater than
|
||||||
|
- `">="`: Greater than or equal
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Examples
|
||||||
|
{"chapter": {"==": 1}} # Exactly chapter 1
|
||||||
|
{"page": {">": 100}} # Pages after 100
|
||||||
|
{"rating": {">=": 4.0}} # Rating 4.0 or higher
|
||||||
|
{"word_count": {"<": 500}} # Short passages
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Membership Operators
|
||||||
|
- `"in"`: Value is in list
|
||||||
|
- `"not_in"`: Value is not in list
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Examples
|
||||||
|
{"character": {"in": ["Alice", "Bob"]}} # Alice OR Bob
|
||||||
|
{"genre": {"not_in": ["horror", "thriller"]}} # Exclude genres
|
||||||
|
{"tags": {"in": ["fiction", "adventure"]}} # Any of these tags
|
||||||
|
```
|
||||||
|
|
||||||
|
#### String Operators
|
||||||
|
- `"contains"`: String contains substring
|
||||||
|
- `"starts_with"`: String starts with prefix
|
||||||
|
- `"ends_with"`: String ends with suffix
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Examples
|
||||||
|
{"title": {"contains": "alice"}} # Title contains "alice"
|
||||||
|
{"filename": {"ends_with": ".py"}} # Python files
|
||||||
|
{"author": {"starts_with": "Dr."}} # Authors with "Dr." prefix
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Boolean Operators
|
||||||
|
- `"is_true"`: Field is truthy
|
||||||
|
- `"is_false"`: Field is falsy
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Examples
|
||||||
|
{"is_published": {"is_true": True}} # Published content
|
||||||
|
{"is_draft": {"is_false": False}} # Not drafts
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multiple Operators on Same Field
|
||||||
|
|
||||||
|
You can apply multiple operators to the same field (AND logic):
|
||||||
|
|
||||||
|
```python
|
||||||
|
metadata_filters = {
|
||||||
|
"word_count": {
|
||||||
|
">=": 100, # At least 100 words
|
||||||
|
"<=": 500 # At most 500 words
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Compound Filters
|
||||||
|
|
||||||
|
Multiple fields are combined with AND logic:
|
||||||
|
|
||||||
|
```python
|
||||||
|
metadata_filters = {
|
||||||
|
"chapter": {"<=": 10}, # Up to chapter 10
|
||||||
|
"character": {"==": "Alice"}, # About Alice
|
||||||
|
"spoiler_level": {"!=": "high"} # No major spoilers
|
||||||
|
}
|
||||||
|
```
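To make the semantics concrete, here is a small illustrative evaluator for this syntax, applied post-search to each result's metadata. It is a sketch of the behavior described above, not LEANN's internal implementation:

```python
# Illustrative filter evaluator: all fields, and all operators on a field, use AND logic.
def matches(metadata: dict, filters: dict) -> bool:
    ops = {
        "==": lambda v, t: v == t,
        "!=": lambda v, t: v != t,
        "<": lambda v, t: v < t,
        "<=": lambda v, t: v <= t,
        ">": lambda v, t: v > t,
        ">=": lambda v, t: v >= t,
        "in": lambda v, t: v in t,
        "not_in": lambda v, t: v not in t,
        "contains": lambda v, t: t in v,
        "starts_with": lambda v, t: v.startswith(t),
        "ends_with": lambda v, t: v.endswith(t),
        "is_true": lambda v, t: bool(v),
        "is_false": lambda v, t: not bool(v),
    }
    for field, conditions in filters.items():
        value = metadata.get(field)
        for op, target in conditions.items():
            if value is None or not ops[op](value, target):
                return False
    return True

# Example: a chapter-3 chunk passes a "chapters 1-5, Alice only" filter
assert matches(
    {"chapter": 3, "character": "Alice"},
    {"chapter": {"<=": 5}, "character": {"==": "Alice"}},
)
```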
|
||||||
|
|
||||||
|
## Use Case Examples
|
||||||
|
|
||||||
|
### 1. Spoiler-Free Book Search
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Reader has only read up to chapter 5
|
||||||
|
def search_spoiler_free(query, max_chapter):
|
||||||
|
return searcher.search(
|
||||||
|
query=query,
|
||||||
|
metadata_filters={
|
||||||
|
"chapter": {"<=": max_chapter},
|
||||||
|
"spoiler_level": {"in": ["none", "low"]}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
results = search_spoiler_free("What happens to Alice?", max_chapter=5)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Document Management by Date
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Find recent documents
|
||||||
|
recent_docs = searcher.search(
|
||||||
|
query="project updates",
|
||||||
|
metadata_filters={
|
||||||
|
"date": {">=": "2024-01-01"},
|
||||||
|
"document_type": {"==": "report"}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Code Search by File Type
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Search only Python files
|
||||||
|
python_code = searcher.search(
|
||||||
|
query="authentication function",
|
||||||
|
metadata_filters={
|
||||||
|
"file_extension": {"==": ".py"},
|
||||||
|
"lines_of_code": {"<": 100}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Content Filtering by Audience
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Age-appropriate content
|
||||||
|
family_content = searcher.search(
|
||||||
|
query="adventure stories",
|
||||||
|
metadata_filters={
|
||||||
|
"age_rating": {"in": ["G", "PG"]},
|
||||||
|
"content_warnings": {"not_in": ["violence", "adult_themes"]}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Multi-Book Series Management
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Search across first 3 books only
|
||||||
|
early_series = searcher.search(
|
||||||
|
query="character development",
|
||||||
|
metadata_filters={
|
||||||
|
"series": {"==": "Harry Potter"},
|
||||||
|
"book_number": {"<=": 3}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Running the Example
|
||||||
|
|
||||||
|
You can see metadata filtering in action with our spoiler-free book RAG example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Don't forget to set up the environment
|
||||||
|
uv venv
|
||||||
|
source .venv/bin/activate
|
||||||
|
|
||||||
|
# Set your OpenAI API key (required for embeddings, but you can update the example locally and use ollama instead)
|
||||||
|
export OPENAI_API_KEY="your-api-key-here"
|
||||||
|
|
||||||
|
# Run the spoiler-free book RAG example
|
||||||
|
uv run examples/spoiler_free_book_rag.py
|
||||||
|
```
|
||||||
|
|
||||||
|
This example demonstrates:
|
||||||
|
- Building an index with metadata (chapter numbers, characters, themes, locations)
|
||||||
|
- Searching with filters to avoid spoilers (e.g., only show results up to chapter 5)
|
||||||
|
- Different scenarios for readers at various points in the book
|
||||||
|
|
||||||
|
The example uses Alice's Adventures in Wonderland as sample data and shows how you can search for information without revealing plot points from later chapters.
|
||||||
|
|
||||||
|
## Advanced Patterns
|
||||||
|
|
||||||
|
### Custom Chunking with Metadata
|
||||||
|
|
||||||
|
```python
|
||||||
|
def chunk_book_with_metadata(book_text, book_info):
|
||||||
|
chunks = []
|
||||||
|
|
||||||
|
for chapter_num, chapter_text in parse_chapters(book_text):
|
||||||
|
# Extract entities, themes, etc.
|
||||||
|
characters = extract_characters(chapter_text)
|
||||||
|
themes = classify_themes(chapter_text)
|
||||||
|
spoiler_level = assess_spoiler_level(chapter_text, chapter_num)
|
||||||
|
|
||||||
|
# Create chunks with rich metadata
|
||||||
|
for paragraph in split_paragraphs(chapter_text):
|
||||||
|
chunks.append({
|
||||||
|
"text": paragraph,
|
||||||
|
"metadata": {
|
||||||
|
"book_title": book_info["title"],
|
||||||
|
"chapter": chapter_num,
|
||||||
|
"characters": characters,
|
||||||
|
"themes": themes,
|
||||||
|
"spoiler_level": spoiler_level,
|
||||||
|
"word_count": len(paragraph.split()),
|
||||||
|
"reading_level": calculate_reading_level(paragraph)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return chunks
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
### Efficient Filtering Strategies
|
||||||
|
|
||||||
|
1. **Post-search filtering**: Applies filters after vector search, which should be efficient for typical result sets (10-100 results).
|
||||||
|
|
||||||
|
2. **Metadata design**: Keep metadata fields simple and avoid deeply nested structures.
|
||||||
|
|
||||||
|
### Best Practices
|
||||||
|
|
||||||
|
1. **Consistent metadata schema**: Use consistent field names and value types across your documents.
|
||||||
|
|
||||||
|
2. **Reasonable metadata size**: Keep metadata reasonably sized to avoid storage overhead.
|
||||||
|
|
||||||
|
3. **Type consistency**: Use consistent data types for the same fields (e.g., always integers for chapter numbers).
|
||||||
|
|
||||||
|
4. **Index multiple granularities**: Consider chunking at different levels (paragraph, section, chapter) with appropriate metadata.
|
||||||
|
|
||||||
|
### Adding Metadata to Existing Indices

To add metadata filtering to existing indices, you'll need to rebuild them with metadata:

```python
# Read existing passages and add metadata
def add_metadata_to_existing_chunks(chunks):
    for chunk in chunks:
        # Extract or assign metadata based on content
        chunk["metadata"] = extract_metadata(chunk["text"])
    return chunks

# Rebuild index with metadata
enhanced_chunks = add_metadata_to_existing_chunks(existing_chunks)
builder = LeannBuilder(backend_name="hnsw")
for chunk in enhanced_chunks:
    builder.add_text(chunk["text"], metadata=chunk["metadata"])
builder.build_index("enhanced_index")
```

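The `extract_metadata` helper above is left to you; what it returns depends entirely on your corpus. A minimal illustrative sketch (the regex and fields are assumptions, not part of the LEANN API):

```python
import re

def extract_metadata(text: str) -> dict:
    """Derive simple metadata fields from raw chunk text."""
    chapter_match = re.search(r"Chapter\s+(\d+)", text)
    return {
        "chapter": int(chapter_match.group(1)) if chapter_match else 0,
        "word_count": len(text.split()),
    }
```
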
75
docs/normalized_embeddings.md
Normal file
@@ -0,0 +1,75 @@
# Normalized Embeddings Support in LEANN

LEANN now automatically detects normalized embedding models and sets the appropriate distance metric for optimal performance.

## What are Normalized Embeddings?

Normalized embeddings are vectors with L2 norm = 1 (unit vectors). These embeddings are optimized for cosine similarity rather than Maximum Inner Product Search (MIPS).

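You can check this property directly. A quick sketch, assuming `sentence-transformers` is installed and using a model whose pipeline normalizes its output:

```python
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
vec = model.encode(["hello world"])[0]
print(np.linalg.norm(vec))  # ~1.0 for a normalized model
```
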
## Automatic Detection

When you create a `LeannBuilder` instance with a normalized embedding model, LEANN will:

1. **Automatically set `distance_metric="cosine"`** if not specified
2. **Show a warning** if you manually specify a different distance metric
3. **Provide optimal search performance** with the correct metric

## Supported Normalized Embedding Models

### OpenAI
All OpenAI text embedding models are normalized:
- `text-embedding-ada-002`
- `text-embedding-3-small`
- `text-embedding-3-large`

### Voyage AI
All Voyage AI embedding models are normalized:
- `voyage-2`
- `voyage-3`
- `voyage-large-2`
- `voyage-multilingual-2`
- `voyage-code-2`

### Cohere
All Cohere embedding models are normalized:
- `embed-english-v3.0`
- `embed-multilingual-v3.0`
- `embed-english-light-v3.0`
- `embed-multilingual-light-v3.0`

## Example Usage

```python
from leann.api import LeannBuilder

# Automatic detection - will use cosine distance
builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="text-embedding-3-small",
    embedding_mode="openai"
)
# Warning: Detected normalized embeddings model 'text-embedding-3-small'...
# Automatically setting distance_metric='cosine'

# Manual override (not recommended)
builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="text-embedding-3-small",
    embedding_mode="openai",
    distance_metric="mips"  # Will show warning
)
# Warning: Using 'mips' distance metric with normalized embeddings...
```

## Non-Normalized Embeddings

Models like `facebook/contriever` and other sentence-transformers models that are not normalized will continue to use MIPS by default, which is optimal for them.

## Why This Matters

Using the wrong distance metric with normalized embeddings can lead to:
- **Poor search quality** due to HNSW's early termination with narrow score ranges
- **Incorrect ranking** of search results
- **Suboptimal performance** compared to using the correct metric

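For unit vectors the inner product *is* the cosine similarity, so the ranking itself agrees across metrics; the trouble is how the scores are distributed. A quick numpy illustration of the narrow score band (random unit vectors, purely illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
docs = rng.normal(size=(1000, 384))
docs /= np.linalg.norm(docs, axis=1, keepdims=True)  # unit vectors
query = docs[0]

scores = docs[1:] @ query  # inner product == cosine similarity here
print(scores.min(), scores.max())  # scores crowd into a narrow band around 0
```

With most scores packed into such a narrow range, HNSW's early-termination heuristics can stop exploring too soon unless the index is built with the cosine metric.
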
For more details on why this happens, see our analysis in the [embedding detection code](../packages/leann-core/src/leann/api.py), which automatically handles normalized embeddings and MIPS distance metric issues.
@@ -2,8 +2,8 @@
 
 ## 🎯 Q2 2025
 
-- [X] DiskANN backend with MIPS/L2/Cosine support
 - [X] HNSW backend integration
+- [X] DiskANN backend with MIPS/L2/Cosine support
 - [X] Real-time embedding pipeline
 - [X] Memory-efficient graph pruning
 
0
examples/__init__.py
Normal file
@@ -1,16 +1,23 @@
 """
 Simple demo showing basic leann usage
-Run: uv run python examples/simple_demo.py
+Run: uv run python examples/basic_demo.py
 """
 
 import argparse
-from leann import LeannBuilder, LeannSearcher, LeannChat
+
+from leann import LeannBuilder, LeannChat, LeannSearcher
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Simple demo of Leann with selectable embedding models.")
-    parser.add_argument("--embedding_model", type=str, default="sentence-transformers/all-mpnet-base-v2",
-                        help="The embedding model to use, e.g., 'sentence-transformers/all-mpnet-base-v2' or 'text-embedding-ada-002'.")
+    parser = argparse.ArgumentParser(
+        description="Simple demo of Leann with selectable embedding models."
+    )
+    parser.add_argument(
+        "--embedding_model",
+        type=str,
+        default="sentence-transformers/all-mpnet-base-v2",
+        help="The embedding model to use, e.g., 'sentence-transformers/all-mpnet-base-v2' or 'text-embedding-ada-002'.",
+    )
     args = parser.parse_args()
 
     print(f"=== Leann Simple Demo with {args.embedding_model} ===")
@@ -74,7 +81,7 @@ def main():
     print()
 
     print("Demo completed! Try running:")
-    print("   uv run python examples/document_search.py")
+    print("   uv run python apps/document_rag.py")
 
 
 if __name__ == "__main__":
@@ -1,146 +0,0 @@
#!/usr/bin/env python3
"""
Document search demo with recompute mode
"""

import os
from pathlib import Path
import shutil
import time

# Import backend packages to trigger plugin registration
try:
    import leann_backend_diskann
    import leann_backend_hnsw
    print("INFO: Backend packages imported successfully.")
except ImportError as e:
    print(f"WARNING: Could not import backend packages. Error: {e}")

# Import upper-level API from leann-core
from leann.api import LeannBuilder, LeannSearcher, LeannChat


def load_sample_documents():
    """Create sample documents for demonstration"""
    docs = [
        {"title": "Intro to Python", "content": "Python is a high-level, interpreted language known for simplicity."},
        {"title": "ML Basics", "content": "Machine learning builds systems that learn from data."},
        {"title": "Data Structures", "content": "Data structures like arrays, lists, and graphs organize data."},
    ]
    return docs

def main():
    print("==========================================================")
    print("=== Leann Document Search Demo (DiskANN + Recompute) ===")
    print("==========================================================")

    INDEX_DIR = Path("./test_indices")
    INDEX_PATH = str(INDEX_DIR / "documents.diskann")
    BACKEND_TO_TEST = "diskann"

    if INDEX_DIR.exists():
        print(f"--- Cleaning up old index directory: {INDEX_DIR} ---")
        shutil.rmtree(INDEX_DIR)

    # --- 1. Build index ---
    print(f"\n[PHASE 1] Building index using '{BACKEND_TO_TEST}' backend...")

    builder = LeannBuilder(
        backend_name=BACKEND_TO_TEST,
        graph_degree=32,
        complexity=64
    )

    documents = load_sample_documents()
    print(f"Loaded {len(documents)} sample documents.")
    for doc in documents:
        builder.add_text(doc["content"], metadata={"title": doc["title"]})

    builder.build_index(INDEX_PATH)
    print(f"\nIndex built!")

    # --- 2. Basic search demo ---
    print(f"\n[PHASE 2] Basic search using '{BACKEND_TO_TEST}' backend...")
    searcher = LeannSearcher(index_path=INDEX_PATH)

    query = "What is machine learning?"
    print(f"\nQuery: '{query}'")

    print("\n--- Basic search mode (PQ computation) ---")
    start_time = time.time()
    results = searcher.search(query, top_k=2)
    basic_time = time.time() - start_time

    print(f"⏱️ Basic search time: {basic_time:.3f} seconds")
    print(">>> Basic search results <<<")
    for i, res in enumerate(results, 1):
        print(f"  {i}. ID: {res.id}, Score: {res.score:.4f}, Text: '{res.text}', Metadata: {res.metadata}")

    # --- 3. Recompute search demo ---
    print(f"\n[PHASE 3] Recompute search using embedding server...")

    print("\n--- Recompute search mode (get real embeddings via network) ---")

    # Configure recompute parameters
    recompute_params = {
        "recompute_beighbor_embeddings": True,  # Enable network recomputation
        "USE_DEFERRED_FETCH": False,  # Don't use deferred fetch
        "skip_search_reorder": True,  # Skip search reordering
        "dedup_node_dis": True,  # Enable node distance deduplication
        "prune_ratio": 0.1,  # Pruning ratio 10%
        "batch_recompute": False,  # Don't use batch recomputation
        "global_pruning": False,  # Don't use global pruning
        "zmq_port": 5555,  # ZMQ port
        "embedding_model": "sentence-transformers/all-mpnet-base-v2"
    }

    print("Recompute parameter configuration:")
    for key, value in recompute_params.items():
        print(f"  {key}: {value}")

    print(f"\n🔄 Executing Recompute search...")
    try:
        start_time = time.time()
        recompute_results = searcher.search(query, top_k=2, **recompute_params)
        recompute_time = time.time() - start_time

        print(f"⏱️ Recompute search time: {recompute_time:.3f} seconds")
        print(">>> Recompute search results <<<")
        for i, res in enumerate(recompute_results, 1):
            print(f"  {i}. ID: {res.id}, Score: {res.score:.4f}, Text: '{res.text}', Metadata: {res.metadata}")

        # Compare results
        print(f"\n--- Result comparison ---")
        print(f"Basic search time: {basic_time:.3f} seconds")
        print(f"Recompute time: {recompute_time:.3f} seconds")

        print("\nBasic search vs Recompute results:")
        for i in range(min(len(results), len(recompute_results))):
            basic_score = results[i].score
            recompute_score = recompute_results[i].score
            score_diff = abs(basic_score - recompute_score)
            print(f"  Position {i+1}: PQ={basic_score:.4f}, Recompute={recompute_score:.4f}, Difference={score_diff:.4f}")

        if recompute_time > basic_time:
            print(f"✅ Recompute mode working correctly (more accurate but slower)")
        else:
            print(f"ℹ️ Recompute time is unusually fast, network recomputation may not be enabled")

    except Exception as e:
        print(f"❌ Recompute search failed: {e}")
        print("This usually indicates an embedding server connection issue")

    # --- 4. Chat demo ---
    print(f"\n[PHASE 4] Starting chat session...")
    chat = LeannChat(index_path=INDEX_PATH)
    chat_response = chat.ask(query)
    print(f"You: {query}")
    print(f"Leann: {chat_response}")

    print("\n==========================================================")
    print("✅ Demo finished successfully!")
    print("==========================================================")


if __name__ == "__main__":
    main()
404
examples/dynamic_update_no_recompute.py
Normal file
@@ -0,0 +1,404 @@
"""Dynamic HNSW update demo without compact storage.

This script reproduces the minimal scenario we used while debugging on-the-fly
recompute:

1. Build a non-compact HNSW index from the first few paragraphs of a text file.
2. Print the top results with `recompute_embeddings=True`.
3. Append additional paragraphs with :meth:`LeannBuilder.update_index`.
4. Run the same query again to show the newly inserted passages.

Run it with ``uv`` (optionally pointing LEANN_HNSW_LOG_PATH at a file to inspect
ZMQ activity)::

    LEANN_HNSW_LOG_PATH=embedding_fetch.log \
    uv run -m examples.dynamic_update_no_recompute \
        --index-path .leann/examples/leann-demo.leann

By default the script builds an index from ``data/2501.14312v1 (1).pdf`` and
then updates it with LEANN-related material from ``data/2506.08276v1.pdf``.
It issues the query "What's LEANN?" before and after the update to show how the
new passages become immediately searchable. The script uses the
``sentence-transformers/all-MiniLM-L6-v2`` model with ``is_recompute=True`` so
Faiss pulls existing vectors on demand via the ZMQ embedding server, while
freshly added passages are embedded locally just like the initial build.

To make storage comparisons easy, the script can also build a matching
``is_recompute=False`` baseline (enabled by default) and report the index size
delta after the update. Disable the baseline run with
``--skip-compare-no-recompute`` if you only need the recompute flow.
"""

import argparse
import json
from collections.abc import Iterable
from pathlib import Path
from typing import Any

from leann.api import LeannBuilder, LeannSearcher
from leann.registry import register_project_directory

from apps.chunking import create_text_chunks

REPO_ROOT = Path(__file__).resolve().parents[1]

DEFAULT_QUERY = "What's LEANN?"
DEFAULT_INITIAL_FILES = [REPO_ROOT / "data" / "2501.14312v1 (1).pdf"]
DEFAULT_UPDATE_FILES = [REPO_ROOT / "data" / "2506.08276v1.pdf"]


def load_chunks_from_files(paths: list[Path]) -> list[str]:
    from llama_index.core import SimpleDirectoryReader

    documents = []
    for path in paths:
        p = path.expanduser().resolve()
        if not p.exists():
            raise FileNotFoundError(f"Input path not found: {p}")
        if p.is_dir():
            reader = SimpleDirectoryReader(str(p), recursive=False)
            documents.extend(reader.load_data(show_progress=True))
        else:
            reader = SimpleDirectoryReader(input_files=[str(p)])
            documents.extend(reader.load_data(show_progress=True))

    if not documents:
        return []

    chunks = create_text_chunks(
        documents,
        chunk_size=512,
        chunk_overlap=128,
        use_ast_chunking=False,
    )
    return [c for c in chunks if isinstance(c, str) and c.strip()]


def run_search(index_path: Path, query: str, top_k: int, *, recompute_embeddings: bool) -> list:
    searcher = LeannSearcher(str(index_path))
    try:
        return searcher.search(
            query=query,
            top_k=top_k,
            recompute_embeddings=recompute_embeddings,
            batch_size=16,
        )
    finally:
        searcher.cleanup()


def print_results(title: str, results: Iterable) -> None:
    print(f"\n=== {title} ===")
    res_list = list(results)
    print(f"results count: {len(res_list)}")
    print("passages:")
    if not res_list:
        print("  (no passages returned)")
    for res in res_list:
        snippet = res.text.replace("\n", " ")[:120]
        print(f"  - {res.id}: {snippet}... (score={res.score:.4f})")


def build_initial_index(
    index_path: Path,
    paragraphs: list[str],
    model_name: str,
    embedding_mode: str,
    is_recompute: bool,
) -> None:
    builder = LeannBuilder(
        backend_name="hnsw",
        embedding_model=model_name,
        embedding_mode=embedding_mode,
        is_compact=False,
        is_recompute=is_recompute,
    )
    for idx, passage in enumerate(paragraphs):
        builder.add_text(passage, metadata={"id": str(idx)})
    builder.build_index(str(index_path))


def update_index(
    index_path: Path,
    start_id: int,
    paragraphs: list[str],
    model_name: str,
    embedding_mode: str,
    is_recompute: bool,
) -> None:
    updater = LeannBuilder(
        backend_name="hnsw",
        embedding_model=model_name,
        embedding_mode=embedding_mode,
        is_compact=False,
        is_recompute=is_recompute,
    )
    for offset, passage in enumerate(paragraphs, start=start_id):
        updater.add_text(passage, metadata={"id": str(offset)})
    updater.update_index(str(index_path))


def ensure_index_dir(index_path: Path) -> None:
    index_path.parent.mkdir(parents=True, exist_ok=True)


def cleanup_index_files(index_path: Path) -> None:
    """Remove leftover index artifacts for a clean rebuild."""

    parent = index_path.parent
    if not parent.exists():
        return
    stem = index_path.stem
    for file in parent.glob(f"{stem}*"):
        if file.is_file():
            file.unlink()


def index_file_size(index_path: Path) -> int:
    """Return the size of the primary .index file for the given index path."""

    index_file = index_path.parent / f"{index_path.stem}.index"
    return index_file.stat().st_size if index_file.exists() else 0


def load_metadata_snapshot(index_path: Path) -> dict[str, Any] | None:
    meta_path = index_path.parent / f"{index_path.name}.meta.json"
    if not meta_path.exists():
        return None
    try:
        return json.loads(meta_path.read_text())
    except json.JSONDecodeError:
        return None


def run_workflow(
    *,
    label: str,
    index_path: Path,
    initial_paragraphs: list[str],
    update_paragraphs: list[str],
    model_name: str,
    embedding_mode: str,
    is_recompute: bool,
    query: str,
    top_k: int,
) -> dict[str, Any]:
    prefix = f"[{label}] " if label else ""

    ensure_index_dir(index_path)
    cleanup_index_files(index_path)

    print(f"{prefix}Building initial index...")
    build_initial_index(
        index_path,
        initial_paragraphs,
        model_name,
        embedding_mode,
        is_recompute=is_recompute,
    )

    initial_size = index_file_size(index_path)
    before_results = run_search(
        index_path,
        query,
        top_k,
        recompute_embeddings=is_recompute,
    )

    print(f"\n{prefix}Updating index with additional passages...")
    update_index(
        index_path,
        start_id=len(initial_paragraphs),
        paragraphs=update_paragraphs,
        model_name=model_name,
        embedding_mode=embedding_mode,
        is_recompute=is_recompute,
    )

    after_results = run_search(
        index_path,
        query,
        top_k,
        recompute_embeddings=is_recompute,
    )
    updated_size = index_file_size(index_path)

    return {
        "initial_size": initial_size,
        "updated_size": updated_size,
        "delta": updated_size - initial_size,
        "before_results": before_results,
        "after_results": after_results,
        "metadata": load_metadata_snapshot(index_path),
    }


def main() -> None:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--initial-files",
        type=Path,
        nargs="+",
        default=DEFAULT_INITIAL_FILES,
        help="Initial document files (PDF/TXT) used to build the base index",
    )
    parser.add_argument(
        "--index-path",
        type=Path,
        default=Path(".leann/examples/leann-demo.leann"),
        help="Destination index path (default: .leann/examples/leann-demo.leann)",
    )
    parser.add_argument(
        "--initial-count",
        type=int,
        default=8,
        help="Number of chunks to use from the initial documents (default: 8)",
    )
    parser.add_argument(
        "--update-files",
        type=Path,
        nargs="*",
        default=DEFAULT_UPDATE_FILES,
        help="Additional documents to add during update (PDF/TXT)",
    )
    parser.add_argument(
        "--update-count",
        type=int,
        default=4,
        help="Number of chunks to append from update documents (default: 4)",
    )
    parser.add_argument(
        "--update-text",
        type=str,
        default=(
            "LEANN (Lightweight Embedding ANN) is an indexing toolkit focused on "
            "recompute-aware HNSW graphs, allowing embeddings to be regenerated "
            "on demand to keep disk usage minimal."
        ),
        help="Fallback text to append if --update-files is omitted",
    )
    parser.add_argument(
        "--top-k",
        type=int,
        default=4,
        help="Number of results to show for each search (default: 4)",
    )
    parser.add_argument(
        "--query",
        type=str,
        default=DEFAULT_QUERY,
        help="Query to run before/after the update",
    )
    parser.add_argument(
        "--embedding-model",
        type=str,
        default="sentence-transformers/all-MiniLM-L6-v2",
        help="Embedding model name",
    )
    parser.add_argument(
        "--embedding-mode",
        type=str,
        default="sentence-transformers",
        choices=["sentence-transformers", "openai", "mlx", "ollama"],
        help="Embedding backend mode",
    )
    parser.add_argument(
        "--compare-no-recompute",
        dest="compare_no_recompute",
        action="store_true",
        help="Also run a baseline with is_recompute=False and report its index growth.",
    )
    parser.add_argument(
        "--skip-compare-no-recompute",
        dest="compare_no_recompute",
        action="store_false",
        help="Skip building the no-recompute baseline.",
    )
    parser.set_defaults(compare_no_recompute=True)
    args = parser.parse_args()

    ensure_index_dir(args.index_path)
    register_project_directory(REPO_ROOT)

    initial_chunks = load_chunks_from_files(list(args.initial_files))
    if not initial_chunks:
        raise ValueError("No text chunks extracted from the initial files.")

    initial = initial_chunks[: args.initial_count]
    if not initial:
        raise ValueError("Initial chunk set is empty after applying --initial-count.")

    if args.update_files:
        update_chunks = load_chunks_from_files(list(args.update_files))
        if not update_chunks:
            raise ValueError("No text chunks extracted from the update files.")
        to_add = update_chunks[: args.update_count]
    else:
        if not args.update_text:
            raise ValueError("Provide --update-files or --update-text for the update step.")
        to_add = [args.update_text]
    if not to_add:
        raise ValueError("Update chunk set is empty after applying --update-count.")

    recompute_stats = run_workflow(
        label="recompute",
        index_path=args.index_path,
        initial_paragraphs=initial,
        update_paragraphs=to_add,
        model_name=args.embedding_model,
        embedding_mode=args.embedding_mode,
        is_recompute=True,
        query=args.query,
        top_k=args.top_k,
    )

    print_results("initial search", recompute_stats["before_results"])
    print_results("after update", recompute_stats["after_results"])
    print(
        f"\n[recompute] Index file size change: {recompute_stats['initial_size']} -> {recompute_stats['updated_size']} bytes"
        f" (Δ {recompute_stats['delta']})"
    )

    if recompute_stats["metadata"]:
        meta_view = {k: recompute_stats["metadata"].get(k) for k in ("is_compact", "is_pruned")}
        print("[recompute] metadata snapshot:")
        print(json.dumps(meta_view, indent=2))

    if args.compare_no_recompute:
        baseline_path = (
            args.index_path.parent / f"{args.index_path.stem}-norecompute{args.index_path.suffix}"
        )
        baseline_stats = run_workflow(
            label="no-recompute",
            index_path=baseline_path,
            initial_paragraphs=initial,
            update_paragraphs=to_add,
            model_name=args.embedding_model,
            embedding_mode=args.embedding_mode,
            is_recompute=False,
            query=args.query,
            top_k=args.top_k,
        )

        print(
            f"\n[no-recompute] Index file size change: {baseline_stats['initial_size']} -> {baseline_stats['updated_size']} bytes"
            f" (Δ {baseline_stats['delta']})"
        )

        after_texts = [res.text for res in recompute_stats["after_results"]]
        baseline_after_texts = [res.text for res in baseline_stats["after_results"]]
        if after_texts == baseline_after_texts:
            print(
                "[no-recompute] Search results match recompute baseline; see above for the shared output."
            )
        else:
            print("[no-recompute] WARNING: search results differ from recompute baseline.")

        if baseline_stats["metadata"]:
            meta_view = {k: baseline_stats["metadata"].get(k) for k in ("is_compact", "is_pruned")}
            print("[no-recompute] metadata snapshot:")
            print(json.dumps(meta_view, indent=2))


if __name__ == "__main__":
    main()
@@ -1,122 +0,0 @@
import os
import email
from pathlib import Path
from typing import List, Any
from llama_index.core import Document
from llama_index.core.readers.base import BaseReader

def find_all_messages_directories(root: str = None) -> List[Path]:
    """
    Recursively find all 'Messages' directories under the given root.
    Returns a list of Path objects.
    """
    if root is None:
        # Auto-detect user's mail path
        home_dir = os.path.expanduser("~")
        root = os.path.join(home_dir, "Library", "Mail")

    messages_dirs = []
    for dirpath, dirnames, filenames in os.walk(root):
        if os.path.basename(dirpath) == "Messages":
            messages_dirs.append(Path(dirpath))
    return messages_dirs

class EmlxReader(BaseReader):
    """
    Apple Mail .emlx file reader with embedded metadata.

    Reads individual .emlx files from Apple Mail's storage format.
    """

    def __init__(self, include_html: bool = False) -> None:
        """
        Initialize.

        Args:
            include_html: Whether to include HTML content in the email body (default: False)
        """
        self.include_html = include_html

    def load_data(self, input_dir: str, **load_kwargs: Any) -> List[Document]:
        """
        Load data from the input directory containing .emlx files.

        Args:
            input_dir: Directory containing .emlx files
            **load_kwargs:
                max_count (int): Maximum amount of messages to read.
        """
        docs: List[Document] = []
        max_count = load_kwargs.get('max_count', 1000)
        count = 0

        # Walk through the directory recursively
        for dirpath, dirnames, filenames in os.walk(input_dir):
            # Skip hidden directories
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]

            for filename in filenames:
                if count >= max_count:
                    break

                if filename.endswith(".emlx"):
                    filepath = os.path.join(dirpath, filename)
                    try:
                        # Read the .emlx file
                        with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
                            content = f.read()

                        # .emlx files have a length prefix followed by the email content
                        # The first line contains the length, followed by the email
                        lines = content.split('\n', 1)
                        if len(lines) >= 2:
                            email_content = lines[1]

                            # Parse the email using Python's email module
                            try:
                                msg = email.message_from_string(email_content)

                                # Extract email metadata
                                subject = msg.get('Subject', 'No Subject')
                                from_addr = msg.get('From', 'Unknown')
                                to_addr = msg.get('To', 'Unknown')
                                date = msg.get('Date', 'Unknown')

                                # Extract email body
                                body = ""
                                if msg.is_multipart():
                                    for part in msg.walk():
                                        if part.get_content_type() == "text/plain" or part.get_content_type() == "text/html":
                                            if part.get_content_type() == "text/html" and not self.include_html:
                                                continue
                                            body += part.get_payload(decode=True).decode('utf-8', errors='ignore')
                                            # break
                                else:
                                    body = msg.get_payload(decode=True).decode('utf-8', errors='ignore')

                                # Create document content with metadata embedded in text
                                doc_content = f"""
[File]: {filename}
[From]: {from_addr}
[To]: {to_addr}
[Subject]: {subject}
[Date]: {date}
[EMAIL BODY Start]:
{body}
"""

                                # No separate metadata - everything is in the text
                                doc = Document(text=doc_content, metadata={})
                                docs.append(doc)
                                count += 1

                            except Exception as e:
                                print(f"Error parsing email from {filepath}: {e}")
                                continue

                    except Exception as e:
                        print(f"Error reading file {filepath}: {e}")
                        continue

        print(f"Loaded {len(docs)} email documents")
        return docs
@@ -1,286 +0,0 @@
import os
import asyncio
import argparse
try:
    import dotenv
    dotenv.load_dotenv()
except ModuleNotFoundError:
    # python-dotenv is not installed; skip loading environment variables
    dotenv = None
from pathlib import Path
from typing import List, Any
from leann.api import LeannBuilder, LeannSearcher, LeannChat
from llama_index.core.node_parser import SentenceSplitter

# dotenv.load_dotenv()  # handled above if python-dotenv is available

# Default Chrome profile path
DEFAULT_CHROME_PROFILE = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")

def create_leann_index_from_multiple_chrome_profiles(profile_dirs: List[Path], index_path: str = "chrome_history_index.leann", max_count: int = -1):
    """
    Create LEANN index from multiple Chrome profile data sources.

    Args:
        profile_dirs: List of Path objects pointing to Chrome profile directories
        index_path: Path to save the LEANN index
        max_count: Maximum number of history entries to process per profile
    """
    print("Creating LEANN index from multiple Chrome profile data sources...")

    # Load documents using ChromeHistoryReader from history_data
    from history_data.history import ChromeHistoryReader
    reader = ChromeHistoryReader()

    INDEX_DIR = Path(index_path).parent

    if not INDEX_DIR.exists():
        print(f"--- Index directory not found, building new index ---")
        all_documents = []
        total_processed = 0

        # Process each Chrome profile directory
        for i, profile_dir in enumerate(profile_dirs):
            print(f"\nProcessing Chrome profile {i+1}/{len(profile_dirs)}: {profile_dir}")

            try:
                documents = reader.load_data(
                    chrome_profile_path=str(profile_dir),
                    max_count=max_count
                )
                if documents:
                    print(f"Loaded {len(documents)} history documents from {profile_dir}")
                    all_documents.extend(documents)
                    total_processed += len(documents)

                    # Check if we've reached the max count
                    if max_count > 0 and total_processed >= max_count:
                        print(f"Reached max count of {max_count} documents")
                        break
                else:
                    print(f"No documents loaded from {profile_dir}")
            except Exception as e:
                print(f"Error processing {profile_dir}: {e}")
                continue

        if not all_documents:
            print("No documents loaded from any source. Exiting.")
            # Highlight the instruction: all Chrome windows must be closed first
            print("\033[91mYou need to close or quit all Chrome browsers before running this script\033[0m")
            return None

        print(f"\nTotal loaded {len(all_documents)} history documents from {len(profile_dirs)} profiles")

        # Create text splitter with 256 chunk size
        text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)

        # Convert Documents to text strings and chunk them
        all_texts = []
        for doc in all_documents:
            # Split the document into chunks
            nodes = text_splitter.get_nodes_from_documents([doc])
            for node in nodes:
                text = node.get_content()
                # text = '[Title] ' + doc.metadata["title"] + '\n' + text
                all_texts.append(text)

        print(f"Created {len(all_texts)} text chunks from {len(all_documents)} documents")

        # Create LEANN index directory
        print(f"--- Index directory not found, building new index ---")
        INDEX_DIR.mkdir(exist_ok=True)

        print(f"--- Building new LEANN index ---")

        print(f"\n[PHASE 1] Building Leann index...")

        # Use HNSW backend for better macOS compatibility
        builder = LeannBuilder(
            backend_name="hnsw",
            embedding_model="facebook/contriever",
            graph_degree=32,
            complexity=64,
            is_compact=True,
            is_recompute=True,
            num_threads=1  # Force single-threaded mode
        )

        print(f"Adding {len(all_texts)} history chunks to index...")
        for chunk_text in all_texts:
            builder.add_text(chunk_text)

        builder.build_index(index_path)
        print(f"\nLEANN index built at {index_path}!")
    else:
        print(f"--- Using existing index at {INDEX_DIR} ---")

    return index_path

def create_leann_index(profile_path: str = None, index_path: str = "chrome_history_index.leann", max_count: int = 1000):
    """
    Create LEANN index from Chrome history data.

    Args:
        profile_path: Path to the Chrome profile directory (optional, uses default if None)
        index_path: Path to save the LEANN index
        max_count: Maximum number of history entries to process
    """
    print("Creating LEANN index from Chrome history data...")
    INDEX_DIR = Path(index_path).parent

    if not INDEX_DIR.exists():
        print(f"--- Index directory not found, building new index ---")
        INDEX_DIR.mkdir(exist_ok=True)

        print(f"--- Building new LEANN index ---")

        print(f"\n[PHASE 1] Building Leann index...")

        # Load documents using ChromeHistoryReader from history_data
        from history_data.history import ChromeHistoryReader
        reader = ChromeHistoryReader()

        documents = reader.load_data(
            chrome_profile_path=profile_path,
            max_count=max_count
        )

        if not documents:
            print("No documents loaded. Exiting.")
            return None

        print(f"Loaded {len(documents)} history documents")

        # Create text splitter with 256 chunk size
        text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)

        # Convert Documents to text strings and chunk them
        all_texts = []
        for doc in documents:
            # Split the document into chunks
            nodes = text_splitter.get_nodes_from_documents([doc])
            for node in nodes:
                all_texts.append(node.get_content())

        print(f"Created {len(all_texts)} text chunks from {len(documents)} documents")

        # Create LEANN index directory
        print(f"--- Index directory not found, building new index ---")
        INDEX_DIR.mkdir(exist_ok=True)

        print(f"--- Building new LEANN index ---")

        print(f"\n[PHASE 1] Building Leann index...")

        # Use HNSW backend for better macOS compatibility
        builder = LeannBuilder(
            backend_name="hnsw",
            embedding_model="facebook/contriever",
            graph_degree=32,
            complexity=64,
            is_compact=True,
            is_recompute=True,
            num_threads=1  # Force single-threaded mode
        )

        print(f"Adding {len(all_texts)} history chunks to index...")
        for chunk_text in all_texts:
            builder.add_text(chunk_text)

        builder.build_index(index_path)
        print(f"\nLEANN index built at {index_path}!")
    else:
        print(f"--- Using existing index at {INDEX_DIR} ---")

    return index_path

async def query_leann_index(index_path: str, query: str):
    """
    Query the LEANN index.

    Args:
        index_path: Path to the LEANN index
        query: The query string
    """
    print(f"\n[PHASE 2] Starting Leann chat session...")
    chat = LeannChat(index_path=index_path)

    print(f"You: {query}")
    chat_response = chat.ask(
        query,
        top_k=10,
        recompute_beighbor_embeddings=True,
        complexity=32,
        beam_width=1,
        llm_config={
            "type": "openai",
            "model": "gpt-4o",
            "api_key": os.getenv("OPENAI_API_KEY"),
        },
        llm_kwargs={
            "temperature": 0.0,
            "max_tokens": 1000
        }
    )

    print(f"Leann chat response: \033[36m{chat_response}\033[0m")

async def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='LEANN Chrome History Reader - Create and query browser history index')
    parser.add_argument('--chrome-profile', type=str, default=DEFAULT_CHROME_PROFILE,
                        help=f"Path to Chrome profile directory (default: {DEFAULT_CHROME_PROFILE}); usually you don't need to change this")
    parser.add_argument('--index-dir', type=str, default="./google_history_index",
                        help='Directory to store the LEANN index (default: ./google_history_index)')
    parser.add_argument('--max-entries', type=int, default=1000,
                        help='Maximum number of history entries to process (default: 1000)')
    parser.add_argument('--query', type=str, default=None,
                        help='Single query to run (default: runs example queries)')
    parser.add_argument('--auto-find-profiles', action='store_true', default=True,
                        help='Automatically find all Chrome profiles (default: True)')

    args = parser.parse_args()

    INDEX_DIR = Path(args.index_dir)
    INDEX_PATH = str(INDEX_DIR / "chrome_history.leann")

    print(f"Using Chrome profile: {args.chrome_profile}")
    print(f"Index directory: {INDEX_DIR}")
    print(f"Max entries: {args.max_entries}")

    # Find Chrome profile directories
    from history_data.history import ChromeHistoryReader

    if args.auto_find_profiles:
        profile_dirs = ChromeHistoryReader.find_chrome_profiles()
        if not profile_dirs:
            print("No Chrome profiles found automatically. Exiting.")
            return
    else:
        # Use single specified profile
        profile_path = Path(args.chrome_profile)
        if not profile_path.exists():
            print(f"Chrome profile not found: {profile_path}")
            return
        profile_dirs = [profile_path]

    # Create or load the LEANN index from all sources
    index_path = create_leann_index_from_multiple_chrome_profiles(profile_dirs, INDEX_PATH, args.max_entries)

    if index_path:
        if args.query:
            # Run single query
            await query_leann_index(index_path, args.query)
        else:
            # Example queries
            queries = [
                "What websites did I visit about machine learning?",
                "Find my search history about programming"
            ]

            for query in queries:
                print("\n" + "="*60)
                await query_leann_index(index_path, query)

if __name__ == "__main__":
    asyncio.run(main())
35
examples/grep_search_example.py
Normal file
@@ -0,0 +1,35 @@
"""
Grep Search Example

Shows how to use grep-based text search instead of semantic search.
Useful when you need exact text matches rather than meaning-based results.
"""

from leann import LeannSearcher

# Load your index
searcher = LeannSearcher("my-documents.leann")

# Regular semantic search
print("=== Semantic Search ===")
results = searcher.search("machine learning algorithms", top_k=3)
for result in results:
    print(f"Score: {result.score:.3f}")
    print(f"Text: {result.text[:80]}...")
    print()

# Grep-based search for exact text matches
print("=== Grep Search ===")
results = searcher.search("def train_model", top_k=3, use_grep=True)
for result in results:
    print(f"Score: {result.score}")
    print(f"Text: {result.text[:80]}...")
    print()

# Find specific error messages
error_results = searcher.search("FileNotFoundError", use_grep=True)
print(f"Found {len(error_results)} files mentioning FileNotFoundError")

# Search for class definitions
func_results = searcher.search("class SearchResult", use_grep=True, top_k=5)
print(f"Found {len(func_results)} class definitions")
@@ -1,291 +0,0 @@
import os
import sys
import asyncio
import dotenv
import argparse
from pathlib import Path
from typing import List, Any

# Add the project root to Python path so we can import from examples
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from leann.api import LeannBuilder, LeannSearcher, LeannChat
from llama_index.core.node_parser import SentenceSplitter

dotenv.load_dotenv()

# Auto-detect user's mail path
def get_mail_path():
    """Get the mail path for the current user"""
    home_dir = os.path.expanduser("~")
    return os.path.join(home_dir, "Library", "Mail")

# Default mail path for macOS
DEFAULT_MAIL_PATH = "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data"

def create_leann_index_from_multiple_sources(messages_dirs: List[Path], index_path: str = "mail_index.leann", max_count: int = -1, include_html: bool = False, embedding_model: str = "facebook/contriever"):
    """
    Create LEANN index from multiple mail data sources.

    Args:
        messages_dirs: List of Path objects pointing to Messages directories
        index_path: Path to save the LEANN index
        max_count: Maximum number of emails to process per directory
        include_html: Whether to include HTML content in email processing
        embedding_model: Embedding model to use
    """
    print("Creating LEANN index from multiple mail data sources...")

    # Load documents using EmlxReader from LEANN_email_reader
    from examples.email_data.LEANN_email_reader import EmlxReader
    reader = EmlxReader(include_html=include_html)
    # from email_data.email import EmlxMboxReader
    # from pathlib import Path
    # reader = EmlxMboxReader()
    INDEX_DIR = Path(index_path).parent

    if not INDEX_DIR.exists():
        print(f"--- Index directory not found, building new index ---")
        all_documents = []
        total_processed = 0

        # Process each Messages directory
        for i, messages_dir in enumerate(messages_dirs):
            print(f"\nProcessing Messages directory {i+1}/{len(messages_dirs)}: {messages_dir}")

            try:
                documents = reader.load_data(messages_dir)
                if documents:
                    print(f"Loaded {len(documents)} email documents from {messages_dir}")
                    all_documents.extend(documents)
                    total_processed += len(documents)

                    # Check if we've reached the max count
                    if max_count > 0 and total_processed >= max_count:
                        print(f"Reached max count of {max_count} documents")
                        break
                else:
                    print(f"No documents loaded from {messages_dir}")
            except Exception as e:
                print(f"Error processing {messages_dir}: {e}")
                continue

        if not all_documents:
            print("No documents loaded from any source. Exiting.")
            return None

        print(f"\nTotal loaded {len(all_documents)} email documents from {len(messages_dirs)} directories and starting to split them into chunks")

        # Create text splitter with 256 chunk size
        text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)

        # Convert Documents to text strings and chunk them
        all_texts = []
        for doc in all_documents:
            # Split the document into chunks
            nodes = text_splitter.get_nodes_from_documents([doc])
            for node in nodes:
                text = node.get_content()
                # text = '[subject] ' + doc.metadata["subject"] + '\n' + text
                all_texts.append(text)

        print(f"Finished splitting {len(all_documents)} documents into {len(all_texts)} text chunks")

        # Create LEANN index directory

        print(f"--- Index directory not found, building new index ---")
        INDEX_DIR.mkdir(exist_ok=True)

        print(f"--- Building new LEANN index ---")

        print(f"\n[PHASE 1] Building Leann index...")

        # Use HNSW backend for better macOS compatibility
        builder = LeannBuilder(
            backend_name="hnsw",
            embedding_model=embedding_model,
            graph_degree=32,
            complexity=64,
            is_compact=True,
            is_recompute=True,
            num_threads=1  # Force single-threaded mode
        )

        print(f"Adding {len(all_texts)} email chunks to index...")
        for chunk_text in all_texts:
            builder.add_text(chunk_text)

        builder.build_index(index_path)
        print(f"\nLEANN index built at {index_path}!")
    else:
        print(f"--- Using existing index at {INDEX_DIR} ---")

    return index_path

def create_leann_index(mail_path: str, index_path: str = "mail_index.leann", max_count: int = 1000, include_html: bool = False, embedding_model: str = "facebook/contriever"):
    """
    Create LEANN index from mail data.

    Args:
        mail_path: Path to the mail directory
        index_path: Path to save the LEANN index
        max_count: Maximum number of emails to process
        include_html: Whether to include HTML content in email processing
    """
    print("Creating LEANN index from mail data...")
    INDEX_DIR = Path(index_path).parent

    if not INDEX_DIR.exists():
        print(f"--- Index directory not found, building new index ---")
        INDEX_DIR.mkdir(exist_ok=True)

        print(f"--- Building new LEANN index ---")

        print(f"\n[PHASE 1] Building Leann index...")

        # Load documents using EmlxReader from LEANN_email_reader
        from examples.email_data.LEANN_email_reader import EmlxReader
        reader = EmlxReader(include_html=include_html)
        # from email_data.email import EmlxMboxReader
        # from pathlib import Path
        # reader = EmlxMboxReader()
        documents = reader.load_data(Path(mail_path))

        if not documents:
            print("No documents loaded. Exiting.")
            return None

        print(f"Loaded {len(documents)} email documents")

        # Create text splitter with 256 chunk size
        text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=128)

        # Convert Documents to text strings and chunk them
        all_texts = []
        for doc in documents:
            # Split the document into chunks
            nodes = text_splitter.get_nodes_from_documents([doc])
            for node in nodes:
                all_texts.append(node.get_content())

        print(f"Created {len(all_texts)} text chunks from {len(documents)} documents")

        # Create LEANN index directory

        print(f"--- Index directory not found, building new index ---")
        INDEX_DIR.mkdir(exist_ok=True)

        print(f"--- Building new LEANN index ---")

        print(f"\n[PHASE 1] Building Leann index...")

        # Use HNSW backend for better macOS compatibility
        builder = LeannBuilder(
            backend_name="hnsw",
            embedding_model=embedding_model,
            graph_degree=32,
            complexity=64,
            is_compact=True,
            is_recompute=True,
            num_threads=1  # Force single-threaded mode
        )

        print(f"Adding {len(all_texts)} email chunks to index...")
        for chunk_text in all_texts:
            builder.add_text(chunk_text)

        builder.build_index(index_path)
        print(f"\nLEANN index built at {index_path}!")
    else:
        print(f"--- Using existing index at {INDEX_DIR} ---")

    return index_path

async def query_leann_index(index_path: str, query: str):
    """
    Query the LEANN index.

    Args:
        index_path: Path to the LEANN index
        query: The query string
    """
    print(f"\n[PHASE 2] Starting Leann chat session...")
    chat = LeannChat(index_path=index_path,
                     llm_config={"type": "openai", "model": "gpt-4o"})

    print(f"You: {query}")
    import time
    start_time = time.time()
    chat_response = chat.ask(
        query,
        top_k=20,
        recompute_beighbor_embeddings=True,
        complexity=32,
        beam_width=1,
    )
    end_time = time.time()
    # print(f"Time taken: {end_time - start_time} seconds")
    # Highlight the answer
    print(f"Leann chat response: \033[36m{chat_response}\033[0m")

async def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='LEANN Mail Reader - Create and query email index')
    # The mail path is auto-detected; all Messages directories are discovered automatically
    parser.add_argument('--index-dir', type=str, default="./mail_index",
                        help='Directory to store the LEANN index (default: ./mail_index)')
    parser.add_argument('--max-emails', type=int, default=1000,
                        help='Maximum number of emails to process (-1 means all)')
    parser.add_argument('--query', type=str, default="Give me some funny advertisement about apple or other companies",
                        help='Single query to run (default: runs example queries)')
    parser.add_argument('--include-html', action='store_true', default=False,
                        help='Include HTML content in email processing (default: False)')
    parser.add_argument('--embedding-model', type=str, default="facebook/contriever",
                        help='Embedding model to use (default: facebook/contriever)')

    args = parser.parse_args()

    print(f"args: {args}")

    # Automatically find all Messages directories under the current user's Mail directory
    from examples.email_data.LEANN_email_reader import find_all_messages_directories
    mail_path = get_mail_path()
    print(f"Searching for email data in: {mail_path}")
    messages_dirs = find_all_messages_directories(mail_path)
    # messages_dirs = find_all_messages_directories(DEFAULT_MAIL_PATH)
    # messages_dirs = [DEFAULT_MAIL_PATH]
    # messages_dirs = messages_dirs[:1]

    print('len(messages_dirs): ', len(messages_dirs))

    if not messages_dirs:
        print("No Messages directories found. Exiting.")
        return

    INDEX_DIR = Path(args.index_dir)
    INDEX_PATH = str(INDEX_DIR / "mail_documents.leann")
    print(f"Index directory: {INDEX_DIR}")
    print(f"Found {len(messages_dirs)} Messages directories.")

    # Create or load the LEANN index from all sources
    index_path = create_leann_index_from_multiple_sources(messages_dirs, INDEX_PATH, args.max_emails, args.include_html, args.embedding_model)

    if index_path:
        if args.query:
            # Run single query
            await query_leann_index(index_path, args.query)
        else:
            # Example queries
            queries = [
                "Hows Berkeley Graduate Student Instructor",
                "how's the icloud related advertisement saying",
                "Whats the number of class recommend to take per semester for incoming EECS students"
            ]
            for query in queries:
                print("\n" + "="*60)
                await query_leann_index(index_path, query)

if __name__ == "__main__":
    asyncio.run(main())
@@ -1,108 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import argparse
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any
|
|
||||||
|
|
||||||
# Add the project root to Python path so we can import from examples
|
|
||||||
project_root = Path(__file__).parent.parent
|
|
||||||
sys.path.insert(0, str(project_root))
|
|
||||||
|
|
||||||
from llama_index.core import VectorStoreIndex, StorageContext
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
|
|
||||||
# --- EMBEDDING MODEL ---
|
|
||||||
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
|
||||||
import torch
|
|
||||||
|
|
||||||
# --- END EMBEDDING MODEL ---
|
|
||||||
|
|
||||||
# Import EmlxReader from the new module
|
|
||||||
from examples.email_data.LEANN_email_reader import EmlxReader
|
|
||||||
|
|
||||||
def create_and_save_index(mail_path: str, save_dir: str = "mail_index_embedded", max_count: int = 1000, include_html: bool = False):
|
|
||||||
print("Creating index from mail data with embedded metadata...")
|
|
||||||
documents = EmlxReader(include_html=include_html).load_data(mail_path, max_count=max_count)
|
|
||||||
if not documents:
|
|
||||||
print("No documents loaded. Exiting.")
|
|
||||||
return None
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
|
|
||||||
# Use facebook/contriever as the embedder
|
|
||||||
embed_model = HuggingFaceEmbedding(model_name="facebook/contriever")
|
|
||||||
# set on device
|
|
||||||
import torch
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
embed_model._model.to("cuda")
|
|
||||||
# set mps
|
|
||||||
elif torch.backends.mps.is_available():
|
|
||||||
embed_model._model.to("mps")
|
|
||||||
else:
|
|
||||||
embed_model._model.to("cpu")
|
|
||||||
index = VectorStoreIndex.from_documents(
|
|
||||||
documents,
|
|
||||||
transformations=[text_splitter],
|
|
||||||
embed_model=embed_model
|
|
||||||
)
|
|
||||||
os.makedirs(save_dir, exist_ok=True)
|
|
||||||
index.storage_context.persist(persist_dir=save_dir)
|
|
||||||
print(f"Index saved to {save_dir}")
|
|
||||||
return index
|
|
||||||
|
|
||||||
def load_index(save_dir: str = "mail_index_embedded"):
|
|
||||||
try:
|
|
||||||
storage_context = StorageContext.from_defaults(persist_dir=save_dir)
|
|
||||||
index = VectorStoreIndex.from_vector_store(
|
|
||||||
storage_context.vector_store,
|
|
||||||
storage_context=storage_context
|
|
||||||
)
|
|
||||||
print(f"Index loaded from {save_dir}")
|
|
||||||
return index
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error loading index: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
def query_index(index, query: str):
|
|
||||||
if index is None:
|
|
||||||
print("No index available for querying.")
|
|
||||||
return
|
|
||||||
query_engine = index.as_query_engine()
|
|
||||||
response = query_engine.query(query)
|
|
||||||
print(f"Query: {query}")
|
|
||||||
print(f"Response: {response}")
|
|
||||||
|
|
||||||
def main():
|
|
||||||
# Parse command line arguments
|
|
||||||
parser = argparse.ArgumentParser(description='LlamaIndex Mail Reader - Create and query email index')
|
|
||||||
parser.add_argument('--mail-path', type=str,
|
|
||||||
default="/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data/9/Messages",
|
|
||||||
help='Path to mail data directory')
|
|
||||||
parser.add_argument('--save-dir', type=str, default="mail_index_embedded",
|
|
||||||
help='Directory to store the index (default: mail_index_embedded)')
|
|
||||||
parser.add_argument('--max-emails', type=int, default=10000,
|
|
||||||
help='Maximum number of emails to process')
|
|
||||||
parser.add_argument('--include-html', action='store_true', default=False,
|
|
||||||
help='Include HTML content in email processing (default: False)')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
mail_path = args.mail_path
|
|
||||||
save_dir = args.save_dir
|
|
||||||
|
|
||||||
if os.path.exists(save_dir) and os.path.exists(os.path.join(save_dir, "vector_store.json")):
|
|
||||||
print("Loading existing index...")
|
|
||||||
index = load_index(save_dir)
|
|
||||||
else:
|
|
||||||
print("Creating new index...")
|
|
||||||
index = create_and_save_index(mail_path, save_dir, max_count=args.max_emails, include_html=args.include_html)
|
|
||||||
if index:
|
|
||||||
queries = [
|
|
||||||
"Hows Berkeley Graduate Student Instructor",
|
|
||||||
"how's the icloud related advertisement saying",
|
|
||||||
"Whats the number of class recommend to take per semester for incoming EECS students"
|
|
||||||
]
|
|
||||||
for query in queries:
|
|
||||||
print("\n" + "="*50)
|
|
||||||
query_index(index, query)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -1,119 +0,0 @@
|
|||||||
import argparse
|
|
||||||
from llama_index.core import SimpleDirectoryReader
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
import asyncio
|
|
||||||
import dotenv
|
|
||||||
from leann.api import LeannBuilder, LeannChat
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
|
|
||||||
|
|
||||||
async def main(args):
|
|
||||||
INDEX_DIR = Path(args.index_dir)
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "pdf_documents.leann")
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
node_parser = SentenceSplitter(
|
|
||||||
chunk_size=256, chunk_overlap=128, separator=" ", paragraph_separator="\n\n"
|
|
||||||
)
|
|
||||||
|
|
||||||
print("Loading documents...")
|
|
||||||
documents = SimpleDirectoryReader(
|
|
||||||
args.data_dir,
|
|
||||||
recursive=True,
|
|
||||||
encoding="utf-8",
|
|
||||||
required_exts=[".pdf", ".txt", ".md"],
|
|
||||||
).load_data(show_progress=True)
|
|
||||||
print("Documents loaded.")
|
|
||||||
all_texts = []
|
|
||||||
for doc in documents:
|
|
||||||
nodes = node_parser.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
all_texts.append(node.get_content())
|
|
||||||
|
|
||||||
print("--- Index directory not found, building new index ---")
|
|
||||||
|
|
||||||
print("\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="facebook/contriever",
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1, # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Loaded {len(all_texts)} text chunks from documents.")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(INDEX_PATH)
|
|
||||||
print(f"\nLeann index built at {INDEX_PATH}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
|
|
||||||
llm_config = {"type": "hf", "model": "Qwen/Qwen3-4B"}
|
|
||||||
llm_config = {"type": "ollama", "model": "qwen3:8b"}
|
|
||||||
llm_config = {"type": "openai", "model": "gpt-4o"}
|
|
||||||
|
|
||||||
chat = LeannChat(index_path=INDEX_PATH, llm_config=llm_config)
|
|
||||||
# query = (
|
|
||||||
# "什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发"
|
|
||||||
# )
|
|
||||||
query = args.query
|
|
||||||
|
|
||||||
print(f"You: {query}")
|
|
||||||
chat_response = chat.ask(query, top_k=20, recompute_embeddings=True, complexity=32)
|
|
||||||
print(f"Leann chat response: \033[36m{chat_response}\033[0m")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description="Run Leann Chat with various LLM backends."
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--llm",
|
|
||||||
type=str,
|
|
||||||
default="hf",
|
|
||||||
choices=["simulated", "ollama", "hf", "openai"],
|
|
||||||
help="The LLM backend to use.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--model",
|
|
||||||
type=str,
|
|
||||||
default="Qwen/Qwen3-0.6B",
|
|
||||||
help="The model name to use (e.g., 'llama3:8b' for ollama, 'deepseek-ai/deepseek-llm-7b-chat' for hf, 'gpt-4o' for openai).",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--host",
|
|
||||||
type=str,
|
|
||||||
default="http://localhost:11434",
|
|
||||||
help="The host for the Ollama API.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--index-dir",
|
|
||||||
type=str,
|
|
||||||
default="./test_doc_files",
|
|
||||||
help="Directory where the Leann index will be stored.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--data-dir",
|
|
||||||
type=str,
|
|
||||||
default="examples/data",
|
|
||||||
help="Directory containing documents to index (PDF, TXT, MD files).",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--query",
|
|
||||||
type=str,
|
|
||||||
default="Based on the paper, what are the main techniques LEANN explores to reduce the storage overhead and DLPM explore to achieve Fairness and Efiiciency trade-off?",
|
|
||||||
help="The query to ask the Leann chat system.",
|
|
||||||
)
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
asyncio.run(main(args))
|
|
||||||
@@ -1,5 +1,6 @@
 import os
-from leann.api import LeannBuilder, LeannSearcher, LeannChat
+from leann.api import LeannBuilder, LeannChat
 
 # Define the path for our new MLX-based index
 INDEX_PATH = "./mlx_diskann_index/leann"
@@ -38,7 +39,5 @@ chat = LeannChat(index_path=INDEX_PATH)
 # add query
 query = "MLX is an array framework for machine learning on Apple silicon."
 print(f"Query: {query}")
-response = chat.ask(
-    query, top_k=3, recompute_beighbor_embeddings=True, complexity=3, beam_width=1
-)
+response = chat.ask(query, top_k=3, recompute_beighbor_embeddings=True, complexity=3, beam_width=1)
 print(f"Response: {response}")
@@ -1,319 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""
|
|
||||||
Multi-Vector Aggregator for Fat Embeddings
|
|
||||||
==========================================
|
|
||||||
|
|
||||||
This module implements aggregation strategies for multi-vector embeddings,
|
|
||||||
similar to ColPali's approach where multiple patch vectors represent a single document.
|
|
||||||
|
|
||||||
Key features:
|
|
||||||
- MaxSim aggregation (take maximum similarity across patches)
|
|
||||||
- Voting-based aggregation (count patch matches)
|
|
||||||
- Weighted aggregation (attention-score weighted)
|
|
||||||
- Spatial clustering of matching patches
|
|
||||||
- Document-level result consolidation
|
|
||||||
"""
|
|
||||||
|
|
||||||
import numpy as np
|
|
||||||
from typing import List, Dict, Any, Tuple, Optional
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from collections import defaultdict
|
|
||||||
import json
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class PatchResult:
|
|
||||||
"""Represents a single patch search result."""
|
|
||||||
patch_id: int
|
|
||||||
image_name: str
|
|
||||||
image_path: str
|
|
||||||
coordinates: Tuple[int, int, int, int] # (x1, y1, x2, y2)
|
|
||||||
score: float
|
|
||||||
attention_score: float
|
|
||||||
scale: float
|
|
||||||
metadata: Dict[str, Any]
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class AggregatedResult:
|
|
||||||
"""Represents an aggregated document-level result."""
|
|
||||||
image_name: str
|
|
||||||
image_path: str
|
|
||||||
doc_score: float
|
|
||||||
patch_count: int
|
|
||||||
best_patch: PatchResult
|
|
||||||
all_patches: List[PatchResult]
|
|
||||||
aggregation_method: str
|
|
||||||
spatial_clusters: Optional[List[List[PatchResult]]] = None
|
|
||||||
|
|
||||||
class MultiVectorAggregator:
|
|
||||||
"""
|
|
||||||
Aggregates multiple patch-level results into document-level results.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self,
|
|
||||||
aggregation_method: str = "maxsim",
|
|
||||||
spatial_clustering: bool = True,
|
|
||||||
cluster_distance_threshold: float = 100.0):
|
|
||||||
"""
|
|
||||||
Initialize the aggregator.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
aggregation_method: "maxsim", "voting", "weighted", or "mean"
|
|
||||||
spatial_clustering: Whether to cluster spatially close patches
|
|
||||||
cluster_distance_threshold: Distance threshold for spatial clustering
|
|
||||||
"""
|
|
||||||
self.aggregation_method = aggregation_method
|
|
||||||
self.spatial_clustering = spatial_clustering
|
|
||||||
self.cluster_distance_threshold = cluster_distance_threshold
|
|
||||||
|
|
||||||
def aggregate_results(self,
|
|
||||||
search_results: List[Dict[str, Any]],
|
|
||||||
top_k: int = 10) -> List[AggregatedResult]:
|
|
||||||
"""
|
|
||||||
Aggregate patch-level search results into document-level results.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
search_results: List of search results from LeannSearcher
|
|
||||||
top_k: Number of top documents to return
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of aggregated document results
|
|
||||||
"""
|
|
||||||
# Group results by image
|
|
||||||
image_groups = defaultdict(list)
|
|
||||||
|
|
||||||
for result in search_results:
|
|
||||||
metadata = result.metadata
|
|
||||||
if "image_name" in metadata and "patch_id" in metadata:
|
|
||||||
patch_result = PatchResult(
|
|
||||||
patch_id=metadata["patch_id"],
|
|
||||||
image_name=metadata["image_name"],
|
|
||||||
image_path=metadata["image_path"],
|
|
||||||
coordinates=tuple(metadata["coordinates"]),
|
|
||||||
score=result.score,
|
|
||||||
attention_score=metadata.get("attention_score", 0.0),
|
|
||||||
scale=metadata.get("scale", 1.0),
|
|
||||||
metadata=metadata
|
|
||||||
)
|
|
||||||
image_groups[metadata["image_name"]].append(patch_result)
|
|
||||||
|
|
||||||
# Aggregate each image group
|
|
||||||
aggregated_results = []
|
|
||||||
for image_name, patches in image_groups.items():
|
|
||||||
if len(patches) == 0:
|
|
||||||
continue
|
|
||||||
|
|
||||||
agg_result = self._aggregate_image_patches(image_name, patches)
|
|
||||||
aggregated_results.append(agg_result)
|
|
||||||
|
|
||||||
# Sort by aggregated score and return top-k
|
|
||||||
aggregated_results.sort(key=lambda x: x.doc_score, reverse=True)
|
|
||||||
return aggregated_results[:top_k]
|
|
||||||
|
|
||||||
def _aggregate_image_patches(self, image_name: str, patches: List[PatchResult]) -> AggregatedResult:
|
|
||||||
"""Aggregate patches for a single image."""
|
|
||||||
|
|
||||||
if self.aggregation_method == "maxsim":
|
|
||||||
doc_score = max(patch.score for patch in patches)
|
|
||||||
best_patch = max(patches, key=lambda p: p.score)
|
|
||||||
|
|
||||||
elif self.aggregation_method == "voting":
|
|
||||||
# Count patches above threshold
|
|
||||||
threshold = np.percentile([p.score for p in patches], 75)
|
|
||||||
doc_score = sum(1 for patch in patches if patch.score >= threshold)
|
|
||||||
best_patch = max(patches, key=lambda p: p.score)
|
|
||||||
|
|
||||||
elif self.aggregation_method == "weighted":
|
|
||||||
# Weight by attention scores
|
|
||||||
total_weighted_score = sum(p.score * p.attention_score for p in patches)
|
|
||||||
total_weights = sum(p.attention_score for p in patches)
|
|
||||||
doc_score = total_weighted_score / max(total_weights, 1e-8)
|
|
||||||
best_patch = max(patches, key=lambda p: p.score * p.attention_score)
|
|
||||||
|
|
||||||
elif self.aggregation_method == "mean":
|
|
||||||
doc_score = np.mean([patch.score for patch in patches])
|
|
||||||
best_patch = max(patches, key=lambda p: p.score)
|
|
||||||
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Unknown aggregation method: {self.aggregation_method}")
|
|
||||||
|
|
||||||
# Spatial clustering if enabled
|
|
||||||
spatial_clusters = None
|
|
||||||
if self.spatial_clustering:
|
|
||||||
spatial_clusters = self._cluster_patches_spatially(patches)
|
|
||||||
|
|
||||||
return AggregatedResult(
|
|
||||||
image_name=image_name,
|
|
||||||
image_path=patches[0].image_path,
|
|
||||||
doc_score=float(doc_score),
|
|
||||||
patch_count=len(patches),
|
|
||||||
best_patch=best_patch,
|
|
||||||
all_patches=sorted(patches, key=lambda p: p.score, reverse=True),
|
|
||||||
aggregation_method=self.aggregation_method,
|
|
||||||
spatial_clusters=spatial_clusters
|
|
||||||
)
|
|
||||||
|
|
||||||
def _cluster_patches_spatially(self, patches: List[PatchResult]) -> List[List[PatchResult]]:
|
|
||||||
"""Cluster patches that are spatially close to each other."""
|
|
||||||
if len(patches) <= 1:
|
|
||||||
return [patches]
|
|
||||||
|
|
||||||
clusters = []
|
|
||||||
remaining_patches = patches.copy()
|
|
||||||
|
|
||||||
while remaining_patches:
|
|
||||||
# Start new cluster with highest scoring remaining patch
|
|
||||||
seed_patch = max(remaining_patches, key=lambda p: p.score)
|
|
||||||
current_cluster = [seed_patch]
|
|
||||||
remaining_patches.remove(seed_patch)
|
|
||||||
|
|
||||||
# Add nearby patches to cluster
|
|
||||||
added_to_cluster = True
|
|
||||||
while added_to_cluster:
|
|
||||||
added_to_cluster = False
|
|
||||||
for patch in remaining_patches.copy():
|
|
||||||
if self._is_patch_nearby(patch, current_cluster):
|
|
||||||
current_cluster.append(patch)
|
|
||||||
remaining_patches.remove(patch)
|
|
||||||
added_to_cluster = True
|
|
||||||
|
|
||||||
clusters.append(current_cluster)
|
|
||||||
|
|
||||||
return sorted(clusters, key=lambda cluster: max(p.score for p in cluster), reverse=True)
|
|
||||||
|
|
||||||
def _is_patch_nearby(self, patch: PatchResult, cluster: List[PatchResult]) -> bool:
|
|
||||||
"""Check if a patch is spatially close to any patch in the cluster."""
|
|
||||||
patch_center = self._get_patch_center(patch.coordinates)
|
|
||||||
|
|
||||||
for cluster_patch in cluster:
|
|
||||||
cluster_center = self._get_patch_center(cluster_patch.coordinates)
|
|
||||||
distance = np.sqrt((patch_center[0] - cluster_center[0])**2 +
|
|
||||||
(patch_center[1] - cluster_center[1])**2)
|
|
||||||
|
|
||||||
if distance <= self.cluster_distance_threshold:
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _get_patch_center(self, coordinates: Tuple[int, int, int, int]) -> Tuple[float, float]:
|
|
||||||
"""Get center point of a patch."""
|
|
||||||
x1, y1, x2, y2 = coordinates
|
|
||||||
return ((x1 + x2) / 2, (y1 + y2) / 2)
|
|
||||||
|
|
||||||
def print_aggregated_results(self, results: List[AggregatedResult], max_patches_per_doc: int = 3):
|
|
||||||
"""Pretty print aggregated results."""
|
|
||||||
print(f"\n🔍 Aggregated Results (method: {self.aggregation_method})")
|
|
||||||
print("=" * 80)
|
|
||||||
|
|
||||||
for i, result in enumerate(results):
|
|
||||||
print(f"\n{i+1}. {result.image_name}")
|
|
||||||
print(f" Doc Score: {result.doc_score:.4f} | Patches: {result.patch_count}")
|
|
||||||
print(f" Path: {result.image_path}")
|
|
||||||
|
|
||||||
# Show best patch
|
|
||||||
best = result.best_patch
|
|
||||||
print(f" 🌟 Best Patch: #{best.patch_id} at {best.coordinates} (score: {best.score:.4f})")
|
|
||||||
|
|
||||||
# Show top patches
|
|
||||||
print(f" 📍 Top Patches:")
|
|
||||||
for j, patch in enumerate(result.all_patches[:max_patches_per_doc]):
|
|
||||||
print(f" {j+1}. Patch #{patch.patch_id}: {patch.score:.4f} at {patch.coordinates}")
|
|
||||||
|
|
||||||
# Show spatial clusters if available
|
|
||||||
if result.spatial_clusters and len(result.spatial_clusters) > 1:
|
|
||||||
print(f" 🗂️ Spatial Clusters: {len(result.spatial_clusters)}")
|
|
||||||
for j, cluster in enumerate(result.spatial_clusters[:2]): # Show top 2 clusters
|
|
||||||
cluster_score = max(p.score for p in cluster)
|
|
||||||
print(f" Cluster {j+1}: {len(cluster)} patches (best: {cluster_score:.4f})")
|
|
||||||
|
|
||||||
def demo_aggregation():
|
|
||||||
"""Demonstrate the multi-vector aggregation functionality."""
|
|
||||||
print("=== Multi-Vector Aggregation Demo ===")
|
|
||||||
|
|
||||||
# Simulate some patch-level search results
|
|
||||||
# In real usage, these would come from LeannSearcher.search()
|
|
||||||
|
|
||||||
class MockResult:
|
|
||||||
def __init__(self, score, metadata):
|
|
||||||
self.score = score
|
|
||||||
self.metadata = metadata
|
|
||||||
|
|
||||||
# Simulate results for 2 images with multiple patches each
|
|
||||||
mock_results = [
|
|
||||||
# Image 1: cats_and_kitchen.jpg - 4 patches
|
|
||||||
MockResult(0.85, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 3,
|
|
||||||
"coordinates": [100, 50, 224, 174], # Kitchen area
|
|
||||||
"attention_score": 0.92,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.78, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 7,
|
|
||||||
"coordinates": [200, 300, 324, 424], # Cat area
|
|
||||||
"attention_score": 0.88,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.72, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 12,
|
|
||||||
"coordinates": [150, 100, 274, 224], # Appliances
|
|
||||||
"attention_score": 0.75,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.65, {
|
|
||||||
"image_name": "cats_and_kitchen.jpg",
|
|
||||||
"image_path": "/path/to/cats_and_kitchen.jpg",
|
|
||||||
"patch_id": 15,
|
|
||||||
"coordinates": [50, 250, 174, 374], # Furniture
|
|
||||||
"attention_score": 0.70,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
|
|
||||||
# Image 2: city_street.jpg - 3 patches
|
|
||||||
MockResult(0.68, {
|
|
||||||
"image_name": "city_street.jpg",
|
|
||||||
"image_path": "/path/to/city_street.jpg",
|
|
||||||
"patch_id": 2,
|
|
||||||
"coordinates": [300, 100, 424, 224], # Buildings
|
|
||||||
"attention_score": 0.80,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.62, {
|
|
||||||
"image_name": "city_street.jpg",
|
|
||||||
"image_path": "/path/to/city_street.jpg",
|
|
||||||
"patch_id": 8,
|
|
||||||
"coordinates": [100, 350, 224, 474], # Street level
|
|
||||||
"attention_score": 0.75,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
MockResult(0.55, {
|
|
||||||
"image_name": "city_street.jpg",
|
|
||||||
"image_path": "/path/to/city_street.jpg",
|
|
||||||
"patch_id": 11,
|
|
||||||
"coordinates": [400, 200, 524, 324], # Sky area
|
|
||||||
"attention_score": 0.60,
|
|
||||||
"scale": 1.0
|
|
||||||
}),
|
|
||||||
]
|
|
||||||
|
|
||||||
# Test different aggregation methods
|
|
||||||
methods = ["maxsim", "voting", "weighted", "mean"]
|
|
||||||
|
|
||||||
for method in methods:
|
|
||||||
print(f"\n{'='*20} {method.upper()} AGGREGATION {'='*20}")
|
|
||||||
|
|
||||||
aggregator = MultiVectorAggregator(
|
|
||||||
aggregation_method=method,
|
|
||||||
spatial_clustering=True,
|
|
||||||
cluster_distance_threshold=100.0
|
|
||||||
)
|
|
||||||
|
|
||||||
aggregated = aggregator.aggregate_results(mock_results, top_k=5)
|
|
||||||
aggregator.print_aggregated_results(aggregated)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
demo_aggregation()
|
|
||||||
@@ -1,108 +0,0 @@
#!/usr/bin/env python3
"""
OpenAI Embedding Example

Complete example showing how to build and search with OpenAI embeddings using HNSW backend.
"""

import os
import dotenv
from pathlib import Path
from leann.api import LeannBuilder, LeannSearcher

# Load environment variables
dotenv.load_dotenv()


def main():
    # Check if OpenAI API key is available
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("ERROR: OPENAI_API_KEY environment variable not set")
        return False

    print(f"✅ OpenAI API key found: {api_key[:10]}...")

    # Sample texts
    sample_texts = [
        "Machine learning is a powerful technology that enables computers to learn from data.",
        "Natural language processing helps computers understand and generate human language.",
        "Deep learning uses neural networks with multiple layers to solve complex problems.",
        "Computer vision allows machines to interpret and understand visual information.",
        "Reinforcement learning trains agents to make decisions through trial and error.",
        "Data science combines statistics, math, and programming to extract insights from data.",
        "Artificial intelligence aims to create machines that can perform human-like tasks.",
        "Python is a popular programming language used extensively in data science and AI.",
        "Neural networks are inspired by the structure and function of the human brain.",
        "Big data refers to extremely large datasets that require special tools to process."
    ]

    INDEX_DIR = Path("./simple_openai_test_index")
    INDEX_PATH = str(INDEX_DIR / "simple_test.leann")

    print(f"\n=== Building Index with OpenAI Embeddings ===")
    print(f"Index path: {INDEX_PATH}")

    try:
        # Use proper configuration for OpenAI embeddings
        builder = LeannBuilder(
            backend_name="hnsw",
            embedding_model="text-embedding-3-small",
            embedding_mode="openai",
            # HNSW settings for OpenAI embeddings
            M=16,  # Smaller graph degree
            efConstruction=64,  # Smaller construction complexity
            is_compact=True,  # Enable compact storage for recompute
            is_recompute=True,  # MUST enable for OpenAI embeddings
            num_threads=1,
        )

        print(f"Adding {len(sample_texts)} texts to the index...")
        for i, text in enumerate(sample_texts):
            metadata = {"id": f"doc_{i}", "topic": "AI"}
            builder.add_text(text, metadata)

        print("Building index...")
        builder.build_index(INDEX_PATH)
        print(f"✅ Index built successfully!")

    except Exception as e:
        print(f"❌ Error building index: {e}")
        import traceback
        traceback.print_exc()
        return False

    print(f"\n=== Testing Search ===")

    try:
        searcher = LeannSearcher(INDEX_PATH)

        test_queries = [
            "What is machine learning?",
            "How do neural networks work?",
            "Programming languages for data science"
        ]

        for query in test_queries:
            print(f"\n🔍 Query: '{query}'")
            results = searcher.search(query, top_k=3)

            print(f" Found {len(results)} results:")
            for i, result in enumerate(results):
                print(f" {i+1}. Score: {result.score:.4f}")
                print(f" Text: {result.text[:80]}...")

        print(f"\n✅ Search test completed successfully!")
        return True

    except Exception as e:
        print(f"❌ Error during search: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = main()
    if success:
        print(f"\n🎉 Simple OpenAI index test completed successfully!")
    else:
        print(f"\n💥 Simple OpenAI index test failed!")
@@ -1,18 +0,0 @@
import asyncio
from leann.api import LeannChat
from pathlib import Path

INDEX_DIR = Path("./test_pdf_index_huawei")
INDEX_PATH = str(INDEX_DIR / "pdf_documents.leann")


async def main():
    print(f"\n[PHASE 2] Starting Leann chat session...")
    chat = LeannChat(index_path=INDEX_PATH)
    query = "What is the main idea of RL and give me 5 exapmle of classic RL algorithms?"
    query = "Based on the paper, what are the main techniques LEANN explores to reduce the storage overhead and DLPM explore to achieve Fairness and Efiiciency trade-off?"
    # query = "什么是盘古大模型以及盘古开发过程中遇到了什么阴暗面,任务令一般在什么城市颁发"
    response = chat.ask(query,top_k=20,recompute_beighbor_embeddings=True,complexity=32,beam_width=1)
    print(f"\n[PHASE 2] Response: {response}")


if __name__ == "__main__":
    asyncio.run(main())
250
examples/spoiler_free_book_rag.py
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Spoiler-Free Book RAG Example using LEANN Metadata Filtering
|
||||||
|
|
||||||
|
This example demonstrates how to use LEANN's metadata filtering to create
|
||||||
|
a spoiler-free book RAG system where users can search for information
|
||||||
|
up to a specific chapter they've read.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python spoiler_free_book_rag.py
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
# Add LEANN to path (adjust path as needed)
|
||||||
|
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../packages/leann-core/src"))
|
||||||
|
|
||||||
|
from leann.api import LeannBuilder, LeannSearcher
|
||||||
|
|
||||||
|
|
||||||
|
def chunk_book_with_metadata(book_title: str = "Sample Book") -> list[dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Create sample book chunks with metadata for demonstration.
|
||||||
|
|
||||||
|
In a real implementation, this would parse actual book files (epub, txt, etc.)
|
||||||
|
and extract chapter boundaries, character mentions, etc.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
book_title: Title of the book
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of chunk dictionaries with text and metadata
|
||||||
|
"""
|
||||||
|
# Sample book chunks with metadata
|
||||||
|
# In practice, you'd use proper text processing libraries
|
||||||
|
|
||||||
|
sample_chunks = [
|
||||||
|
{
|
||||||
|
"text": "Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do.",
|
||||||
|
"metadata": {
|
||||||
|
"book": book_title,
|
||||||
|
"chapter": 1,
|
||||||
|
"page": 1,
|
||||||
|
"characters": ["Alice", "Sister"],
|
||||||
|
"themes": ["boredom", "curiosity"],
|
||||||
|
"location": "riverbank",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"text": "So she was considering in her own mind (as well as she could, for the hot day made her feel very sleepy and stupid), whether the pleasure of making a daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly a White Rabbit with pink eyes ran close by her.",
|
||||||
|
"metadata": {
|
||||||
|
"book": book_title,
|
||||||
|
"chapter": 1,
|
||||||
|
"page": 2,
|
||||||
|
"characters": ["Alice", "White Rabbit"],
|
||||||
|
"themes": ["decision", "surprise", "magic"],
|
||||||
|
"location": "riverbank",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"text": "Alice found herself falling down a very deep well. Either the well was very deep, or she fell very slowly, for she had plenty of time as she fell to look about her and to wonder what was going to happen next.",
|
||||||
|
"metadata": {
|
||||||
|
"book": book_title,
|
||||||
|
"chapter": 2,
|
||||||
|
"page": 15,
|
||||||
|
"characters": ["Alice"],
|
||||||
|
"themes": ["falling", "wonder", "transformation"],
|
||||||
|
"location": "rabbit hole",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"text": "Alice meets the Cheshire Cat, who tells her that everyone in Wonderland is mad, including Alice herself.",
|
||||||
|
"metadata": {
|
||||||
|
"book": book_title,
|
||||||
|
"chapter": 6,
|
||||||
|
"page": 85,
|
||||||
|
"characters": ["Alice", "Cheshire Cat"],
|
||||||
|
"themes": ["madness", "philosophy", "identity"],
|
||||||
|
"location": "Duchess's house",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"text": "At the Queen's croquet ground, Alice witnesses the absurd trial that reveals the arbitrary nature of Wonderland's justice system.",
|
||||||
|
"metadata": {
|
||||||
|
"book": book_title,
|
||||||
|
"chapter": 8,
|
||||||
|
"page": 120,
|
||||||
|
"characters": ["Alice", "Queen of Hearts", "King of Hearts"],
|
||||||
|
"themes": ["justice", "absurdity", "authority"],
|
||||||
|
"location": "Queen's court",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"text": "Alice realizes that Wonderland was all a dream, even the Rabbit, as she wakes up on the riverbank next to her sister.",
|
||||||
|
"metadata": {
|
||||||
|
"book": book_title,
|
||||||
|
"chapter": 12,
|
||||||
|
"page": 180,
|
||||||
|
"characters": ["Alice", "Sister", "Rabbit"],
|
||||||
|
"themes": ["revelation", "reality", "growth"],
|
||||||
|
"location": "riverbank",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
return sample_chunks
|
||||||
|
|
||||||
|
|
||||||
|
def build_spoiler_free_index(book_chunks: list[dict[str, Any]], index_name: str) -> str:
|
||||||
|
"""
|
||||||
|
Build a LEANN index with book chunks that include spoiler metadata.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
book_chunks: List of book chunks with metadata
|
||||||
|
index_name: Name for the index
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Path to the built index
|
||||||
|
"""
|
||||||
|
print(f"📚 Building spoiler-free book index: {index_name}")
|
||||||
|
|
||||||
|
# Initialize LEANN builder
|
||||||
|
builder = LeannBuilder(
|
||||||
|
backend_name="hnsw", embedding_model="text-embedding-3-small", embedding_mode="openai"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add each chunk with its metadata
|
||||||
|
for chunk in book_chunks:
|
||||||
|
builder.add_text(text=chunk["text"], metadata=chunk["metadata"])
|
||||||
|
|
||||||
|
# Build the index
|
||||||
|
index_path = f"{index_name}_book_index"
|
||||||
|
builder.build_index(index_path)
|
||||||
|
|
||||||
|
print(f"✅ Index built successfully: {index_path}")
|
||||||
|
return index_path
|
||||||
|
|
||||||
|
|
||||||
|
def spoiler_free_search(
|
||||||
|
index_path: str,
|
||||||
|
query: str,
|
||||||
|
max_chapter: int,
|
||||||
|
character_filter: Optional[list[str]] = None,
|
||||||
|
) -> list[dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Perform a spoiler-free search on the book index.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
index_path: Path to the LEANN index
|
||||||
|
query: Search query
|
||||||
|
max_chapter: Maximum chapter number to include
|
||||||
|
character_filter: Optional list of characters to focus on
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of search results safe for the reader
|
||||||
|
"""
|
||||||
|
print(f"🔍 Searching: '{query}' (up to chapter {max_chapter})")
|
||||||
|
|
||||||
|
searcher = LeannSearcher(index_path)
|
||||||
|
|
||||||
|
metadata_filters = {"chapter": {"<=": max_chapter}}
|
||||||
|
|
||||||
|
if character_filter:
|
||||||
|
metadata_filters["characters"] = {"contains": character_filter[0]}
|
||||||
|
|
||||||
|
results = searcher.search(query=query, top_k=10, metadata_filters=metadata_filters)
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def demo_spoiler_free_rag():
|
||||||
|
"""
|
||||||
|
Demonstrate the spoiler-free book RAG system.
|
||||||
|
"""
|
||||||
|
print("🎭 Spoiler-Free Book RAG Demo")
|
||||||
|
print("=" * 40)
|
||||||
|
|
||||||
|
# Step 1: Prepare book data
|
||||||
|
book_title = "Alice's Adventures in Wonderland"
|
||||||
|
book_chunks = chunk_book_with_metadata(book_title)
|
||||||
|
|
||||||
|
print(f"📖 Loaded {len(book_chunks)} chunks from '{book_title}'")
|
||||||
|
|
||||||
|
# Step 2: Build the index (in practice, this would be done once)
|
||||||
|
try:
|
||||||
|
index_path = build_spoiler_free_index(book_chunks, "alice_wonderland")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Failed to build index (likely missing dependencies): {e}")
|
||||||
|
print(
|
||||||
|
"💡 This demo shows the filtering logic - actual indexing requires LEANN dependencies"
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Step 3: Demonstrate various spoiler-free searches
|
||||||
|
search_scenarios = [
|
||||||
|
{
|
||||||
|
"description": "Reader who has only read Chapter 1",
|
||||||
|
"query": "What can you tell me about the rabbit?",
|
||||||
|
"max_chapter": 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Reader who has read up to Chapter 5",
|
||||||
|
"query": "Tell me about Alice's adventures",
|
||||||
|
"max_chapter": 5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Reader who has read most of the book",
|
||||||
|
"query": "What does the Cheshire Cat represent?",
|
||||||
|
"max_chapter": 10,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Reader who has read the whole book",
|
||||||
|
"query": "What can you tell me about the rabbit?",
|
||||||
|
"max_chapter": 12,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
for scenario in search_scenarios:
|
||||||
|
print(f"\n📚 Scenario: {scenario['description']}")
|
||||||
|
print(f" Query: {scenario['query']}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
results = spoiler_free_search(
|
||||||
|
index_path=index_path,
|
||||||
|
query=scenario["query"],
|
||||||
|
max_chapter=scenario["max_chapter"],
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f" 📄 Found {len(results)} results:")
|
||||||
|
for i, result in enumerate(results[:3], 1): # Show top 3
|
||||||
|
chapter = result.metadata.get("chapter", "?")
|
||||||
|
location = result.metadata.get("location", "?")
|
||||||
|
print(f" {i}. Chapter {chapter} ({location}): {result.text[:80]}...")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f" ❌ Search failed: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
print("📚 LEANN Spoiler-Free Book RAG Example")
|
||||||
|
print("=====================================")
|
||||||
|
|
||||||
|
try:
|
||||||
|
demo_spoiler_free_rag()
|
||||||
|
except ImportError as e:
|
||||||
|
print(f"❌ Cannot run demo due to missing dependencies: {e}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Error running demo: {e}")
|
||||||
@@ -1,319 +0,0 @@
|
|||||||
import os
|
|
||||||
import asyncio
|
|
||||||
import dotenv
|
|
||||||
import argparse
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import List, Any, Optional
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher, LeannChat
|
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
|
||||||
import requests
|
|
||||||
import time
|
|
||||||
|
|
||||||
dotenv.load_dotenv()
|
|
||||||
|
|
||||||
# Default WeChat export directory
|
|
||||||
DEFAULT_WECHAT_EXPORT_DIR = "./wechat_export_direct"
|
|
||||||
|
|
||||||
|
|
||||||
def create_leann_index_from_multiple_wechat_exports(
|
|
||||||
export_dirs: List[Path],
|
|
||||||
index_path: str = "wechat_history_index.leann",
|
|
||||||
max_count: int = -1,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Create LEANN index from multiple WeChat export data sources.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
export_dirs: List of Path objects pointing to WeChat export directories
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of chat entries to process per export
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from multiple WeChat export data sources...")
|
|
||||||
|
|
||||||
# Load documents using WeChatHistoryReader from history_data
|
|
||||||
from history_data.wechat_history import WeChatHistoryReader
|
|
||||||
|
|
||||||
reader = WeChatHistoryReader()
|
|
||||||
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
all_documents = []
|
|
||||||
total_processed = 0
|
|
||||||
|
|
||||||
# Process each WeChat export directory
|
|
||||||
for i, export_dir in enumerate(export_dirs):
|
|
||||||
print(
|
|
||||||
f"\nProcessing WeChat export {i + 1}/{len(export_dirs)}: {export_dir}"
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
documents = reader.load_data(
|
|
||||||
wechat_export_dir=str(export_dir),
|
|
||||||
max_count=max_count,
|
|
||||||
concatenate_messages=True, # Disable concatenation - one message per document
|
|
||||||
)
|
|
||||||
if documents:
|
|
||||||
print(f"Loaded {len(documents)} chat documents from {export_dir}")
|
|
||||||
all_documents.extend(documents)
|
|
||||||
total_processed += len(documents)
|
|
||||||
|
|
||||||
# Check if we've reached the max count
|
|
||||||
if max_count > 0 and total_processed >= max_count:
|
|
||||||
print(f"Reached max count of {max_count} documents")
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
print(f"No documents loaded from {export_dir}")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error processing {export_dir}: {e}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
if not all_documents:
|
|
||||||
print("No documents loaded from any source. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(
|
|
||||||
f"\nTotal loaded {len(all_documents)} chat documents from {len(export_dirs)} exports and starting to split them into chunks"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=192, chunk_overlap=64)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in all_documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
text = '[Contact] means the message is from: ' + doc.metadata["contact_name"] + '\n' + node.get_content()
|
|
||||||
all_texts.append(text)
|
|
||||||
|
|
||||||
print(
|
|
||||||
f"Finished splitting {len(all_documents)} documents into {len(all_texts)} text chunks"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="Qwen/Qwen3-Embedding-0.6B",
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1, # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} chat chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
|
|
||||||
def create_leann_index(
|
|
||||||
export_dir: str = None,
|
|
||||||
index_path: str = "wechat_history_index.leann",
|
|
||||||
max_count: int = 1000,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Create LEANN index from WeChat chat history data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
export_dir: Path to the WeChat export directory (optional, uses default if None)
|
|
||||||
index_path: Path to save the LEANN index
|
|
||||||
max_count: Maximum number of chat entries to process
|
|
||||||
"""
|
|
||||||
print("Creating LEANN index from WeChat chat history data...")
|
|
||||||
INDEX_DIR = Path(index_path).parent
|
|
||||||
|
|
||||||
if not INDEX_DIR.exists():
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Load documents using WeChatHistoryReader from history_data
|
|
||||||
from history_data.wechat_history import WeChatHistoryReader
|
|
||||||
|
|
||||||
reader = WeChatHistoryReader()
|
|
||||||
|
|
||||||
documents = reader.load_data(
|
|
||||||
wechat_export_dir=export_dir,
|
|
||||||
max_count=max_count,
|
|
||||||
concatenate_messages=False, # Disable concatenation - one message per document
|
|
||||||
)
|
|
||||||
|
|
||||||
if not documents:
|
|
||||||
print("No documents loaded. Exiting.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
print(f"Loaded {len(documents)} chat documents")
|
|
||||||
|
|
||||||
# Create text splitter with 256 chunk size
|
|
||||||
text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
|
|
||||||
|
|
||||||
# Convert Documents to text strings and chunk them
|
|
||||||
all_texts = []
|
|
||||||
for doc in documents:
|
|
||||||
# Split the document into chunks
|
|
||||||
nodes = text_splitter.get_nodes_from_documents([doc])
|
|
||||||
for node in nodes:
|
|
||||||
all_texts.append(node.get_content())
|
|
||||||
|
|
||||||
print(f"Created {len(all_texts)} text chunks from {len(documents)} documents")
|
|
||||||
|
|
||||||
# Create LEANN index directory
|
|
||||||
print(f"--- Index directory not found, building new index ---")
|
|
||||||
INDEX_DIR.mkdir(exist_ok=True)
|
|
||||||
|
|
||||||
print(f"--- Building new LEANN index ---")
|
|
||||||
|
|
||||||
print(f"\n[PHASE 1] Building Leann index...")
|
|
||||||
|
|
||||||
# Use HNSW backend for better macOS compatibility
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model="mlx-community/Qwen3-Embedding-0.6B-4bit-DWQ", # MLX-optimized model
|
|
||||||
graph_degree=32,
|
|
||||||
complexity=64,
|
|
||||||
is_compact=True,
|
|
||||||
is_recompute=True,
|
|
||||||
num_threads=1, # Force single-threaded mode
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"Adding {len(all_texts)} chat chunks to index...")
|
|
||||||
for chunk_text in all_texts:
|
|
||||||
builder.add_text(chunk_text)
|
|
||||||
|
|
||||||
builder.build_index(index_path)
|
|
||||||
print(f"\nLEANN index built at {index_path}!")
|
|
||||||
else:
|
|
||||||
print(f"--- Using existing index at {INDEX_DIR} ---")
|
|
||||||
|
|
||||||
return index_path
|
|
||||||
|
|
||||||
|
|
||||||
async def query_leann_index(index_path: str, query: str):
|
|
||||||
"""
|
|
||||||
Query the LEANN index.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
index_path: Path to the LEANN index
|
|
||||||
query: The query string
|
|
||||||
"""
|
|
||||||
print(f"\n[PHASE 2] Starting Leann chat session...")
|
|
||||||
chat = LeannChat(index_path=index_path)
|
|
||||||
|
|
||||||
print(f"You: {query}")
|
|
||||||
chat_response = chat.ask(
|
|
||||||
query,
|
|
||||||
top_k=20,
|
|
||||||
recompute_beighbor_embeddings=True,
|
|
||||||
complexity=16,
|
|
||||||
beam_width=1,
|
|
||||||
llm_config={
|
|
||||||
"type": "openai",
|
|
||||||
"model": "gpt-4o",
|
|
||||||
"api_key": os.getenv("OPENAI_API_KEY"),
|
|
||||||
},
|
|
||||||
llm_kwargs={"temperature": 0.0, "max_tokens": 1000},
|
|
||||||
)
|
|
||||||
print(f"Leann chat response: \033[36m{chat_response}\033[0m")
|
|
||||||
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
"""Main function with integrated WeChat export functionality."""
|
|
||||||
|
|
||||||
# Parse command line arguments
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description="LEANN WeChat History Reader - Create and query WeChat chat history index"
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--export-dir",
|
|
||||||
type=str,
|
|
||||||
default=DEFAULT_WECHAT_EXPORT_DIR,
|
|
||||||
help=f"Directory to store WeChat exports (default: {DEFAULT_WECHAT_EXPORT_DIR})",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--index-dir",
|
|
||||||
type=str,
|
|
||||||
default="./wechat_history_magic_test_11Debug_new",
|
|
||||||
help="Directory to store the LEANN index (default: ./wechat_history_index_leann_test)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--max-entries",
|
|
||||||
type=int,
|
|
||||||
default=50,
|
|
||||||
help="Maximum number of chat entries to process (default: 5000)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--query",
|
|
||||||
type=str,
|
|
||||||
default=None,
|
|
||||||
help="Single query to run (default: runs example queries)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--force-export",
|
|
||||||
action="store_true",
|
|
||||||
default=False,
|
|
||||||
help="Force re-export of WeChat data even if exports exist",
|
|
||||||
)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
INDEX_DIR = Path(args.index_dir)
|
|
||||||
INDEX_PATH = str(INDEX_DIR / "wechat_history.leann")
|
|
||||||
|
|
||||||
print(f"Using WeChat export directory: {args.export_dir}")
|
|
||||||
print(f"Index directory: {INDEX_DIR}")
|
|
||||||
print(f"Max entries: {args.max_entries}")
|
|
||||||
|
|
||||||
# Initialize WeChat reader with export capabilities
|
|
||||||
from history_data.wechat_history import WeChatHistoryReader
|
|
||||||
|
|
||||||
reader = WeChatHistoryReader()
|
|
||||||
|
|
||||||
# Find existing exports or create new ones using the centralized method
|
|
||||||
export_dirs = reader.find_or_export_wechat_data(args.export_dir)
|
|
||||||
if not export_dirs:
|
|
||||||
print("Failed to find or export WeChat data. Exiting.")
|
|
||||||
return
|
|
||||||
|
|
||||||
# Create or load the LEANN index from all sources
|
|
||||||
index_path = create_leann_index_from_multiple_wechat_exports(
|
|
||||||
export_dirs, INDEX_PATH, max_count=args.max_entries
|
|
||||||
)
|
|
||||||
|
|
||||||
if index_path:
|
|
||||||
if args.query:
|
|
||||||
# Run single query
|
|
||||||
await query_leann_index(index_path, args.query)
|
|
||||||
else:
|
|
||||||
# Example queries
|
|
||||||
queries = [
|
|
||||||
"我想买魔术师约翰逊的球衣,给我一些对应聊天记录?",
|
|
||||||
]
|
|
||||||
|
|
||||||
for query in queries:
|
|
||||||
print("\n" + "=" * 60)
|
|
||||||
await query_leann_index(index_path, query)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
asyncio.run(main())
|
|
||||||
28
llms.txt
Normal file
@@ -0,0 +1,28 @@
# llms.txt — LEANN MCP and Agent Integration
product: LEANN
homepage: https://github.com/yichuan-w/LEANN
contact: https://github.com/yichuan-w/LEANN/issues

# Installation
install: uv tool install leann-core --with leann

# MCP Server Entry Point
mcp.server: leann_mcp
mcp.protocol_version: 2024-11-05

# Tools
mcp.tools: leann_list, leann_search

mcp.tool.leann_list.description: List available LEANN indexes
mcp.tool.leann_list.input: {}

mcp.tool.leann_search.description: Semantic search across a named LEANN index
mcp.tool.leann_search.input.index_name: string, required
mcp.tool.leann_search.input.query: string, required
mcp.tool.leann_search.input.top_k: integer, optional, default=5, min=1, max=20
mcp.tool.leann_search.input.complexity: integer, optional, default=32, min=16, max=128

# Notes
note: Build indexes with `leann build <name> --docs <files...>` before searching.
example.add: claude mcp add --scope user leann-server -- leann_mcp
example.verify: claude mcp list | cat
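
For agent authors, a minimal sketch of the tool-call arguments implied by the schema above; the index name "my-docs" is a hypothetical placeholder and must match an index that was built beforehand:

# Hypothetical MCP tool-call payload for leann_search, following the llms.txt schema above.
# "my-docs" is a placeholder; create it first with: leann build my-docs --docs <files...>
leann_search_request = {
    "name": "leann_search",
    "arguments": {
        "index_name": "my-docs",  # string, required
        "query": "How does LEANN reduce storage overhead?",  # string, required
        "top_k": 5,        # integer, optional, 1-20 (default 5)
        "complexity": 32,  # integer, optional, 16-128 (default 32)
    },
}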
@@ -1 +0,0 @@
1
packages/astchunk-leann
Submodule
Submodule packages/astchunk-leann added at ad9afa07b9
@@ -1,8 +0,0 @@
# packages/leann-backend-diskann/CMakeLists.txt (simplified version)

cmake_minimum_required(VERSION 3.20)
project(leann_backend_diskann_wrapper)

# Tell CMake to directly enter the DiskANN submodule and execute its own CMakeLists.txt
# DiskANN will handle everything itself, including compiling Python bindings
add_subdirectory(src/third_party/DiskANN)
@@ -1 +1,7 @@
-from . import diskann_backend
+from . import diskann_backend as diskann_backend
+from . import graph_partition
+
+# Export main classes and functions
+from .graph_partition import GraphPartitioner, partition_graph
+
+__all__ = ["GraphPartitioner", "diskann_backend", "graph_partition", "partition_graph"]
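
A one-line sketch of what the new re-exports enable in downstream code; the import name leann_backend_diskann is assumed from the package path and is not shown in this diff:

# Hypothetical downstream usage of the names re-exported above (package name assumed).
from leann_backend_diskann import GraphPartitioner, partition_graph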
@@ -1,20 +1,20 @@
-import numpy as np
+import contextlib
+import logging
 import os
 import struct
 import sys
 from pathlib import Path
-from typing import Dict, Any, List, Literal, Optional
-import contextlib
-
-import logging
+from typing import Any, Literal, Optional
+
+import numpy as np
+import psutil
-from leann.searcher_base import BaseSearcher
-from leann.registry import register_backend
 from leann.interface import (
-    LeannBackendFactoryInterface,
     LeannBackendBuilderInterface,
+    LeannBackendFactoryInterface,
     LeannBackendSearcherInterface,
 )
+from leann.registry import register_backend
+from leann.searcher_base import BaseSearcher
 
 logger = logging.getLogger(__name__)
@@ -22,6 +22,11 @@ logger = logging.getLogger(__name__)
 @contextlib.contextmanager
 def suppress_cpp_output_if_needed():
     """Suppress C++ stdout/stderr based on LEANN_LOG_LEVEL"""
+    # In CI we avoid fiddling with low-level file descriptors to prevent aborts
+    if os.getenv("CI") == "true":
+        yield
+        return
+
     log_level = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
 
     # Only suppress if log level is WARNING or higher (ERROR, CRITICAL)
@@ -85,6 +90,43 @@ def _write_vectors_to_bin(data: np.ndarray, file_path: Path):
         f.write(data.tobytes())


+def _calculate_smart_memory_config(data: np.ndarray) -> tuple[float, float]:
+    """
+    Calculate smart memory configuration for DiskANN based on data size and system specs.
+
+    Args:
+        data: The embedding data array
+
+    Returns:
+        tuple: (search_memory_maximum, build_memory_maximum) in GB
+    """
+    num_vectors, dim = data.shape
+
+    # Calculate embedding storage size
+    embedding_size_bytes = num_vectors * dim * 4  # float32 = 4 bytes
+    embedding_size_gb = embedding_size_bytes / (1024**3)
+
+    # search_memory_maximum: 1/10 of embedding size for optimal PQ compression
+    # This controls Product Quantization size - smaller means more compression
+    search_memory_gb = max(0.1, embedding_size_gb / 10)  # At least 100MB
+
+    # build_memory_maximum: Based on available system RAM for sharding control
+    # This controls how much memory DiskANN uses during index construction
+    available_memory_gb = psutil.virtual_memory().available / (1024**3)
+    total_memory_gb = psutil.virtual_memory().total / (1024**3)
+
+    # Use 50% of available memory, but at least 2GB and at most 75% of total
+    build_memory_gb = max(2.0, min(available_memory_gb * 0.5, total_memory_gb * 0.75))
+
+    logger.info(
+        f"Smart memory config - Data: {embedding_size_gb:.2f}GB, "
+        f"Search mem: {search_memory_gb:.2f}GB (PQ control), "
+        f"Build mem: {build_memory_gb:.2f}GB (sharding control)"
+    )
+
+    return search_memory_gb, build_memory_gb
+
+
 @register_backend("diskann")
 class DiskannBackend(LeannBackendFactoryInterface):
     @staticmethod
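To make the sizing rule above concrete, a quick worked example that mirrors _calculate_smart_memory_config (the corpus size is illustrative):

import numpy as np
import psutil

data = np.zeros((1_000_000, 768), dtype=np.float32)  # hypothetical corpus
embedding_size_gb = data.shape[0] * data.shape[1] * 4 / (1024**3)  # ~2.86 GB
search_memory_gb = max(0.1, embedding_size_gb / 10)                # ~0.29 GB PQ budget
avail = psutil.virtual_memory().available / (1024**3)
total = psutil.virtual_memory().total / (1024**3)
build_memory_gb = max(2.0, min(avail * 0.5, total * 0.75))         # floor 2 GB, cap 75% of RAM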
@@ -100,7 +142,72 @@ class DiskannBuilder(LeannBackendBuilderInterface):
     def __init__(self, **kwargs):
         self.build_params = kwargs

-    def build(self, data: np.ndarray, ids: List[str], index_path: str, **kwargs):
+    def _safe_cleanup_after_partition(self, index_dir: Path, index_prefix: str):
+        """
+        Safely cleanup files after partition.
+        In partition mode, C++ doesn't read _disk.index content,
+        so we can delete it if all derived files exist.
+        """
+        disk_index_file = index_dir / f"{index_prefix}_disk.index"
+        beam_search_file = index_dir / f"{index_prefix}_disk_beam_search.index"
+
+        # Required files that C++ partition mode needs
+        # Note: C++ generates these with _disk.index suffix
+        disk_suffix = "_disk.index"
+        required_files = [
+            f"{index_prefix}{disk_suffix}_medoids.bin",  # Critical: assert fails if missing
+            # Note: _centroids.bin is not created in single-shot build - C++ handles this automatically
+            f"{index_prefix}_pq_pivots.bin",  # PQ table
+            f"{index_prefix}_pq_compressed.bin",  # PQ compressed vectors
+        ]
+
+        # Check if all required files exist
+        missing_files = []
+        for filename in required_files:
+            file_path = index_dir / filename
+            if not file_path.exists():
+                missing_files.append(filename)
+
+        if missing_files:
+            logger.warning(
+                f"Cannot safely delete _disk.index - missing required files: {missing_files}"
+            )
+            logger.info("Keeping all original files for safety")
+            return
+
+        # Calculate space savings
+        space_saved = 0
+        files_to_delete = []
+
+        if disk_index_file.exists():
+            space_saved += disk_index_file.stat().st_size
+            files_to_delete.append(disk_index_file)
+
+        if beam_search_file.exists():
+            space_saved += beam_search_file.stat().st_size
+            files_to_delete.append(beam_search_file)
+
+        # Safe to delete!
+        for file_to_delete in files_to_delete:
+            try:
+                os.remove(file_to_delete)
+                logger.info(f"✅ Safely deleted: {file_to_delete.name}")
+            except Exception as e:
+                logger.warning(f"Failed to delete {file_to_delete.name}: {e}")
+
+        if space_saved > 0:
+            space_saved_mb = space_saved / (1024 * 1024)
+            logger.info(f"💾 Space saved: {space_saved_mb:.1f} MB")
+
+        # Show what files are kept
+        logger.info("📁 Kept essential files for partition mode:")
+        for filename in required_files:
+            file_path = index_dir / filename
+            if file_path.exists():
+                size_mb = file_path.stat().st_size / (1024 * 1024)
+                logger.info(f"  - {filename} ({size_mb:.1f} MB)")
+
+    def build(self, data: np.ndarray, ids: list[str], index_path: str, **kwargs):
         path = Path(index_path)
         index_dir = path.parent
         index_prefix = path.stem
@@ -114,6 +221,17 @@ class DiskannBuilder(LeannBackendBuilderInterface):
         _write_vectors_to_bin(data, index_dir / data_filename)

         build_kwargs = {**self.build_params, **kwargs}
+
+        # Extract is_recompute from nested backend_kwargs if needed
+        is_recompute = build_kwargs.get("is_recompute", False)
+        if not is_recompute and "backend_kwargs" in build_kwargs:
+            is_recompute = build_kwargs["backend_kwargs"].get("is_recompute", False)
+
+        # Flatten all backend_kwargs parameters to top level for compatibility
+        if "backend_kwargs" in build_kwargs:
+            nested_params = build_kwargs.pop("backend_kwargs")
+            build_kwargs.update(nested_params)
+
         metric_enum = _get_diskann_metrics().get(
             build_kwargs.get("distance_metric", "mips").lower()
         )
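A small worked example of the kwargs flattening above (values hypothetical):

build_kwargs = {"complexity": 64, "backend_kwargs": {"is_recompute": True, "graph_degree": 32}}

is_recompute = build_kwargs.get("is_recompute", False)
if not is_recompute and "backend_kwargs" in build_kwargs:
    is_recompute = build_kwargs["backend_kwargs"].get("is_recompute", False)

build_kwargs.update(build_kwargs.pop("backend_kwargs"))
assert build_kwargs == {"complexity": 64, "is_recompute": True, "graph_degree": 32}
assert is_recompute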
@@ -122,6 +240,16 @@ class DiskannBuilder(LeannBackendBuilderInterface):
                 f"Unsupported distance_metric '{build_kwargs.get('distance_metric', 'unknown')}'."
             )

+        # Calculate smart memory configuration if not explicitly provided
+        if (
+            "search_memory_maximum" not in build_kwargs
+            or "build_memory_maximum" not in build_kwargs
+        ):
+            smart_search_mem, smart_build_mem = _calculate_smart_memory_config(data)
+        else:
+            smart_search_mem = build_kwargs.get("search_memory_maximum", 4.0)
+            smart_build_mem = build_kwargs.get("build_memory_maximum", 8.0)
+
         try:
             from . import _diskannpy as diskannpy  # type: ignore
@@ -132,12 +260,36 @@ class DiskannBuilder(LeannBackendBuilderInterface):
                 index_prefix,
                 build_kwargs.get("complexity", 64),
                 build_kwargs.get("graph_degree", 32),
-                build_kwargs.get("search_memory_maximum", 4.0),
-                build_kwargs.get("build_memory_maximum", 8.0),
+                build_kwargs.get("search_memory_maximum", smart_search_mem),
+                build_kwargs.get("build_memory_maximum", smart_build_mem),
                 build_kwargs.get("num_threads", 8),
                 build_kwargs.get("pq_disk_bytes", 0),
                 "",
             )
+
+            # Auto-partition if is_recompute is enabled
+            if build_kwargs.get("is_recompute", False):
+                logger.info("is_recompute=True, starting automatic graph partitioning...")
+                from .graph_partition import partition_graph
+
+                # Partition the index using absolute paths
+                # Convert to absolute paths to avoid issues with working directory changes
+                absolute_index_dir = Path(index_dir).resolve()
+                absolute_index_prefix_path = str(absolute_index_dir / index_prefix)
+                disk_graph_path, partition_bin_path = partition_graph(
+                    index_prefix_path=absolute_index_prefix_path,
+                    output_dir=str(absolute_index_dir),
+                    partition_prefix=index_prefix,
+                )
+
+                # Safe cleanup: In partition mode, C++ doesn't read _disk.index content
+                # but still needs the derived files (_medoids.bin, _centroids.bin, etc.)
+                self._safe_cleanup_after_partition(index_dir, index_prefix)
+
+                logger.info("✅ Graph partitioning completed successfully!")
+                logger.info(f"  - Disk graph: {disk_graph_path}")
+                logger.info(f"  - Partition file: {partition_bin_path}")
+
         finally:
             temp_data_file = index_dir / data_filename
             if temp_data_file.exists():
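A hedged end-to-end sketch of the build path above; the import path leann_backend_diskann.diskann_backend and the index location are assumptions, not confirmed by this diff:

import numpy as np
from leann_backend_diskann.diskann_backend import DiskannBuilder  # assumed import path

data = np.random.rand(10_000, 384).astype(np.float32)
ids = [str(i) for i in range(len(data))]

builder = DiskannBuilder(distance_metric="mips", is_recompute=True)
# With is_recompute=True, build() also runs partition_graph() and then
# _safe_cleanup_after_partition() to drop the bulky _disk.index files.
builder.build(data, ids, "./indexes/demo.leann")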
@@ -164,18 +316,69 @@ class DiskannSearcher(BaseSearcher):

         self.num_threads = kwargs.get("num_threads", 8)

-        fake_zmq_port = 6666
-        full_index_prefix = str(self.index_dir / self.index_path.stem)
-        self._index = diskannpy.StaticDiskFloatIndex(
-            metric_enum,
-            full_index_prefix,
-            self.num_threads,
-            kwargs.get("num_nodes_to_cache", 0),
-            1,
-            fake_zmq_port,  # Initial port, can be updated at runtime
-            "",
-            "",
-        )
+        # For DiskANN, we need to reinitialize the index when zmq_port changes
+        # Store the initialization parameters for later use
+        # Note: C++ load method expects the BASE path (without _disk.index suffix)
+        # C++ internally constructs: index_prefix + "_disk.index"
+        index_name = self.index_path.stem  # "simple_test.leann" -> "simple_test"
+        diskann_index_prefix = str(self.index_dir / index_name)  # /path/to/simple_test
+        full_index_prefix = diskann_index_prefix  # /path/to/simple_test (base path)
+
+        # Auto-detect partition files and set partition_prefix
+        partition_graph_file = self.index_dir / f"{index_name}_disk_graph.index"
+        partition_bin_file = self.index_dir / f"{index_name}_partition.bin"
+
+        partition_prefix = ""
+        if partition_graph_file.exists() and partition_bin_file.exists():
+            # C++ expects full path prefix, not just filename
+            partition_prefix = str(self.index_dir / index_name)  # /path/to/simple_test
+            logger.info(
+                f"✅ Detected partition files, using partition_prefix='{partition_prefix}'"
+            )
+        else:
+            logger.debug("No partition files detected, using standard index files")
+
+        self._init_params = {
+            "metric_enum": metric_enum,
+            "full_index_prefix": full_index_prefix,
+            "num_threads": self.num_threads,
+            "num_nodes_to_cache": kwargs.get("num_nodes_to_cache", 0),
+            "cache_mechanism": 1,
+            "pq_prefix": "",
+            "partition_prefix": partition_prefix,
+        }
+
+        # Log partition configuration for debugging
+        if partition_prefix:
+            logger.info(
+                f"✅ Detected partition files, using partition_prefix='{partition_prefix}'"
+            )
+        self._diskannpy = diskannpy
+        self._current_zmq_port = None
+        self._index = None
+        logger.debug("DiskANN searcher initialized (index will be loaded on first search)")
+
+    def _ensure_index_loaded(self, zmq_port: int):
+        """Ensure the index is loaded with the correct zmq_port."""
+        if self._index is None or self._current_zmq_port != zmq_port:
+            # Need to (re)load the index with the correct zmq_port
+            with suppress_cpp_output_if_needed():
+                if self._index is not None:
+                    logger.debug(f"Reloading DiskANN index with new zmq_port: {zmq_port}")
+                else:
+                    logger.debug(f"Loading DiskANN index with zmq_port: {zmq_port}")
+
+                self._index = self._diskannpy.StaticDiskFloatIndex(
+                    self._init_params["metric_enum"],
+                    self._init_params["full_index_prefix"],
+                    self._init_params["num_threads"],
+                    self._init_params["num_nodes_to_cache"],
+                    self._init_params["cache_mechanism"],
+                    zmq_port,
+                    self._init_params["pq_prefix"],
+                    self._init_params["partition_prefix"],
+                )
+            self._current_zmq_port = zmq_port
+
     def search(
         self,
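The searcher above now defers index construction until the first search and reloads only when the ZMQ port changes. A minimal, generic sketch of that pattern (independent of diskannpy; names are illustrative):

class LazyPortBoundHandle:
    """Cache a handle keyed on the port; rebuild only when the port changes."""

    def __init__(self, factory):
        self._factory = factory  # callable: port -> handle
        self._handle = None
        self._port = None

    def get(self, port: int):
        if self._handle is None or self._port != port:
            self._handle = self._factory(port)  # expensive (re)load happens here
            self._port = port
        return self._handle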
@@ -190,7 +393,7 @@ class DiskannSearcher(BaseSearcher):
         batch_recompute: bool = False,
         dedup_node_dis: bool = False,
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Search for nearest neighbors using DiskANN index.
@@ -213,18 +416,15 @@ class DiskannSearcher(BaseSearcher):
         Returns:
             Dict with 'labels' (list of lists) and 'distances' (ndarray)
         """
-        # Handle zmq_port compatibility: DiskANN can now update port at runtime
+        # Handle zmq_port compatibility: Ensure index is loaded with correct port
         if recompute_embeddings:
             if zmq_port is None:
-                raise ValueError(
-                    "zmq_port must be provided if recompute_embeddings is True"
-                )
-            current_port = self._index.get_zmq_port()
-            if zmq_port != current_port:
-                logger.debug(
-                    f"Updating DiskANN zmq_port from {current_port} to {zmq_port}"
-                )
-                self._index.set_zmq_port(zmq_port)
+                raise ValueError("zmq_port must be provided if recompute_embeddings is True")
+            self._ensure_index_loaded(zmq_port)
+        else:
+            # If not recomputing, we still need an index, use a default port
+            if self._index is None:
+                self._ensure_index_loaded(6666)  # Default port when not recomputing

         # DiskANN doesn't support "proportional" strategy
         if pruning_strategy == "proportional":
@@ -241,7 +441,14 @@ class DiskannSearcher(BaseSearcher):
         else:  # "global"
             use_global_pruning = True

-        # Perform search with suppressed C++ output based on log level
+        # Strategy:
+        # - Traversal always uses PQ distances
+        # - If recompute_embeddings=True, do a single final rerank via deferred fetch
+        #   (fetch embeddings for the final candidate set only)
+        # - Do not recompute neighbor distances along the path
+        use_deferred_fetch = True if recompute_embeddings else False
+        recompute_neighors = False  # Expected typo. For backward compatibility.
+
         with suppress_cpp_output_if_needed():
             labels, distances = self._index.batch_search(
                 query,
@@ -250,17 +457,15 @@ class DiskannSearcher(BaseSearcher):
                 complexity,
                 beam_width,
                 self.num_threads,
-                kwargs.get("USE_DEFERRED_FETCH", False),
+                use_deferred_fetch,
                 kwargs.get("skip_search_reorder", False),
-                recompute_embeddings,
+                recompute_neighors,
                 dedup_node_dis,
                 prune_ratio,
                 batch_recompute,
                 use_global_pruning,
             )

-        string_labels = [
-            [str(int_label) for int_label in batch_labels] for batch_labels in labels
-        ]
+        string_labels = [[str(int_label) for int_label in batch_labels] for batch_labels in labels]

         return {"labels": string_labels, "distances": distances}
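The "PQ traversal plus one final rerank" strategy set up above boils down to fetching exact embeddings once, for the final candidate set only. A minimal numpy sketch; fetch_embeddings stands in for the deferred-fetch call into the embedding server:

import numpy as np

def rerank_final_candidates(query_vec, candidate_ids, fetch_embeddings):
    """fetch_embeddings is a placeholder that returns exact (k, d) float32 vectors."""
    embs = fetch_embeddings(candidate_ids)
    scores = embs @ query_vec        # MIPS scoring; use -||q - x|| for L2 instead
    order = np.argsort(-scores)
    return [candidate_ids[i] for i in order]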
@@ -3,16 +3,17 @@ DiskANN-specific embedding server
 """

 import argparse
+import json
+import logging
+import os
+import sys
 import threading
 import time
-import os
-import zmq
-import numpy as np
-import json
 from pathlib import Path
-from typing import Optional
+from typing import Any, Optional
-import sys
-import logging
+
+import numpy as np
+import zmq

 # Set up logging based on environment variable
 LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
@@ -31,11 +32,22 @@ if not logger.handlers:
     logger.propagate = False


+_RAW_PROVIDER_OPTIONS = os.getenv("LEANN_EMBEDDING_OPTIONS")
+try:
+    PROVIDER_OPTIONS: dict[str, Any] = (
+        json.loads(_RAW_PROVIDER_OPTIONS) if _RAW_PROVIDER_OPTIONS else {}
+    )
+except json.JSONDecodeError:
+    logger.warning("Failed to parse LEANN_EMBEDDING_OPTIONS; ignoring provider options")
+    PROVIDER_OPTIONS = {}
+
+
 def create_diskann_embedding_server(
     passages_file: Optional[str] = None,
     zmq_port: int = 5555,
     model_name: str = "sentence-transformers/all-mpnet-base-v2",
     embedding_mode: str = "sentence-transformers",
+    distance_metric: str = "l2",
 ):
     """
     Create and start a ZMQ-based embedding server for DiskANN backend.
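How the env-driven provider options above might be set; the JSON keys are illustrative and are simply passed through verbatim to compute_embeddings(provider_options=...):

import json
import os

os.environ["LEANN_EMBEDDING_OPTIONS"] = json.dumps(
    {"base_url": "http://localhost:11434"}  # hypothetical provider option
)
# Malformed JSON is tolerated: the server logs a warning and falls back to {}.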
@@ -50,8 +62,8 @@ def create_diskann_embedding_server(
     sys.path.insert(0, str(leann_core_path))

     try:
-        from leann.embedding_compute import compute_embeddings
         from leann.api import PassageManager
+        from leann.embedding_compute import compute_embeddings

         logger.info("Successfully imported unified embedding computation module")
     except ImportError as e:
@@ -76,13 +88,12 @@ def create_diskann_embedding_server(
         raise ValueError("Only metadata files (.meta.json) are supported")

     # Load metadata to get passage sources
-    with open(passages_file, "r") as f:
+    with open(passages_file) as f:
         meta = json.load(f)

-    passages = PassageManager(meta["passage_sources"])
-    logger.info(
-        f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
-    )
+    logger.info(f"Loading PassageManager with metadata_file_path: {passages_file}")
+    passages = PassageManager(meta["passage_sources"], metadata_file_path=passages_file)
+    logger.info(f"Loaded PassageManager with {len(passages)} passages from metadata")

     # Import protobuf after ensuring the path is correct
     try:
@@ -100,8 +111,9 @@ def create_diskann_embedding_server(
         socket.bind(f"tcp://*:{zmq_port}")
         logger.info(f"DiskANN ZMQ REP server listening on port {zmq_port}")

-        socket.setsockopt(zmq.RCVTIMEO, 300000)
-        socket.setsockopt(zmq.SNDTIMEO, 300000)
+        socket.setsockopt(zmq.RCVTIMEO, 1000)
+        socket.setsockopt(zmq.SNDTIMEO, 1000)
+        socket.setsockopt(zmq.LINGER, 0)

         while True:
             try:
@@ -150,9 +162,7 @@ def create_diskann_embedding_server(
                     ):
                         texts = request
                         is_text_request = True
-                        logger.info(
-                            f"✅ MSGPACK: Direct text request for {len(texts)} texts"
-                        )
+                        logger.info(f"✅ MSGPACK: Direct text request for {len(texts)} texts")
                     else:
                         raise ValueError("Not a valid msgpack text request")
                 except Exception as msgpack_error:
@@ -167,9 +177,7 @@ def create_diskann_embedding_server(
                         passage_data = passages.get_passage(str(nid))
                         txt = passage_data["text"]
                         if not txt:
-                            raise RuntimeError(
-                                f"FATAL: Empty text for passage ID {nid}"
-                            )
+                            raise RuntimeError(f"FATAL: Empty text for passage ID {nid}")
                         texts.append(txt)
                     except KeyError as e:
                         logger.error(f"Passage ID {nid} not found: {e}")
@@ -180,12 +188,15 @@ def create_diskann_embedding_server(

                 # Debug logging
                 logger.debug(f"Processing {len(texts)} texts")
-                logger.debug(
-                    f"Text lengths: {[len(t) for t in texts[:5]]}"
-                )  # Show first 5
+                logger.debug(f"Text lengths: {[len(t) for t in texts[:5]]}")  # Show first 5

                 # Process embeddings using unified computation
-                embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
+                embeddings = compute_embeddings(
+                    texts,
+                    model_name,
+                    mode=embedding_mode,
+                    provider_options=PROVIDER_OPTIONS,
+                )
                 logger.info(
                     f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
                 )
@@ -199,9 +210,7 @@ def create_diskann_embedding_server(
                 else:
                     # For DiskANN C++ compatibility: return protobuf format
                     resp_proto = embedding_pb2.NodeEmbeddingResponse()
-                    hidden_contiguous = np.ascontiguousarray(
-                        embeddings, dtype=np.float32
-                    )
+                    hidden_contiguous = np.ascontiguousarray(embeddings, dtype=np.float32)

                     # Serialize embeddings data
                     resp_proto.embeddings_data = hidden_contiguous.tobytes()
@@ -226,30 +235,222 @@ def create_diskann_embedding_server(
                 traceback.print_exc()
                 raise

-    zmq_thread = threading.Thread(target=zmq_server_thread, daemon=True)
+    def zmq_server_thread_with_shutdown(shutdown_event):
+        """ZMQ server thread that respects shutdown signal.
+
+        This creates its own REP socket, binds to zmq_port, and periodically
+        checks shutdown_event using recv timeouts to exit cleanly.
+        """
+        logger.info("DiskANN ZMQ server thread started with shutdown support")
+
+        context = zmq.Context()
+        rep_socket = context.socket(zmq.REP)
+        rep_socket.bind(f"tcp://*:{zmq_port}")
+        logger.info(f"DiskANN ZMQ REP server listening on port {zmq_port}")
+
+        # Set receive timeout so we can check shutdown_event periodically
+        rep_socket.setsockopt(zmq.RCVTIMEO, 1000)  # 1 second timeout
+        rep_socket.setsockopt(zmq.SNDTIMEO, 1000)
+        rep_socket.setsockopt(zmq.LINGER, 0)
+
+        try:
+            while not shutdown_event.is_set():
+                try:
+                    e2e_start = time.time()
+                    # REP socket receives single-part messages
+                    message = rep_socket.recv()
+
+                    # Check for empty messages - REP socket requires response to every request
+                    if not message:
+                        logger.warning("Received empty message, sending empty response")
+                        rep_socket.send(b"")
+                        continue
+
+                    # Try protobuf first (same logic as original)
+                    texts = []
+                    is_text_request = False
+
+                    try:
+                        req_proto = embedding_pb2.NodeEmbeddingRequest()
+                        req_proto.ParseFromString(message)
+                        node_ids = list(req_proto.node_ids)
+
+                        # Look up texts by node IDs
+                        for nid in node_ids:
+                            try:
+                                passage_data = passages.get_passage(str(nid))
+                                txt = passage_data["text"]
+                                if not txt:
+                                    raise RuntimeError(f"FATAL: Empty text for passage ID {nid}")
+                                texts.append(txt)
+                            except KeyError:
+                                raise RuntimeError(f"FATAL: Passage with ID {nid} not found")
+
+                        logger.info(f"ZMQ received protobuf request for {len(node_ids)} node IDs")
+                    except Exception:
+                        # Fallback to msgpack for text requests
+                        try:
+                            import msgpack
+
+                            request = msgpack.unpackb(message)
+                            if isinstance(request, list) and all(
+                                isinstance(item, str) for item in request
+                            ):
+                                texts = request
+                                is_text_request = True
+                                logger.info(
+                                    f"ZMQ received msgpack text request for {len(texts)} texts"
+                                )
+                            else:
+                                raise ValueError("Not a valid msgpack text request")
+                        except Exception:
+                            logger.error("Both protobuf and msgpack parsing failed!")
+                            # Send error response
+                            resp_proto = embedding_pb2.NodeEmbeddingResponse()
+                            rep_socket.send(resp_proto.SerializeToString())
+                            continue
+
+                    # Process the request
+                    embeddings = compute_embeddings(
+                        texts,
+                        model_name,
+                        mode=embedding_mode,
+                        provider_options=PROVIDER_OPTIONS,
+                    )
+                    logger.info(f"Computed embeddings shape: {embeddings.shape}")
+
+                    # Validation
+                    if np.isnan(embeddings).any() or np.isinf(embeddings).any():
+                        logger.error("NaN or Inf detected in embeddings!")
+                        # Send error response
+                        if is_text_request:
+                            import msgpack
+
+                            response_data = msgpack.packb([])
+                        else:
+                            resp_proto = embedding_pb2.NodeEmbeddingResponse()
+                            response_data = resp_proto.SerializeToString()
+                        rep_socket.send(response_data)
+                        continue
+
+                    # Prepare response based on request type
+                    if is_text_request:
+                        # For direct text requests, return msgpack
+                        import msgpack
+
+                        response_data = msgpack.packb(embeddings.tolist())
+                    else:
+                        # For protobuf requests, return protobuf
+                        resp_proto = embedding_pb2.NodeEmbeddingResponse()
+                        hidden_contiguous = np.ascontiguousarray(embeddings, dtype=np.float32)
+
+                        resp_proto.embeddings_data = hidden_contiguous.tobytes()
+                        resp_proto.dimensions.append(hidden_contiguous.shape[0])
+                        resp_proto.dimensions.append(hidden_contiguous.shape[1])
+
+                        response_data = resp_proto.SerializeToString()
+
+                    # Send response back to the client
+                    rep_socket.send(response_data)
+
+                    e2e_end = time.time()
+                    logger.info(f"⏱️ ZMQ E2E time: {e2e_end - e2e_start:.6f}s")
+
+                except zmq.Again:
+                    # Timeout - check shutdown_event and continue
+                    continue
+                except Exception as e:
+                    if not shutdown_event.is_set():
+                        logger.error(f"Error in ZMQ server loop: {e}")
+                        try:
+                            # Send error response for REP socket
+                            resp_proto = embedding_pb2.NodeEmbeddingResponse()
+                            rep_socket.send(resp_proto.SerializeToString())
+                        except Exception:
+                            pass
+                    else:
+                        logger.info("Shutdown in progress, ignoring ZMQ error")
+                        break
+        finally:
+            try:
+                rep_socket.close(0)
+            except Exception:
+                pass
+            try:
+                context.term()
+            except Exception:
+                pass
+
+            logger.info("DiskANN ZMQ server thread exiting gracefully")
+
+    # Add shutdown coordination
+    shutdown_event = threading.Event()
+
+    def shutdown_zmq_server():
+        """Gracefully shutdown ZMQ server."""
+        logger.info("Initiating graceful shutdown...")
+        shutdown_event.set()
+
+        if zmq_thread.is_alive():
+            logger.info("Waiting for ZMQ thread to finish...")
+            zmq_thread.join(timeout=5)
+            if zmq_thread.is_alive():
+                logger.warning("ZMQ thread did not finish in time")
+
+        # Clean up ZMQ resources
+        try:
+            # Note: socket and context are cleaned up by thread exit
+            logger.info("ZMQ resources cleaned up")
+        except Exception as e:
+            logger.warning(f"Error cleaning ZMQ resources: {e}")
+
+        # Clean up other resources
+        try:
+            import gc
+
+            gc.collect()
+            logger.info("Additional resources cleaned up")
+        except Exception as e:
+            logger.warning(f"Error cleaning additional resources: {e}")
+
+        logger.info("Graceful shutdown completed")
+        sys.exit(0)
+
+    # Register signal handlers within this function scope
+    import signal
+
+    def signal_handler(sig, frame):
+        logger.info(f"Received signal {sig}, shutting down gracefully...")
+        shutdown_zmq_server()
+
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+
+    # Start ZMQ thread (NOT daemon!)
+    zmq_thread = threading.Thread(
+        target=lambda: zmq_server_thread_with_shutdown(shutdown_event),
+        daemon=False,  # Not daemon - we want to wait for it
+    )
     zmq_thread.start()
     logger.info(f"Started DiskANN ZMQ server thread on port {zmq_port}")

     # Keep the main thread alive
     try:
-        while True:
-            time.sleep(1)
+        while not shutdown_event.is_set():
+            time.sleep(0.1)  # Check shutdown more frequently
     except KeyboardInterrupt:
         logger.info("DiskANN Server shutting down...")
+        shutdown_zmq_server()
         return
+
+    # If we reach here, shutdown was triggered by signal
+    logger.info("Main loop exited, process should be shutting down")
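A hedged client-side sketch for the server above: direct text requests are msgpack lists of strings on a REQ socket (REQ pairs with the server's REP), and the reply is the embedding matrix as nested lists; port 5555 matches the server default:

import msgpack
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect("tcp://localhost:5555")

sock.send(msgpack.packb(["hello world", "leann is lightweight"]))
vectors = msgpack.unpackb(sock.recv())  # one embedding row per input text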
 if __name__ == "__main__":
-    import signal
     import sys

-    def signal_handler(sig, frame):
-        logger.info(f"Received signal {sig}, shutting down gracefully...")
-        sys.exit(0)
-
-    # Register signal handlers for graceful shutdown
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGINT, signal_handler)
+    # Signal handlers are now registered within create_diskann_embedding_server

     parser = argparse.ArgumentParser(description="DiskANN Embedding service")
     parser.add_argument("--zmq-port", type=int, default=5555, help="ZMQ port to run on")
@@ -268,9 +469,16 @@ if __name__ == "__main__":
         "--embedding-mode",
         type=str,
         default="sentence-transformers",
-        choices=["sentence-transformers", "openai", "mlx"],
+        choices=["sentence-transformers", "openai", "mlx", "ollama"],
         help="Embedding backend mode",
     )
+    parser.add_argument(
+        "--distance-metric",
+        type=str,
+        default="l2",
+        choices=["l2", "mips", "cosine"],
+        help="Distance metric for similarity computation",
+    )

     args = parser.parse_args()
@@ -280,4 +488,5 @@ if __name__ == "__main__":
         zmq_port=args.zmq_port,
         model_name=args.model_name,
         embedding_mode=args.embedding_mode,
+        distance_metric=args.distance_metric,
     )
@@ -1,27 +1,28 @@
-# -*- coding: utf-8 -*-
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: embedding.proto
+# ruff: noqa
 """Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import descriptor_pool as _descriptor_pool
 from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder

 # @@protoc_insertion_point(imports)

 _sym_db = _symbol_database.Default()


-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0f\x65mbedding.proto\x12\x0eprotoembedding\"(\n\x14NodeEmbeddingRequest\x12\x10\n\x08node_ids\x18\x01 \x03(\r\"Y\n\x15NodeEmbeddingResponse\x12\x17\n\x0f\x65mbeddings_data\x18\x01 \x01(\x0c\x12\x12\n\ndimensions\x18\x02 \x03(\x05\x12\x13\n\x0bmissing_ids\x18\x03 \x03(\rb\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+    b'\n\x0f\x65mbedding.proto\x12\x0eprotoembedding"(\n\x14NodeEmbeddingRequest\x12\x10\n\x08node_ids\x18\x01 \x03(\r"Y\n\x15NodeEmbeddingResponse\x12\x17\n\x0f\x65mbeddings_data\x18\x01 \x01(\x0c\x12\x12\n\ndimensions\x18\x02 \x03(\x05\x12\x13\n\x0bmissing_ids\x18\x03 \x03(\rb\x06proto3'
+)

 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'embedding_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "embedding_pb2", globals())
+if not _descriptor._USE_C_DESCRIPTORS:

     DESCRIPTOR._options = None
-    _NODEEMBEDDINGREQUEST._serialized_start=35
-    _NODEEMBEDDINGREQUEST._serialized_end=75
-    _NODEEMBEDDINGRESPONSE._serialized_start=77
-    _NODEEMBEDDINGRESPONSE._serialized_end=166
+    _NODEEMBEDDINGREQUEST._serialized_start = 35
+    _NODEEMBEDDINGREQUEST._serialized_end = 75
+    _NODEEMBEDDINGRESPONSE._serialized_start = 77
+    _NODEEMBEDDINGRESPONSE._serialized_end = 166
 # @@protoc_insertion_point(module_scope)
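For the protobuf path, a hedged sketch of encoding a request and decoding a response against the schema above; the import path is an assumption:

import numpy as np
from leann_backend_diskann import embedding_pb2  # assumed import path

def encode_request(node_ids: list[int]) -> bytes:
    req = embedding_pb2.NodeEmbeddingRequest()
    req.node_ids.extend(node_ids)
    return req.SerializeToString()

def decode_response(wire: bytes) -> np.ndarray:
    resp = embedding_pb2.NodeEmbeddingResponse()
    resp.ParseFromString(wire)
    # embeddings_data is a flat float32 buffer; dimensions = (rows, dim)
    return np.frombuffer(resp.embeddings_data, dtype=np.float32).reshape(tuple(resp.dimensions))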
@@ -0,0 +1,299 @@
+#!/usr/bin/env python3
+"""
+Graph Partition Module for LEANN DiskANN Backend
+
+This module provides Python bindings for the graph partition functionality
+of DiskANN, allowing users to partition disk-based indices for better
+performance.
+"""
+
+import os
+import shutil
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Optional
+
+
+class GraphPartitioner:
+    """
+    A Python interface for DiskANN's graph partition functionality.
+
+    This class provides methods to partition disk-based indices for improved
+    search performance and memory efficiency.
+    """
+
+    def __init__(self, build_type: str = "release"):
+        """
+        Initialize the GraphPartitioner.
+
+        Args:
+            build_type: Build type for the executables ("debug" or "release")
+        """
+        self.build_type = build_type
+        self._ensure_executables()
+
+    def _get_executable_path(self, name: str) -> str:
+        """Get the path to a graph partition executable."""
+        # Get the directory where this Python module is located
+        module_dir = Path(__file__).parent
+        # Navigate to the graph_partition directory
+        graph_partition_dir = module_dir.parent / "third_party" / "DiskANN" / "graph_partition"
+        executable_path = graph_partition_dir / "build" / self.build_type / "graph_partition" / name
+
+        if not executable_path.exists():
+            raise FileNotFoundError(f"Executable {name} not found at {executable_path}")
+
+        return str(executable_path)
+
+    def _ensure_executables(self):
+        """Ensure that the required executables are built."""
+        try:
+            self._get_executable_path("partitioner")
+            self._get_executable_path("index_relayout")
+        except FileNotFoundError:
+            # Try to build the executables automatically
+            print("Executables not found, attempting to build them...")
+            self._build_executables()
+
+    def _build_executables(self):
+        """Build the required executables."""
+        graph_partition_dir = (
+            Path(__file__).parent.parent / "third_party" / "DiskANN" / "graph_partition"
+        )
+        original_dir = os.getcwd()
+
+        try:
+            os.chdir(graph_partition_dir)
+
+            # Clean any existing build
+            if (graph_partition_dir / "build").exists():
+                shutil.rmtree(graph_partition_dir / "build")
+
+            # Run the build script
+            cmd = ["./build.sh", self.build_type, "split_graph", "/tmp/dummy"]
+            subprocess.run(cmd, capture_output=True, text=True, cwd=graph_partition_dir)
+
+            # Check if executables were created
+            partitioner_path = self._get_executable_path("partitioner")
+            relayout_path = self._get_executable_path("index_relayout")
+
+            print(f"✅ Built partitioner: {partitioner_path}")
+            print(f"✅ Built index_relayout: {relayout_path}")
+
+        except Exception as e:
+            raise RuntimeError(f"Failed to build executables: {e}")
+        finally:
+            os.chdir(original_dir)
+
+    def partition_graph(
+        self,
+        index_prefix_path: str,
+        output_dir: Optional[str] = None,
+        partition_prefix: Optional[str] = None,
+        **kwargs,
+    ) -> tuple[str, str]:
+        """
+        Partition a disk-based index for improved performance.
+
+        Args:
+            index_prefix_path: Path to the index prefix (e.g., "/path/to/index")
+            output_dir: Output directory for results (defaults to parent of index_prefix_path)
+            partition_prefix: Prefix for output files (defaults to basename of index_prefix_path)
+            **kwargs: Additional parameters for graph partitioning:
+                - gp_times: Number of LDG partition iterations (default: 10)
+                - lock_nums: Number of lock nodes (default: 10)
+                - cut: Cut adjacency list degree (default: 100)
+                - scale_factor: Scale factor (default: 1)
+                - data_type: Data type (default: "float")
+                - thread_nums: Number of threads (default: 10)
+
+        Returns:
+            Tuple of (disk_graph_index_path, partition_bin_path)
+
+        Raises:
+            RuntimeError: If the partitioning process fails
+        """
+        # Set default parameters
+        params = {
+            "gp_times": 10,
+            "lock_nums": 10,
+            "cut": 100,
+            "scale_factor": 1,
+            "data_type": "float",
+            "thread_nums": 10,
+            **kwargs,
+        }
+
+        # Determine output directory
+        if output_dir is None:
+            output_dir = str(Path(index_prefix_path).parent)
+
+        # Create output directory if it doesn't exist
+        Path(output_dir).mkdir(parents=True, exist_ok=True)
+
+        # Determine partition prefix
+        if partition_prefix is None:
+            partition_prefix = Path(index_prefix_path).name
+
+        # Get executable paths
+        partitioner_path = self._get_executable_path("partitioner")
+        relayout_path = self._get_executable_path("index_relayout")
+
+        # Create temporary directory for processing
+        with tempfile.TemporaryDirectory() as temp_dir:
+            # Change to the graph_partition directory for temporary files
+            graph_partition_dir = (
+                Path(__file__).parent.parent / "third_party" / "DiskANN" / "graph_partition"
+            )
+            original_dir = os.getcwd()
+
+            try:
+                os.chdir(graph_partition_dir)
+
+                # Create temporary data directory
+                temp_data_dir = Path(temp_dir) / "data"
+                temp_data_dir.mkdir(parents=True, exist_ok=True)
+
+                # Set up paths for temporary files
+                graph_path = temp_data_dir / "starling" / "_M_R_L_B" / "GRAPH"
+                graph_gp_path = (
+                    graph_path
+                    / f"GP_TIMES_{params['gp_times']}_LOCK_{params['lock_nums']}_GP_USE_FREQ0_CUT{params['cut']}_SCALE{params['scale_factor']}"
+                )
+                graph_gp_path.mkdir(parents=True, exist_ok=True)
+
+                # Find input index file
+                old_index_file = f"{index_prefix_path}_disk_beam_search.index"
+                if not os.path.exists(old_index_file):
+                    old_index_file = f"{index_prefix_path}_disk.index"
+
+                if not os.path.exists(old_index_file):
+                    raise RuntimeError(f"Index file not found: {old_index_file}")
+
+                # Run partitioner
+                gp_file_path = graph_gp_path / "_part.bin"
+                partitioner_cmd = [
+                    partitioner_path,
+                    "--index_file",
+                    old_index_file,
+                    "--data_type",
+                    params["data_type"],
+                    "--gp_file",
+                    str(gp_file_path),
+                    "-T",
+                    str(params["thread_nums"]),
+                    "--ldg_times",
+                    str(params["gp_times"]),
+                    "--scale",
+                    str(params["scale_factor"]),
+                    "--mode",
+                    "1",
+                ]
+
+                print(f"Running partitioner: {' '.join(partitioner_cmd)}")
+                result = subprocess.run(
+                    partitioner_cmd, capture_output=True, text=True, cwd=graph_partition_dir
+                )
+
+                if result.returncode != 0:
+                    raise RuntimeError(
+                        f"Partitioner failed with return code {result.returncode}.\n"
+                        f"stdout: {result.stdout}\n"
+                        f"stderr: {result.stderr}"
+                    )
+
+                # Run relayout
+                part_tmp_index = graph_gp_path / "_part_tmp.index"
+                relayout_cmd = [
+                    relayout_path,
+                    old_index_file,
+                    str(gp_file_path),
+                    params["data_type"],
+                    "1",
+                ]
+
+                print(f"Running relayout: {' '.join(relayout_cmd)}")
+                result = subprocess.run(
+                    relayout_cmd, capture_output=True, text=True, cwd=graph_partition_dir
+                )
+
+                if result.returncode != 0:
+                    raise RuntimeError(
+                        f"Relayout failed with return code {result.returncode}.\n"
+                        f"stdout: {result.stdout}\n"
+                        f"stderr: {result.stderr}"
+                    )
+
+                # Copy results to output directory
+                disk_graph_path = Path(output_dir) / f"{partition_prefix}_disk_graph.index"
+                partition_bin_path = Path(output_dir) / f"{partition_prefix}_partition.bin"
+
+                shutil.copy2(part_tmp_index, disk_graph_path)
+                shutil.copy2(gp_file_path, partition_bin_path)
+
+                print(f"Results copied to: {output_dir}")
+                return str(disk_graph_path), str(partition_bin_path)
+
+            finally:
+                os.chdir(original_dir)
+
+    def get_partition_info(self, partition_bin_path: str) -> dict:
+        """
+        Get information about a partition file.
+
+        Args:
+            partition_bin_path: Path to the partition binary file
+
+        Returns:
+            Dictionary containing partition information
+        """
+        if not os.path.exists(partition_bin_path):
+            raise FileNotFoundError(f"Partition file not found: {partition_bin_path}")
+
+        # For now, return basic file information
+        # In the future, this could parse the binary file for detailed info
+        stat = os.stat(partition_bin_path)
+        return {
+            "file_size": stat.st_size,
+            "file_path": partition_bin_path,
+            "modified_time": stat.st_mtime,
+        }
+
+
+def partition_graph(
+    index_prefix_path: str,
+    output_dir: Optional[str] = None,
+    partition_prefix: Optional[str] = None,
+    build_type: str = "release",
+    **kwargs,
+) -> tuple[str, str]:
+    """
+    Convenience function to partition a graph index.
+
+    Args:
+        index_prefix_path: Path to the index prefix
+        output_dir: Output directory (defaults to parent of index_prefix_path)
+        partition_prefix: Prefix for output files (defaults to basename of index_prefix_path)
+        build_type: Build type for executables ("debug" or "release")
+        **kwargs: Additional parameters for graph partitioning
+
+    Returns:
+        Tuple of (disk_graph_index_path, partition_bin_path)
+    """
+    partitioner = GraphPartitioner(build_type=build_type)
+    return partitioner.partition_graph(index_prefix_path, output_dir, partition_prefix, **kwargs)
+
+
+# Example usage:
+if __name__ == "__main__":
+    # Example: partition an index
+    try:
+        disk_graph_path, partition_bin_path = partition_graph(
+            "/path/to/your/index_prefix", gp_times=10, lock_nums=10, cut=100
+        )
+        print("Partitioning completed successfully!")
+        print(f"Disk graph index: {disk_graph_path}")
+        print(f"Partition binary: {partition_bin_path}")
+    except Exception as e:
+        print(f"Partitioning failed: {e}")
@@ -1,11 +1,11 @@
 [build-system]
-requires = ["scikit-build-core>=0.10", "pybind11>=2.12.0", "numpy"]
+requires = ["scikit-build-core>=0.10", "pybind11>=2.12.0", "numpy", "cmake>=3.30"]
 build-backend = "scikit_build_core.build"

 [project]
 name = "leann-backend-diskann"
-version = "0.1.11"
-dependencies = ["leann-core==0.1.11", "numpy", "protobuf>=3.19.0"]
+version = "0.3.4"
+dependencies = ["leann-core==0.3.4", "numpy", "protobuf>=3.19.0"]

 [tool.scikit-build]
 # Key: simplified CMake path
@@ -17,3 +17,5 @@ editable.mode = "redirect"
 cmake.build-type = "Release"
 build.verbose = true
 build.tool-args = ["-j8"]
+# Let CMake find packages via Homebrew prefix
+cmake.define = {CMAKE_PREFIX_PATH = {env = "CMAKE_PREFIX_PATH"}, OpenMP_ROOT = {env = "OpenMP_ROOT"}}
Submodule packages/leann-backend-diskann/third_party/DiskANN updated: af2a26481e...19f9603c72
@@ -5,11 +5,28 @@ set(CMAKE_CXX_COMPILER_WORKS 1)

 # Set OpenMP path for macOS
 if(APPLE)
-    set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I/opt/homebrew/opt/libomp/include")
-    set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I/opt/homebrew/opt/libomp/include")
+    # Detect Homebrew installation path (Apple Silicon vs Intel)
+    if(EXISTS "/opt/homebrew/opt/libomp")
+        set(HOMEBREW_PREFIX "/opt/homebrew")
+    elseif(EXISTS "/usr/local/opt/libomp")
+        set(HOMEBREW_PREFIX "/usr/local")
+    else()
+        message(FATAL_ERROR "Could not find libomp installation. Please install with: brew install libomp")
+    endif()
+
+    set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_PREFIX}/opt/libomp/include")
+    set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_PREFIX}/opt/libomp/include")
     set(OpenMP_C_LIB_NAMES "omp")
     set(OpenMP_CXX_LIB_NAMES "omp")
-    set(OpenMP_omp_LIBRARY "/opt/homebrew/opt/libomp/lib/libomp.dylib")
+    set(OpenMP_omp_LIBRARY "${HOMEBREW_PREFIX}/opt/libomp/lib/libomp.dylib")
+
+    # Force use of system libc++ to avoid version mismatch
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
+    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++")
+    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -stdlib=libc++")
+
+    # Set minimum macOS version for better compatibility
+    set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "Minimum macOS version")
 endif()

 # Use system ZeroMQ instead of building from source
@@ -32,9 +49,28 @@ set(BUILD_TESTING OFF CACHE BOOL "" FORCE)
 set(FAISS_ENABLE_C_API OFF CACHE BOOL "" FORCE)
 set(FAISS_OPT_LEVEL "generic" CACHE STRING "" FORCE)

-# Disable additional SIMD versions to speed up compilation
+# Disable x86-specific SIMD optimizations (important for ARM64 compatibility)
 set(FAISS_ENABLE_AVX2 OFF CACHE BOOL "" FORCE)
 set(FAISS_ENABLE_AVX512 OFF CACHE BOOL "" FORCE)
+set(FAISS_ENABLE_SSE4_1 OFF CACHE BOOL "" FORCE)
+
+# ARM64-specific configuration
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64")
+    message(STATUS "Configuring Faiss for ARM64 architecture")
+
+    if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
+        # Use SVE optimization level for ARM64 Linux (as seen in Faiss conda build)
+        set(FAISS_OPT_LEVEL "sve" CACHE STRING "" FORCE)
+        message(STATUS "Setting FAISS_OPT_LEVEL to 'sve' for ARM64 Linux")
+    else()
+        # Use generic optimization for other ARM64 platforms (like macOS)
+        set(FAISS_OPT_LEVEL "generic" CACHE STRING "" FORCE)
+        message(STATUS "Setting FAISS_OPT_LEVEL to 'generic' for ARM64 ${CMAKE_SYSTEM_NAME}")
+    endif()
+
+    # ARM64 compatibility: Faiss submodule has been modified to fix x86 header inclusion
+    message(STATUS "Using ARM64-compatible Faiss submodule")
+endif()

 # Additional optimization options from INSTALL.md
 set(CMAKE_BUILD_TYPE "Release" CACHE STRING "" FORCE)
|||||||
@@ -1 +1 @@
|
|||||||
from . import hnsw_backend
|
from . import hnsw_backend as hnsw_backend
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -1,19 +1,20 @@
-import numpy as np
-import os
-from pathlib import Path
-from typing import Dict, Any, List, Literal, Optional
-import shutil
 import logging
-from leann.searcher_base import BaseSearcher
-from .convert_to_csr import convert_hnsw_graph_to_csr
-from leann.registry import register_backend
+import os
+import shutil
+import time
+from pathlib import Path
+from typing import Any, Literal, Optional
+
+import numpy as np
+
 from leann.interface import (
-    LeannBackendFactoryInterface,
     LeannBackendBuilderInterface,
+    LeannBackendFactoryInterface,
     LeannBackendSearcherInterface,
 )
+from leann.registry import register_backend
+from leann.searcher_base import BaseSearcher
+
+from .convert_to_csr import convert_hnsw_graph_to_csr, prune_hnsw_embeddings_inplace

 logger = logging.getLogger(__name__)
@@ -28,6 +29,12 @@ def get_metric_map():
     }


+def normalize_l2(data: np.ndarray) -> np.ndarray:
+    norms = np.linalg.norm(data, axis=1, keepdims=True)
+    norms[norms == 0] = 1  # Avoid division by zero
+    return data / norms
+
+
 @register_backend("hnsw")
 class HNSWBackend(LeannBackendFactoryInterface):
     @staticmethod
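The copy-based normalize_l2 above stands in for faiss.normalize_L2, which mutates its argument in place; the zero-norm guard keeps all-zero rows unchanged instead of producing NaNs. A quick check:

import numpy as np

data = np.array([[3.0, 4.0], [0.0, 0.0]], dtype=np.float32)
norms = np.linalg.norm(data, axis=1, keepdims=True)  # [[5.0], [0.0]]
norms[norms == 0] = 1
print(data / norms)  # [[0.6, 0.8], [0.0, 0.0]] - the zero row survives intact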
@@ -48,12 +55,15 @@ class HNSWBuilder(LeannBackendBuilderInterface):
         self.efConstruction = self.build_params.setdefault("efConstruction", 200)
         self.distance_metric = self.build_params.setdefault("distance_metric", "mips")
         self.dimensions = self.build_params.get("dimensions")
-        if not self.is_recompute:
-            if self.is_compact:
-                # TODO: support this case @andy
-                raise ValueError("is_recompute is False, but is_compact is True. This is not compatible now. change is compact to False and you can use the original HNSW index.")
+        if not self.is_recompute and self.is_compact:
+            # Auto-correct: non-recompute requires non-compact storage for HNSW
+            logger.warning(
+                "is_recompute=False requires non-compact HNSW. Forcing is_compact=False."
+            )
+            self.is_compact = False
+            self.build_params["is_compact"] = False

-    def build(self, data: np.ndarray, ids: List[str], index_path: str, **kwargs):
+    def build(self, data: np.ndarray, ids: list[str], index_path: str, **kwargs):
         from . import faiss  # type: ignore

         path = Path(index_path)
||||||
@@ -74,7 +84,7 @@ class HNSWBuilder(LeannBackendBuilderInterface):
|
|||||||
index.hnsw.efConstruction = self.efConstruction
|
index.hnsw.efConstruction = self.efConstruction
|
||||||
|
|
||||||
if self.distance_metric.lower() == "cosine":
|
if self.distance_metric.lower() == "cosine":
|
||||||
faiss.normalize_L2(data)
|
data = normalize_l2(data)
|
||||||
|
|
||||||
index.add(data.shape[0], faiss.swig_ptr(data))
|
index.add(data.shape[0], faiss.swig_ptr(data))
|
||||||
index_file = index_dir / f"{index_prefix}.index"
|
index_file = index_dir / f"{index_prefix}.index"
|
||||||
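Note on the cosine path above: cosine similarity equals the inner product of L2-normalized vectors, so normalizing the data (and, later, the query) and searching the inner-product (MIPS) index yields cosine ranking. A minimal standalone sketch of that equivalence (plain NumPy, not tied to the Faiss index):

    import numpy as np

    def normalize_l2(data: np.ndarray) -> np.ndarray:
        norms = np.linalg.norm(data, axis=1, keepdims=True)
        norms[norms == 0] = 1  # avoid division by zero
        return data / norms

    a = np.random.rand(4, 8).astype(np.float32)   # candidate vectors
    q = np.random.rand(1, 8).astype(np.float32)   # query vector
    cosine = (a @ q.T).ravel() / (np.linalg.norm(a, axis=1) * np.linalg.norm(q))
    mips_on_normalized = (normalize_l2(a) @ normalize_l2(q).T).ravel()
    assert np.allclose(cosine, mips_on_normalized, atol=1e-6)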
@@ -82,6 +92,8 @@ class HNSWBuilder(LeannBackendBuilderInterface):
 
         if self.is_compact:
             self._convert_to_csr(index_file)
+        elif self.is_recompute:
+            prune_hnsw_embeddings_inplace(str(index_file))
 
     def _convert_to_csr(self, index_file: Path):
         """Convert built index to CSR format"""
@@ -99,16 +111,12 @@ class HNSWBuilder(LeannBackendBuilderInterface):
             # index_file_old = index_file.with_suffix(".old")
             # shutil.move(str(index_file), str(index_file_old))
             shutil.move(str(csr_temp_file), str(index_file))
-            logger.info(
-                f"INFO: Replaced original index with {mode_str} version at '{index_file}'"
-            )
+            logger.info(f"INFO: Replaced original index with {mode_str} version at '{index_file}'")
         else:
             # Clean up and fail fast
             if csr_temp_file.exists():
                 os.remove(csr_temp_file)
-            raise RuntimeError(
-                "CSR conversion failed - cannot proceed with compact format"
-            )
+            raise RuntimeError("CSR conversion failed - cannot proceed with compact format")
 
 
 class HNSWSearcher(BaseSearcher):
@@ -120,15 +128,17 @@ class HNSWSearcher(BaseSearcher):
         )
         from . import faiss  # type: ignore
 
-        self.distance_metric = self.meta.get("distance_metric", "mips").lower()
+        self.distance_metric = (
+            self.meta.get("backend_kwargs", {}).get("distance_metric", "mips").lower()
+        )
         metric_enum = get_metric_map().get(self.distance_metric)
         if metric_enum is None:
             raise ValueError(f"Unsupported distance_metric '{self.distance_metric}'.")
 
-        self.is_compact, self.is_pruned = (
-            self.meta.get("is_compact", True),
-            self.meta.get("is_pruned", True),
-        )
+        backend_meta_kwargs = self.meta.get("backend_kwargs", {})
+        self.is_compact = self.meta.get("is_compact", backend_meta_kwargs.get("is_compact", True))
+        default_pruned = backend_meta_kwargs.get("is_recompute", self.is_compact)
+        self.is_pruned = bool(self.meta.get("is_pruned", default_pruned))
 
         index_file = self.index_dir / f"{self.index_path.stem}.index"
         if not index_file.exists():
@@ -154,7 +164,7 @@ class HNSWSearcher(BaseSearcher):
         pruning_strategy: Literal["global", "local", "proportional"] = "global",
         batch_size: int = 0,
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Search for nearest neighbors using HNSW index.
 
@@ -178,28 +188,36 @@ class HNSWSearcher(BaseSearcher):
         """
         from . import faiss  # type: ignore
 
-        if not recompute_embeddings:
-            if self.is_pruned:
-                raise RuntimeError("Recompute is required for pruned index.")
+        if not recompute_embeddings and self.is_pruned:
+            raise RuntimeError(
+                "Recompute is required for pruned/compact HNSW index. "
+                "Re-run search with --recompute, or rebuild with --no-recompute and --no-compact."
+            )
         if recompute_embeddings:
             if zmq_port is None:
-                raise ValueError(
-                    "zmq_port must be provided if recompute_embeddings is True"
-                )
+                raise ValueError("zmq_port must be provided if recompute_embeddings is True")
 
         if query.dtype != np.float32:
             query = query.astype(np.float32)
         if self.distance_metric == "cosine":
-            faiss.normalize_L2(query)
+            query = normalize_l2(query)
 
         params = faiss.SearchParametersHNSW()
         if zmq_port is not None:
-            params.zmq_port = (
-                zmq_port  # C++ code won't use this if recompute_embeddings is False
-            )
+            params.zmq_port = zmq_port  # C++ code won't use this if recompute_embeddings is False
         params.efSearch = complexity
        params.beam_size = beam_width
 
+        # For OpenAI embeddings with cosine distance, disable relative distance check
+        # This prevents early termination when all scores are in a narrow range
+        embedding_model = self.meta.get("embedding_model", "").lower()
+        if self.distance_metric == "cosine" and any(
+            openai_model in embedding_model for openai_model in ["text-embedding", "openai"]
+        ):
+            params.check_relative_distance = False
+        else:
+            params.check_relative_distance = True
+
         # PQ pruning: direct mapping to HNSW's pq_pruning_ratio
         params.pq_pruning_ratio = prune_ratio
 
@@ -209,9 +227,7 @@ class HNSWSearcher(BaseSearcher):
             params.send_neigh_times_ratio = 0.0
         elif pruning_strategy == "proportional":
             params.local_prune = False
-            params.send_neigh_times_ratio = (
-                1.0  # Any value > 1e-6 triggers proportional mode
-            )
+            params.send_neigh_times_ratio = 1.0  # Any value > 1e-6 triggers proportional mode
         else:  # "global"
             params.local_prune = False
             params.send_neigh_times_ratio = 0.0
@@ -223,6 +239,7 @@ class HNSWSearcher(BaseSearcher):
         distances = np.empty((batch_size_query, top_k), dtype=np.float32)
         labels = np.empty((batch_size_query, top_k), dtype=np.int64)
 
+        search_time = time.time()
         self._index.search(
             query.shape[0],
             faiss.swig_ptr(query),
@@ -231,9 +248,8 @@ class HNSWSearcher(BaseSearcher):
             faiss.swig_ptr(labels),
             params,
         )
-        string_labels = [
-            [str(int_label) for int_label in batch_labels] for batch_labels in labels
-        ]
+        search_time = time.time() - search_time
+        logger.info(f"  Search time in HNSWSearcher.search() backend: {search_time} seconds")
+        string_labels = [[str(int_label) for int_label in batch_labels] for batch_labels in labels]
 
         return {"labels": string_labels, "distances": distances}
@@ -3,17 +3,18 @@ HNSW-specific embedding server
 """
 
 import argparse
+import json
+import logging
+import os
+import sys
 import threading
 import time
-import os
-import zmq
-import numpy as np
-import msgpack
-import json
 from pathlib import Path
-from typing import Optional
-import sys
-import logging
+from typing import Any, Optional
+
+import msgpack
+import numpy as np
+import zmq
 
 # Set up logging based on environment variable
 LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
@@ -23,13 +24,35 @@ logger = logging.getLogger(__name__)
 log_level = getattr(logging, LOG_LEVEL, logging.WARNING)
 logger.setLevel(log_level)
 
-# Ensure we have a handler if none exists
+# Ensure we have handlers if none exist
 if not logger.handlers:
-    handler = logging.StreamHandler()
+    stream_handler = logging.StreamHandler()
     formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.propagate = False
+    stream_handler.setFormatter(formatter)
+    logger.addHandler(stream_handler)
+
+log_path = os.getenv("LEANN_HNSW_LOG_PATH")
+if log_path:
+    try:
+        file_handler = logging.FileHandler(log_path, mode="a", encoding="utf-8")
+        file_formatter = logging.Formatter(
+            "%(asctime)s - %(levelname)s - [pid=%(process)d] %(message)s"
+        )
+        file_handler.setFormatter(file_formatter)
+        logger.addHandler(file_handler)
+    except Exception as exc:  # pragma: no cover - best effort logging
+        logger.warning(f"Failed to attach file handler for log path {log_path}: {exc}")
+
+logger.propagate = False
+
+_RAW_PROVIDER_OPTIONS = os.getenv("LEANN_EMBEDDING_OPTIONS")
+try:
+    PROVIDER_OPTIONS: dict[str, Any] = (
+        json.loads(_RAW_PROVIDER_OPTIONS) if _RAW_PROVIDER_OPTIONS else {}
+    )
+except json.JSONDecodeError:
+    logger.warning("Failed to parse LEANN_EMBEDDING_OPTIONS; ignoring provider options")
+    PROVIDER_OPTIONS = {}
 
 
 def create_hnsw_embedding_server(
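The server is configured through environment variables read at import time (LEANN_LOG_LEVEL, LEANN_HNSW_LOG_PATH, LEANN_EMBEDDING_OPTIONS). A minimal sketch of setting them before launching the process; the option key shown for LEANN_EMBEDDING_OPTIONS is illustrative only, since the JSON object is forwarded opaquely as provider_options to compute_embeddings:

    import json
    import os

    os.environ["LEANN_LOG_LEVEL"] = "INFO"
    os.environ["LEANN_HNSW_LOG_PATH"] = "/tmp/leann_hnsw_server.log"  # optional file log
    # Must be a JSON object; passed verbatim to compute_embeddings(provider_options=...).
    os.environ["LEANN_EMBEDDING_OPTIONS"] = json.dumps({"base_url": "http://localhost:11434"})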
@@ -52,8 +75,8 @@ def create_hnsw_embedding_server(
     sys.path.insert(0, str(leann_core_path))
 
     try:
-        from leann.embedding_compute import compute_embeddings
         from leann.api import PassageManager
+        from leann.embedding_compute import compute_embeddings
 
         logger.info("Successfully imported unified embedding computation module")
     except ImportError as e:
@@ -78,220 +101,331 @@ def create_hnsw_embedding_server(
         raise ValueError("Only metadata files (.meta.json) are supported")
 
     # Load metadata to get passage sources
-    with open(passages_file, "r") as f:
+    with open(passages_file) as f:
         meta = json.load(f)
 
-    # Convert relative paths to absolute paths based on metadata file location
-    metadata_dir = Path(
-        passages_file
-    ).parent.parent  # Go up one level from the metadata file
-    passage_sources = []
-    for source in meta["passage_sources"]:
-        source_copy = source.copy()
-        # Convert relative paths to absolute paths
-        if not Path(source_copy["path"]).is_absolute():
-            source_copy["path"] = str(metadata_dir / source_copy["path"])
-        if not Path(source_copy["index_path"]).is_absolute():
-            source_copy["index_path"] = str(metadata_dir / source_copy["index_path"])
-        passage_sources.append(source_copy)
-
-    passages = PassageManager(passage_sources)
-    logger.info(
-        f"Loaded PassageManager with {len(passages.global_offset_map)} passages from metadata"
-    )
-
-    def zmq_server_thread():
-        """ZMQ server thread"""
-        context = zmq.Context()
-        socket = context.socket(zmq.REP)
-        socket.bind(f"tcp://*:{zmq_port}")
-        logger.info(f"HNSW ZMQ server listening on port {zmq_port}")
-
-        socket.setsockopt(zmq.RCVTIMEO, 300000)
-        socket.setsockopt(zmq.SNDTIMEO, 300000)
-
-        while True:
-            try:
-                message_bytes = socket.recv()
-                logger.debug(f"Received ZMQ request of size {len(message_bytes)} bytes")
-                e2e_start = time.time()
-                request_payload = msgpack.unpackb(message_bytes)
-
-                # Handle direct text embedding request
-                if isinstance(request_payload, list) and len(request_payload) > 0:
-                    # Check if this is a direct text request (list of strings)
-                    if all(isinstance(item, str) for item in request_payload):
-                        logger.info(
-                            f"Processing direct text embedding request for {len(request_payload)} texts in {embedding_mode} mode"
-                        )
-
-                        # Use unified embedding computation (now with model caching)
-                        embeddings = compute_embeddings(
-                            request_payload, model_name, mode=embedding_mode
-                        )
-
-                        response = embeddings.tolist()
-                        socket.send(msgpack.packb(response))
-                        e2e_end = time.time()
-                        logger.info(
-                            f"⏱️ Text embedding E2E time: {e2e_end - e2e_start:.6f}s"
-                        )
-                        continue
-
-                # Handle distance calculation requests
-                if (
-                    isinstance(request_payload, list)
-                    and len(request_payload) == 2
-                    and isinstance(request_payload[0], list)
-                    and isinstance(request_payload[1], list)
-                ):
-                    node_ids = request_payload[0]
-                    query_vector = np.array(request_payload[1], dtype=np.float32)
-
-                    logger.debug("Distance calculation request received")
-                    logger.debug(f"  Node IDs: {node_ids}")
-                    logger.debug(f"  Query vector dim: {len(query_vector)}")
-
-                    # Get embeddings for node IDs
-                    texts = []
-                    for nid in node_ids:
-                        try:
-                            passage_data = passages.get_passage(str(nid))
-                            txt = passage_data["text"]
-                            texts.append(txt)
-                        except KeyError:
-                            logger.error(f"Passage ID {nid} not found")
-                            raise RuntimeError(
-                                f"FATAL: Passage with ID {nid} not found"
-                            )
-                        except Exception as e:
-                            logger.error(f"Exception looking up passage ID {nid}: {e}")
-                            raise
-
-                    # Process embeddings
-                    embeddings = compute_embeddings(
-                        texts, model_name, mode=embedding_mode
-                    )
-                    logger.info(
-                        f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
-                    )
-
-                    # Calculate distances
-                    if distance_metric == "l2":
-                        distances = np.sum(
-                            np.square(embeddings - query_vector.reshape(1, -1)), axis=1
-                        )
-                    else:  # mips or cosine
-                        distances = -np.dot(embeddings, query_vector)
-
-                    response_payload = distances.flatten().tolist()
-                    response_bytes = msgpack.packb(
-                        [response_payload], use_single_float=True
-                    )
-                    logger.debug(
-                        f"Sending distance response with {len(distances)} distances"
-                    )
-
-                    socket.send(response_bytes)
-                    e2e_end = time.time()
-                    logger.info(
-                        f"⏱️ Distance calculation E2E time: {e2e_end - e2e_start:.6f}s"
-                    )
-                    continue
-
-                # Standard embedding request (passage ID lookup)
-                if (
-                    not isinstance(request_payload, list)
-                    or len(request_payload) != 1
-                    or not isinstance(request_payload[0], list)
-                ):
-                    logger.error(
-                        f"Invalid MessagePack request format. Expected [[ids...]] or [texts...], got: {type(request_payload)}"
-                    )
-                    socket.send(msgpack.packb([[], []]))
-                    continue
-
-                node_ids = request_payload[0]
-                logger.debug(f"Request for {len(node_ids)} node embeddings")
-
-                # Look up texts by node IDs
-                texts = []
-                for nid in node_ids:
-                    try:
-                        passage_data = passages.get_passage(str(nid))
-                        txt = passage_data["text"]
-                        if not txt:
-                            raise RuntimeError(
-                                f"FATAL: Empty text for passage ID {nid}"
-                            )
-                        texts.append(txt)
-                    except KeyError:
-                        raise RuntimeError(f"FATAL: Passage with ID {nid} not found")
-                    except Exception as e:
-                        logger.error(f"Exception looking up passage ID {nid}: {e}")
-                        raise
-
-                # Process embeddings
-                embeddings = compute_embeddings(texts, model_name, mode=embedding_mode)
-                logger.info(
-                    f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
-                )
-
-                # Serialization and response
-                if np.isnan(embeddings).any() or np.isinf(embeddings).any():
-                    logger.error(
-                        f"NaN or Inf detected in embeddings! Requested IDs: {node_ids[:5]}..."
-                    )
-                    assert False
-
-                hidden_contiguous_f32 = np.ascontiguousarray(
-                    embeddings, dtype=np.float32
-                )
-                response_payload = [
-                    list(hidden_contiguous_f32.shape),
-                    hidden_contiguous_f32.flatten().tolist(),
-                ]
-                response_bytes = msgpack.packb(response_payload, use_single_float=True)
-
-                socket.send(response_bytes)
-                e2e_end = time.time()
-                logger.info(f"⏱️ ZMQ E2E time: {e2e_end - e2e_start:.6f}s")
-
-            except zmq.Again:
-                logger.debug("ZMQ socket timeout, continuing to listen")
-                continue
-            except Exception as e:
-                logger.error(f"Error in ZMQ server loop: {e}")
-                import traceback
-
-                traceback.print_exc()
-                socket.send(msgpack.packb([[], []]))
-
-    zmq_thread = threading.Thread(target=zmq_server_thread, daemon=True)
+    # Let PassageManager handle path resolution uniformly. It supports fallback order:
+    # 1) path/index_path; 2) *_relative; 3) standard siblings next to meta
+    passages = PassageManager(meta["passage_sources"], metadata_file_path=passages_file)
+    # Dimension from metadata for shaping responses
+    try:
+        embedding_dim: int = int(meta.get("dimensions", 0))
+    except Exception:
+        embedding_dim = 0
+    logger.info(f"Loaded PassageManager with {len(passages)} passages from metadata")
+
+    # (legacy ZMQ thread removed; using shutdown-capable server only)
+
+    def zmq_server_thread_with_shutdown(shutdown_event):
+        """ZMQ server thread that respects shutdown signal.
+
+        Creates its own REP socket bound to zmq_port and polls with timeouts
+        to allow graceful shutdown.
+        """
+        logger.info("ZMQ server thread started with shutdown support")
+
+        context = zmq.Context()
+        rep_socket = context.socket(zmq.REP)
+        rep_socket.bind(f"tcp://*:{zmq_port}")
+        logger.info(f"HNSW ZMQ REP server listening on port {zmq_port}")
+        rep_socket.setsockopt(zmq.RCVTIMEO, 1000)
+        # Keep sends from blocking during shutdown; fail fast and drop on close
+        rep_socket.setsockopt(zmq.SNDTIMEO, 1000)
+        rep_socket.setsockopt(zmq.LINGER, 0)
+
+        # Track last request type/length for shape-correct fallbacks
+        last_request_type = "unknown"  # 'text' | 'distance' | 'embedding' | 'unknown'
+        last_request_length = 0
+
+        try:
+            while not shutdown_event.is_set():
+                try:
+                    e2e_start = time.time()
+                    logger.debug("🔍 Waiting for ZMQ message...")
+                    request_bytes = rep_socket.recv()
+
+                    # Rest of the processing logic (same as original)
+                    request = msgpack.unpackb(request_bytes)
+
+                    if len(request) == 1 and request[0] == "__QUERY_MODEL__":
+                        response_bytes = msgpack.packb([model_name])
+                        rep_socket.send(response_bytes)
+                        continue
+
+                    # Handle direct text embedding request
+                    if (
+                        isinstance(request, list)
+                        and request
+                        and all(isinstance(item, str) for item in request)
+                    ):
+                        last_request_type = "text"
+                        last_request_length = len(request)
+                        embeddings = compute_embeddings(
+                            request,
+                            model_name,
+                            mode=embedding_mode,
+                            provider_options=PROVIDER_OPTIONS,
+                        )
+                        rep_socket.send(msgpack.packb(embeddings.tolist()))
+                        e2e_end = time.time()
+                        logger.info(f"⏱️ Text embedding E2E time: {e2e_end - e2e_start:.6f}s")
+                        continue
+
+                    # Handle distance calculation request: [[ids], [query_vector]]
+                    if (
+                        isinstance(request, list)
+                        and len(request) == 2
+                        and isinstance(request[0], list)
+                        and isinstance(request[1], list)
+                    ):
+                        node_ids = request[0]
+                        # Handle nested [[ids]] shape defensively
+                        if len(node_ids) == 1 and isinstance(node_ids[0], list):
+                            node_ids = node_ids[0]
+                        query_vector = np.array(request[1], dtype=np.float32)
+                        last_request_type = "distance"
+                        last_request_length = len(node_ids)
+
+                        logger.debug("Distance calculation request received")
+                        logger.debug(f"  Node IDs: {node_ids}")
+                        logger.debug(f"  Query vector dim: {len(query_vector)}")
+
+                        # Gather texts for found ids
+                        texts: list[str] = []
+                        found_indices: list[int] = []
+                        for idx, nid in enumerate(node_ids):
+                            try:
+                                passage_data = passages.get_passage(str(nid))
+                                txt = passage_data.get("text", "")
+                                if isinstance(txt, str) and len(txt) > 0:
+                                    texts.append(txt)
+                                    found_indices.append(idx)
+                                else:
+                                    logger.error(f"Empty text for passage ID {nid}")
+                            except KeyError:
+                                logger.error(f"Passage ID {nid} not found")
+                            except Exception as e:
+                                logger.error(f"Exception looking up passage ID {nid}: {e}")
+
+                        # Prepare full-length response with large sentinel values
+                        large_distance = 1e9
+                        response_distances = [large_distance] * len(node_ids)
+
+                        if texts:
+                            try:
+                                embeddings = compute_embeddings(
+                                    texts,
+                                    model_name,
+                                    mode=embedding_mode,
+                                    provider_options=PROVIDER_OPTIONS,
+                                )
+                                logger.info(
+                                    f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
+                                )
+
+                                if distance_metric == "l2":
+                                    partial = np.sum(
+                                        np.square(embeddings - query_vector.reshape(1, -1)), axis=1
+                                    )
+                                else:  # mips or cosine
+                                    partial = -np.dot(embeddings, query_vector)
+
+                                for pos, dval in zip(found_indices, partial.flatten().tolist()):
+                                    response_distances[pos] = float(dval)
+                            except Exception as e:
+                                logger.error(f"Distance computation error, using sentinels: {e}")
+
+                        # Send response in expected shape [[distances]]
+                        rep_socket.send(msgpack.packb([response_distances], use_single_float=True))
+                        e2e_end = time.time()
+                        logger.info(f"⏱️ Distance calculation E2E time: {e2e_end - e2e_start:.6f}s")
+                        continue
+
+                    # Fallback: treat as embedding-by-id request
+                    if (
+                        isinstance(request, list)
+                        and len(request) == 1
+                        and isinstance(request[0], list)
+                    ):
+                        node_ids = request[0]
+                    elif isinstance(request, list):
+                        node_ids = request
+                    else:
+                        node_ids = []
+                    last_request_type = "embedding"
+                    last_request_length = len(node_ids)
+                    logger.info(f"ZMQ received {len(node_ids)} node IDs for embedding fetch")
+
+                    # Preallocate zero-filled flat data for robustness
+                    if embedding_dim <= 0:
+                        dims = [0, 0]
+                        flat_data: list[float] = []
+                    else:
+                        dims = [len(node_ids), embedding_dim]
+                        flat_data = [0.0] * (dims[0] * dims[1])
+
+                    # Collect texts for found ids
+                    texts: list[str] = []
+                    found_indices: list[int] = []
+                    for idx, nid in enumerate(node_ids):
+                        try:
+                            passage_data = passages.get_passage(str(nid))
+                            txt = passage_data.get("text", "")
+                            if isinstance(txt, str) and len(txt) > 0:
+                                texts.append(txt)
+                                found_indices.append(idx)
+                            else:
+                                logger.error(f"Empty text for passage ID {nid}")
+                        except KeyError:
+                            logger.error(f"Passage with ID {nid} not found")
+                        except Exception as e:
+                            logger.error(f"Exception looking up passage ID {nid}: {e}")
+
+                    if texts:
+                        try:
+                            embeddings = compute_embeddings(
+                                texts,
+                                model_name,
+                                mode=embedding_mode,
+                                provider_options=PROVIDER_OPTIONS,
+                            )
+                            logger.info(
+                                f"Computed embeddings for {len(texts)} texts, shape: {embeddings.shape}"
+                            )
+
+                            if np.isnan(embeddings).any() or np.isinf(embeddings).any():
+                                logger.error(
+                                    f"NaN or Inf detected in embeddings! Requested IDs: {node_ids[:5]}..."
+                                )
+                                dims = [0, embedding_dim]
+                                flat_data = []
+                            else:
+                                emb_f32 = np.ascontiguousarray(embeddings, dtype=np.float32)
+                                flat = emb_f32.flatten().tolist()
+                                for j, pos in enumerate(found_indices):
+                                    start = pos * embedding_dim
+                                    end = start + embedding_dim
+                                    if end <= len(flat_data):
+                                        flat_data[start:end] = flat[
+                                            j * embedding_dim : (j + 1) * embedding_dim
+                                        ]
+                        except Exception as e:
+                            logger.error(f"Embedding computation error, returning zeros: {e}")
+
+                    response_payload = [dims, flat_data]
+                    response_bytes = msgpack.packb(response_payload, use_single_float=True)
+
+                    rep_socket.send(response_bytes)
+                    e2e_end = time.time()
+                    logger.info(f"⏱️ ZMQ E2E time: {e2e_end - e2e_start:.6f}s")
+
+                except zmq.Again:
+                    # Timeout - check shutdown_event and continue
+                    continue
+                except Exception as e:
+                    if not shutdown_event.is_set():
+                        logger.error(f"Error in ZMQ server loop: {e}")
+                        # Shape-correct fallback
+                        try:
+                            if last_request_type == "distance":
+                                large_distance = 1e9
+                                fallback_len = max(0, int(last_request_length))
+                                safe = [[large_distance] * fallback_len]
+                            elif last_request_type == "embedding":
+                                bsz = max(0, int(last_request_length))
+                                dim = max(0, int(embedding_dim))
+                                safe = (
+                                    [[bsz, dim], [0.0] * (bsz * dim)] if dim > 0 else [[0, 0], []]
+                                )
+                            elif last_request_type == "text":
+                                safe = []  # direct text embeddings expectation is a flat list
+                            else:
+                                safe = [[0, int(embedding_dim) if embedding_dim > 0 else 0], []]
+                            rep_socket.send(msgpack.packb(safe, use_single_float=True))
+                        except Exception:
+                            pass
+                    else:
+                        logger.info("Shutdown in progress, ignoring ZMQ error")
+                        break
+        finally:
+            try:
+                rep_socket.close(0)
+            except Exception:
+                pass
+            try:
+                context.term()
+            except Exception:
+                pass
+
+        logger.info("ZMQ server thread exiting gracefully")
+
+    # Add shutdown coordination
+    shutdown_event = threading.Event()
+
+    def shutdown_zmq_server():
+        """Gracefully shutdown ZMQ server."""
+        logger.info("Initiating graceful shutdown...")
+        shutdown_event.set()
+
+        if zmq_thread.is_alive():
+            logger.info("Waiting for ZMQ thread to finish...")
+            zmq_thread.join(timeout=5)
+            if zmq_thread.is_alive():
+                logger.warning("ZMQ thread did not finish in time")
+
+        # Clean up ZMQ resources
+        try:
+            # Note: socket and context are cleaned up by thread exit
+            logger.info("ZMQ resources cleaned up")
+        except Exception as e:
+            logger.warning(f"Error cleaning ZMQ resources: {e}")
+
+        # Clean up other resources
+        try:
+            import gc
+
+            gc.collect()
+            logger.info("Additional resources cleaned up")
+        except Exception as e:
+            logger.warning(f"Error cleaning additional resources: {e}")
+
+        logger.info("Graceful shutdown completed")
+        sys.exit(0)
+
+    # Register signal handlers within this function scope
+    import signal
+
+    def signal_handler(sig, frame):
+        logger.info(f"Received signal {sig}, shutting down gracefully...")
+        shutdown_zmq_server()
+
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+
+    # Pass shutdown_event to ZMQ thread
+    zmq_thread = threading.Thread(
+        target=lambda: zmq_server_thread_with_shutdown(shutdown_event),
+        daemon=False,  # Not daemon - we want to wait for it
+    )
     zmq_thread.start()
     logger.info(f"Started HNSW ZMQ server thread on port {zmq_port}")
 
     # Keep the main thread alive
     try:
-        while True:
-            time.sleep(1)
+        while not shutdown_event.is_set():
+            time.sleep(0.1)  # Check shutdown more frequently
     except KeyboardInterrupt:
         logger.info("HNSW Server shutting down...")
+        shutdown_zmq_server()
         return
 
+    # If we reach here, shutdown was triggered by signal
+    logger.info("Main loop exited, process should be shutting down")
+
 
 if __name__ == "__main__":
-    import signal
     import sys
 
-    def signal_handler(sig, frame):
-        logger.info(f"Received signal {sig}, shutting down gracefully...")
-        sys.exit(0)
-
-    # Register signal handlers for graceful shutdown
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGINT, signal_handler)
-
+    # Signal handlers are now registered within create_hnsw_embedding_server
     parser = argparse.ArgumentParser(description="HNSW Embedding service")
     parser.add_argument("--zmq-port", type=int, default=5555, help="ZMQ port to run on")
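For reference, a minimal client sketch for the REQ/REP msgpack protocol implemented above (assumes a server on the default port 5555; the ids and texts are illustrative). The three request shapes correspond to the branches in the server loop: a flat list of strings, [[ids], [query_vector]], and [[ids]]:

    import msgpack
    import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://localhost:5555")

    # 1) Direct text embedding: list of strings -> nested list of floats
    sock.send(msgpack.packb(["hello world", "another passage"]))
    embeddings = msgpack.unpackb(sock.recv())
    dim = len(embeddings[0])

    # 2) Distance calculation: [[ids], [query_vector]] -> [[distances]] (missing ids get 1e9)
    sock.send(msgpack.packb([["0", "1"], [0.0] * dim], use_single_float=True))
    [distances] = msgpack.unpackb(sock.recv())

    # 3) Embedding fetch by id: [[ids]] -> [[num_ids, dim], flat_float_list]
    sock.send(msgpack.packb([["0", "1"]]))
    dims, flat = msgpack.unpackb(sock.recv())

    sock.close()
    ctx.term()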
@@ -313,7 +447,7 @@ if __name__ == "__main__":
         "--embedding-mode",
         type=str,
         default="sentence-transformers",
-        choices=["sentence-transformers", "openai", "mlx"],
+        choices=["sentence-transformers", "openai", "mlx", "ollama"],
         help="Embedding backend mode",
     )
 
@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "leann-backend-hnsw"
-version = "0.1.11"
+version = "0.3.4"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
 dependencies = [
-    "leann-core==0.1.11",
+    "leann-core==0.3.4",
     "numpy",
     "pyzmq>=23.0.0",
     "msgpack>=1.0.0",
@@ -22,6 +22,8 @@ cmake.build-type = "Release"
 build.verbose = true
 build.tool-args = ["-j8"]
 
-# CMake definitions to optimize compilation
+# CMake definitions to optimize compilation and find Homebrew packages
 [tool.scikit-build.cmake.define]
 CMAKE_BUILD_PARALLEL_LEVEL = "8"
+CMAKE_PREFIX_PATH = {env = "CMAKE_PREFIX_PATH"}
+OpenMP_ROOT = {env = "OpenMP_ROOT"}
Submodule packages/leann-backend-hnsw/third_party/faiss updated: ff22e2c86b...1d51f0c074
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "leann-core"
-version = "0.1.11"
+version = "0.3.4"
 description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"
@@ -20,11 +20,33 @@ dependencies = [
     "torch>=2.0.0",
     "sentence-transformers>=2.2.0",
     "llama-index-core>=0.12.0",
+    "llama-index-readers-file>=0.4.0",  # Essential for document reading
+    "llama-index-embeddings-huggingface>=0.5.5",  # For embeddings
     "python-dotenv>=1.0.0",
+    "openai>=1.0.0",
+    "huggingface-hub>=0.20.0",
+    "transformers>=4.30.0",
+    "requests>=2.25.0",
+    "accelerate>=0.20.0",
+    "PyPDF2>=3.0.0",
+    "pymupdf>=1.23.0",
+    "pdfplumber>=0.10.0",
+    "nbconvert>=7.0.0",  # For .ipynb file support
+    "gitignore-parser>=0.1.12",  # For proper .gitignore handling
+    "mlx>=0.26.3; sys_platform == 'darwin' and platform_machine == 'arm64'",
+    "mlx-lm>=0.26.0; sys_platform == 'darwin' and platform_machine == 'arm64'",
+]
+
+[project.optional-dependencies]
+colab = [
+    "torch>=2.0.0,<3.0.0",  # Limit torch version to avoid conflicts
+    "transformers>=4.30.0,<5.0.0",  # Limit transformers version
+    "accelerate>=0.20.0,<1.0.0",  # Limit accelerate version
 ]
 
 [project.scripts]
 leann = "leann.cli:main"
+leann_mcp = "leann.mcp:main"
 
 [tool.setuptools.packages.find]
 where = ["src"]
@@ -8,10 +8,14 @@ if platform.system() == "Darwin":
     os.environ["MKL_NUM_THREADS"] = "1"
     os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
     os.environ["KMP_BLOCKTIME"] = "0"
+    # Additional fixes for PyTorch/sentence-transformers on macOS ARM64 only in CI
+    if os.environ.get("CI") == "true":
+        os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "0"
+        os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 from .api import LeannBuilder, LeannChat, LeannSearcher
 from .registry import BACKEND_REGISTRY, autodiscover_backends
 
 autodiscover_backends()
 
-__all__ = ["LeannBuilder", "LeannSearcher", "LeannChat", "BACKEND_REGISTRY"]
+__all__ = ["BACKEND_REGISTRY", "LeannBuilder", "LeannChat", "LeannSearcher"]
File diff suppressed because it is too large
@@ -4,23 +4,27 @@ This file contains the chat generation logic for the LEANN project,
|
|||||||
supporting different backends like Ollama, Hugging Face Transformers, and a simulation mode.
|
supporting different backends like Ollama, Hugging Face Transformers, and a simulation mode.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
import difflib
|
||||||
from typing import Dict, Any, Optional, List
|
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import difflib
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
|
from .settings import resolve_ollama_host, resolve_openai_api_key, resolve_openai_base_url
|
||||||
|
|
||||||
# Configure logging
|
# Configure logging
|
||||||
logging.basicConfig(level=logging.INFO)
|
logging.basicConfig(level=logging.INFO)
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def check_ollama_models() -> List[str]:
|
def check_ollama_models(host: str) -> list[str]:
|
||||||
"""Check available Ollama models and return a list"""
|
"""Check available Ollama models and return a list"""
|
||||||
try:
|
try:
|
||||||
import requests
|
import requests
|
||||||
response = requests.get("http://localhost:11434/api/tags", timeout=5)
|
|
||||||
|
response = requests.get(f"{host}/api/tags", timeout=5)
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
data = response.json()
|
data = response.json()
|
||||||
return [model["name"] for model in data.get("models", [])]
|
return [model["name"] for model in data.get("models", [])]
|
||||||
@@ -36,12 +40,13 @@ def check_ollama_model_exists_remotely(model_name: str) -> tuple[bool, list[str]
|
|||||||
(model_exists, available_tags): bool and list of matching tags
|
(model_exists, available_tags): bool and list of matching tags
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
import requests
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
# Split model name and tag
|
# Split model name and tag
|
||||||
if ':' in model_name:
|
if ":" in model_name:
|
||||||
base_model, requested_tag = model_name.split(':', 1)
|
base_model, requested_tag = model_name.split(":", 1)
|
||||||
else:
|
else:
|
||||||
base_model, requested_tag = model_name, None
|
base_model, requested_tag = model_name, None
|
||||||
|
|
||||||
@@ -62,7 +67,7 @@ def check_ollama_model_exists_remotely(model_name: str) -> tuple[bool, list[str]
|
|||||||
return True, [] # Base model exists but can't get tags
|
return True, [] # Base model exists but can't get tags
|
||||||
|
|
||||||
# Extract tags for this model - be more specific to avoid HTML artifacts
|
# Extract tags for this model - be more specific to avoid HTML artifacts
|
||||||
tag_pattern = rf'{re.escape(base_model)}:[a-zA-Z0-9\.\-_]+'
|
tag_pattern = rf"{re.escape(base_model)}:[a-zA-Z0-9\.\-_]+"
|
||||||
raw_tags = re.findall(tag_pattern, tags_response.text)
|
raw_tags = re.findall(tag_pattern, tags_response.text)
|
||||||
|
|
||||||
# Clean up tags - remove HTML artifacts and duplicates
|
# Clean up tags - remove HTML artifacts and duplicates
|
||||||
@@ -70,7 +75,7 @@ def check_ollama_model_exists_remotely(model_name: str) -> tuple[bool, list[str]
|
|||||||
seen = set()
|
seen = set()
|
||||||
for tag in raw_tags:
|
for tag in raw_tags:
|
||||||
# Skip if it looks like HTML (contains < or >)
|
# Skip if it looks like HTML (contains < or >)
|
||||||
if '<' in tag or '>' in tag:
|
if "<" in tag or ">" in tag:
|
||||||
continue
|
continue
|
||||||
if tag not in seen:
|
if tag not in seen:
|
||||||
seen.add(tag)
|
seen.add(tag)
|
||||||
@@ -91,7 +96,7 @@ def check_ollama_model_exists_remotely(model_name: str) -> tuple[bool, list[str]
|
|||||||
return True, []
|
return True, []
|
||||||
|
|
||||||
|
|
||||||
def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[str]:
|
def search_ollama_models_fuzzy(query: str, available_models: list[str]) -> list[str]:
|
||||||
"""Use intelligent fuzzy search for Ollama models"""
|
"""Use intelligent fuzzy search for Ollama models"""
|
||||||
if not available_models:
|
if not available_models:
|
||||||
return []
|
return []
|
||||||
@@ -104,7 +109,9 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
suggestions.extend(exact_matches)
|
suggestions.extend(exact_matches)
|
||||||
|
|
||||||
# 2. Starts with query
|
# 2. Starts with query
|
||||||
starts_with = [m for m in available_models if m.lower().startswith(query_lower) and m not in suggestions]
|
starts_with = [
|
||||||
|
m for m in available_models if m.lower().startswith(query_lower) and m not in suggestions
|
||||||
|
]
|
||||||
suggestions.extend(starts_with)
|
suggestions.extend(starts_with)
|
||||||
|
|
||||||
# 3. Contains query
|
# 3. Contains query
|
||||||
@@ -114,24 +121,25 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
# 4. Base model name matching (remove version numbers)
|
# 4. Base model name matching (remove version numbers)
|
||||||
def get_base_name(model_name: str) -> str:
|
def get_base_name(model_name: str) -> str:
|
||||||
"""Extract base name without version (e.g., 'llama3:8b' -> 'llama3')"""
|
"""Extract base name without version (e.g., 'llama3:8b' -> 'llama3')"""
|
||||||
return model_name.split(':')[0].split('-')[0]
|
return model_name.split(":")[0].split("-")[0]
|
||||||
|
|
||||||
query_base = get_base_name(query_lower)
|
query_base = get_base_name(query_lower)
|
||||||
base_matches = [
|
base_matches = [
|
||||||
m for m in available_models
|
m
|
||||||
|
for m in available_models
|
||||||
if get_base_name(m.lower()) == query_base and m not in suggestions
|
if get_base_name(m.lower()) == query_base and m not in suggestions
|
||||||
]
|
]
|
||||||
suggestions.extend(base_matches)
|
suggestions.extend(base_matches)
|
||||||
|
|
||||||
# 5. Family/variant matching
|
# 5. Family/variant matching
|
||||||
model_families = {
|
model_families = {
|
||||||
'llama': ['llama2', 'llama3', 'alpaca', 'vicuna', 'codellama'],
|
"llama": ["llama2", "llama3", "alpaca", "vicuna", "codellama"],
|
||||||
'qwen': ['qwen', 'qwen2', 'qwen3'],
|
"qwen": ["qwen", "qwen2", "qwen3"],
|
||||||
'gemma': ['gemma', 'gemma2'],
|
"gemma": ["gemma", "gemma2"],
|
||||||
'phi': ['phi', 'phi2', 'phi3'],
|
"phi": ["phi", "phi2", "phi3"],
|
||||||
'mistral': ['mistral', 'mixtral', 'openhermes'],
|
"mistral": ["mistral", "mixtral", "openhermes"],
|
||||||
'dolphin': ['dolphin', 'openchat'],
|
"dolphin": ["dolphin", "openchat"],
|
||||||
'deepseek': ['deepseek', 'deepseek-coder']
|
"deepseek": ["deepseek", "deepseek-coder"],
|
||||||
}
|
}
|
||||||
|
|
||||||
query_family = None
|
query_family = None
|
||||||
@@ -143,7 +151,8 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
if query_family:
|
if query_family:
|
||||||
family_variants = model_families[query_family]
|
family_variants = model_families[query_family]
|
||||||
family_matches = [
|
family_matches = [
|
||||||
m for m in available_models
|
m
|
||||||
|
for m in available_models
|
||||||
if any(variant in m.lower() for variant in family_variants) and m not in suggestions
|
if any(variant in m.lower() for variant in family_variants) and m not in suggestions
|
||||||
]
|
]
|
||||||
suggestions.extend(family_matches)
|
suggestions.extend(family_matches)
|
||||||
@@ -162,15 +171,13 @@ def search_ollama_models_fuzzy(query: str, available_models: List[str]) -> List[
|
|||||||
# Remove this too - no need for fallback
|
# Remove this too - no need for fallback
|
||||||
|
|
||||||
|
|
||||||
def suggest_similar_models(invalid_model: str, available_models: List[str]) -> List[str]:
|
def suggest_similar_models(invalid_model: str, available_models: list[str]) -> list[str]:
|
||||||
"""Use difflib to find similar model names"""
|
"""Use difflib to find similar model names"""
|
||||||
if not available_models:
|
if not available_models:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
# Get close matches using fuzzy matching
|
# Get close matches using fuzzy matching
|
||||||
suggestions = difflib.get_close_matches(
|
suggestions = difflib.get_close_matches(invalid_model, available_models, n=3, cutoff=0.3)
|
||||||
invalid_model, available_models, n=3, cutoff=0.3
|
|
||||||
)
|
|
||||||
return suggestions
|
return suggestions
|
||||||
|
|
||||||
|
|
||||||
@@ -178,13 +185,14 @@ def check_hf_model_exists(model_name: str) -> bool:
|
|||||||
"""Quick check if HuggingFace model exists without downloading"""
|
"""Quick check if HuggingFace model exists without downloading"""
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import model_info
|
from huggingface_hub import model_info
|
||||||
|
|
||||||
model_info(model_name)
|
model_info(model_name)
|
||||||
return True
|
return True
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def get_popular_hf_models() -> List[str]:
|
def get_popular_hf_models() -> list[str]:
|
||||||
"""Return a list of popular HuggingFace models for suggestions"""
|
"""Return a list of popular HuggingFace models for suggestions"""
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import list_models
|
from huggingface_hub import list_models
|
||||||
@@ -194,15 +202,15 @@ def get_popular_hf_models() -> List[str]:
|
|||||||
filter="text-generation",
|
filter="text-generation",
|
||||||
sort="downloads",
|
sort="downloads",
|
||||||
direction=-1,
|
direction=-1,
|
||||||
limit=20 # Get top 20 most downloaded
|
limit=20, # Get top 20 most downloaded
|
||||||
)
|
)
|
||||||
|
|
||||||
# Extract model names and filter for chat/conversation models
|
# Extract model names and filter for chat/conversation models
|
||||||
model_names = []
|
model_names = []
|
||||||
chat_keywords = ['chat', 'instruct', 'dialog', 'conversation', 'assistant']
|
chat_keywords = ["chat", "instruct", "dialog", "conversation", "assistant"]
|
||||||
|
|
||||||
for model in models:
|
for model in models:
|
||||||
model_name = model.id if hasattr(model, 'id') else str(model)
|
model_name = model.id if hasattr(model, "id") else str(model)
|
||||||
# Prioritize models with chat-related keywords
|
# Prioritize models with chat-related keywords
|
||||||
if any(keyword in model_name.lower() for keyword in chat_keywords):
|
if any(keyword in model_name.lower() for keyword in chat_keywords):
|
||||||
model_names.append(model_name)
|
model_names.append(model_name)
|
||||||
@@ -216,7 +224,7 @@ def get_popular_hf_models() -> List[str]:
|
|||||||
return _get_fallback_hf_models()
|
return _get_fallback_hf_models()
|
||||||
|
|
||||||
|
|
||||||
def _get_fallback_hf_models() -> List[str]:
|
def _get_fallback_hf_models() -> list[str]:
|
||||||
"""Fallback list of popular HuggingFace models"""
|
"""Fallback list of popular HuggingFace models"""
|
||||||
return [
|
return [
|
||||||
"microsoft/DialoGPT-medium",
|
"microsoft/DialoGPT-medium",
|
||||||
@@ -228,11 +236,11 @@ def _get_fallback_hf_models() -> List[str]:
|
|||||||
"facebook/blenderbot_small-90M",
|
"facebook/blenderbot_small-90M",
|
||||||
"microsoft/phi-1_5",
|
"microsoft/phi-1_5",
|
||||||
"facebook/opt-350m",
|
"facebook/opt-350m",
|
||||||
"EleutherAI/gpt-neo-1.3B"
|
"EleutherAI/gpt-neo-1.3B",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
def search_hf_models_fuzzy(query: str, limit: int = 10) -> list[str]:
|
||||||
"""Use HuggingFace Hub's native fuzzy search for model suggestions"""
|
"""Use HuggingFace Hub's native fuzzy search for model suggestions"""
|
||||||
try:
|
try:
|
||||||
from huggingface_hub import list_models
|
from huggingface_hub import list_models
|
||||||
@@ -243,10 +251,10 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
filter="text-generation",
|
filter="text-generation",
|
||||||
sort="downloads",
|
sort="downloads",
|
||||||
direction=-1,
|
direction=-1,
|
||||||
limit=limit
|
limit=limit,
|
||||||
)
|
)
|
||||||
|
|
||||||
model_names = [model.id if hasattr(model, 'id') else str(model) for model in models]
|
model_names = [model.id if hasattr(model, "id") else str(model) for model in models]
|
||||||
|
|
||||||
# If direct search doesn't return enough results, try some variations
|
# If direct search doesn't return enough results, try some variations
|
||||||
if len(model_names) < 3:
|
if len(model_names) < 3:
|
||||||
@@ -254,17 +262,17 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
variations = []
|
variations = []
|
||||||
|
|
||||||
# Extract base name (e.g., "gpt3" from "gpt-3.5")
|
# Extract base name (e.g., "gpt3" from "gpt-3.5")
|
||||||
base_query = query.lower().replace('-', '').replace('.', '').replace('_', '')
|
base_query = query.lower().replace("-", "").replace(".", "").replace("_", "")
|
||||||
if base_query != query.lower():
|
if base_query != query.lower():
|
||||||
variations.append(base_query)
|
variations.append(base_query)
|
||||||
|
|
||||||
# Try common model name patterns
|
# Try common model name patterns
|
||||||
if 'gpt' in query.lower():
|
if "gpt" in query.lower():
|
||||||
variations.extend(['gpt2', 'gpt-neo', 'gpt-j', 'dialoGPT'])
|
variations.extend(["gpt2", "gpt-neo", "gpt-j", "dialoGPT"])
|
||||||
elif 'llama' in query.lower():
|
elif "llama" in query.lower():
|
||||||
variations.extend(['llama2', 'alpaca', 'vicuna'])
|
variations.extend(["llama2", "alpaca", "vicuna"])
|
||||||
elif 'bert' in query.lower():
|
elif "bert" in query.lower():
|
||||||
variations.extend(['roberta', 'distilbert', 'albert'])
|
variations.extend(["roberta", "distilbert", "albert"])
|
||||||
|
|
||||||
# Search with variations
|
# Search with variations
|
||||||
for var in variations[:2]: # Limit to 2 variations to avoid too many API calls
|
for var in variations[:2]: # Limit to 2 variations to avoid too many API calls
|
||||||
@@ -274,11 +282,13 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
filter="text-generation",
|
filter="text-generation",
|
||||||
sort="downloads",
|
sort="downloads",
|
||||||
direction=-1,
|
direction=-1,
|
||||||
limit=3
|
limit=3,
|
||||||
)
|
)
|
||||||
var_names = [model.id if hasattr(model, 'id') else str(model) for model in var_models]
|
var_names = [
|
||||||
|
model.id if hasattr(model, "id") else str(model) for model in var_models
|
||||||
|
]
|
||||||
model_names.extend(var_names)
|
model_names.extend(var_names)
|
||||||
except:
|
except Exception:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Remove duplicates while preserving order
|
# Remove duplicates while preserving order
|
||||||
@@ -296,15 +306,18 @@ def search_hf_models_fuzzy(query: str, limit: int = 10) -> List[str]:
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
def search_hf_models(query: str, limit: int = 10) -> List[str]:
|
def search_hf_models(query: str, limit: int = 10) -> list[str]:
|
||||||
"""Simple search for HuggingFace models based on query (kept for backward compatibility)"""
|
"""Simple search for HuggingFace models based on query (kept for backward compatibility)"""
|
||||||
return search_hf_models_fuzzy(query, limit)
|
return search_hf_models_fuzzy(query, limit)
|
||||||
|
|
||||||
|
|
||||||
def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
def validate_model_and_suggest(
|
||||||
|
model_name: str, llm_type: str, host: Optional[str] = None
|
||||||
|
) -> Optional[str]:
|
||||||
"""Validate model name and provide suggestions if invalid"""
|
"""Validate model name and provide suggestions if invalid"""
|
||||||
if llm_type == "ollama":
|
if llm_type == "ollama":
|
||||||
available_models = check_ollama_models()
|
resolved_host = resolve_ollama_host(host)
|
||||||
|
available_models = check_ollama_models(resolved_host)
|
||||||
if available_models and model_name not in available_models:
|
if available_models and model_name not in available_models:
|
||||||
error_msg = f"Model '{model_name}' not found in your local Ollama installation."
|
error_msg = f"Model '{model_name}' not found in your local Ollama installation."
|
||||||
|
|
||||||
@@ -313,7 +326,7 @@ def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
|||||||
|
|
||||||
if model_exists_remotely and model_name in available_tags:
|
if model_exists_remotely and model_name in available_tags:
|
||||||
# Exact model exists remotely - suggest pulling it
|
# Exact model exists remotely - suggest pulling it
|
||||||
error_msg += f"\n\nTo install the requested model:\n"
|
error_msg += "\n\nTo install the requested model:\n"
|
||||||
error_msg += f" ollama pull {model_name}\n"
|
error_msg += f" ollama pull {model_name}\n"
|
||||||
|
|
||||||
# Show local alternatives
|
# Show local alternatives
|
||||||
@@ -325,10 +338,12 @@ def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
|||||||
|
|
||||||
elif model_exists_remotely and available_tags:
|
elif model_exists_remotely and available_tags:
|
||||||
# Base model exists but requested tag doesn't - suggest correct tags
|
# Base model exists but requested tag doesn't - suggest correct tags
|
||||||
base_model = model_name.split(':')[0]
|
base_model = model_name.split(":")[0]
|
||||||
requested_tag = model_name.split(':', 1)[1] if ':' in model_name else None
|
requested_tag = model_name.split(":", 1)[1] if ":" in model_name else None
|
||||||
|
|
||||||
error_msg += f"\n\nModel '{base_model}' exists, but tag '{requested_tag}' is not available."
|
error_msg += (
|
||||||
|
f"\n\nModel '{base_model}' exists, but tag '{requested_tag}' is not available."
|
||||||
|
)
|
||||||
error_msg += f"\n\nAvailable {base_model} models you can install:\n"
|
error_msg += f"\n\nAvailable {base_model} models you can install:\n"
|
||||||
for i, tag in enumerate(available_tags[:8], 1):
|
for i, tag in enumerate(available_tags[:8], 1):
|
||||||
error_msg += f" {i}. ollama pull {tag}\n"
|
error_msg += f" {i}. ollama pull {tag}\n"
|
||||||
@@ -348,7 +363,11 @@ def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
|||||||
error_msg += f"\n\nModel '{model_name}' was not found in Ollama's library."
|
error_msg += f"\n\nModel '{model_name}' was not found in Ollama's library."
|
||||||
|
|
||||||
if suggestions:
|
if suggestions:
|
||||||
error_msg += "\n\nDid you mean one of these installed models?\n"
|
error_msg += (
|
||||||
|
"\n\nDid you mean one of these installed models?\n"
|
||||||
|
+ "\nTry to use ollama pull to install the model you need\n"
|
||||||
|
)
|
||||||
|
|
||||||
for i, suggestion in enumerate(suggestions, 1):
|
for i, suggestion in enumerate(suggestions, 1):
|
||||||
error_msg += f" {i}. {suggestion}\n"
|
error_msg += f" {i}. {suggestion}\n"
|
||||||
else:
|
else:
|
||||||
@@ -364,7 +383,9 @@ def validate_model_and_suggest(model_name: str, llm_type: str) -> Optional[str]:
|
|||||||
if model_name in available_tags:
|
if model_name in available_tags:
|
||||||
error_msg += f"\n ollama pull {model_name} # Install requested model"
|
error_msg += f"\n ollama pull {model_name} # Install requested model"
|
||||||
else:
|
else:
|
||||||
error_msg += f"\n ollama pull {available_tags[0]} # Install recommended variant"
|
error_msg += (
|
||||||
|
f"\n ollama pull {available_tags[0]} # Install recommended variant"
|
||||||
|
)
|
||||||
error_msg += "\n https://ollama.com/library # Browse available models"
|
error_msg += "\n https://ollama.com/library # Browse available models"
|
||||||
return error_msg
|
return error_msg
|
||||||
|
|
||||||
@@ -404,7 +425,6 @@ class LLMInterface(ABC):
|
|||||||
top_k=10,
|
top_k=10,
|
||||||
complexity=64,
|
complexity=64,
|
||||||
beam_width=8,
|
beam_width=8,
|
||||||
USE_DEFERRED_FETCH=True,
|
|
||||||
skip_search_reorder=True,
|
skip_search_reorder=True,
|
||||||
recompute_beighbor_embeddings=True,
|
recompute_beighbor_embeddings=True,
|
||||||
dedup_node_dis=True,
|
dedup_node_dis=True,
|
||||||
@@ -416,7 +436,6 @@ class LLMInterface(ABC):
|
|||||||
Supported kwargs:
|
Supported kwargs:
|
||||||
- complexity (int): Search complexity parameter (default: 32)
|
- complexity (int): Search complexity parameter (default: 32)
|
||||||
- beam_width (int): Beam width for search (default: 4)
|
- beam_width (int): Beam width for search (default: 4)
|
||||||
- USE_DEFERRED_FETCH (bool): Enable deferred fetch mode (default: False)
|
|
||||||
- skip_search_reorder (bool): Skip search reorder step (default: False)
|
- skip_search_reorder (bool): Skip search reorder step (default: False)
|
||||||
- recompute_beighbor_embeddings (bool): Enable ZMQ embedding server for neighbor recomputation (default: False)
|
- recompute_beighbor_embeddings (bool): Enable ZMQ embedding server for neighbor recomputation (default: False)
|
||||||
- dedup_node_dis (bool): Deduplicate nodes by distance (default: False)
|
- dedup_node_dis (bool): Deduplicate nodes by distance (default: False)
|
||||||
@@ -441,19 +460,19 @@ class LLMInterface(ABC):
class OllamaChat(LLMInterface):
    """LLM interface for Ollama models."""

    def __init__(self, model: str = "llama3:8b", host: Optional[str] = None):
        self.model = model
        self.host = resolve_ollama_host(host)
        logger.info(f"Initializing OllamaChat with model='{model}' and host='{self.host}'")
        try:
            import requests

            # Check if the Ollama server is responsive
            if self.host:
                requests.get(self.host)

            # Pre-check model availability with helpful suggestions
            model_error = validate_model_and_suggest(model, "ollama", self.host)
            if model_error:
                raise ValueError(model_error)

@@ -463,26 +482,51 @@ class OllamaChat(LLMInterface):
            )
        except requests.exceptions.ConnectionError:
            logger.error(
                f"Could not connect to Ollama at {self.host}. Please ensure Ollama is running."
            )
            raise ConnectionError(
                f"Could not connect to Ollama at {self.host}. Please ensure Ollama is running."
            )

    def ask(self, prompt: str, **kwargs) -> str:
        import json

        import requests

        full_url = f"{self.host}/api/generate"

        # Handle thinking budget for reasoning models
        options = kwargs.copy()
        thinking_budget = kwargs.get("thinking_budget")
        if thinking_budget:
            # Remove thinking_budget from options as it's not a standard Ollama option
            options.pop("thinking_budget", None)
            # Only apply reasoning parameters to models that support it
            reasoning_supported_models = [
                "gpt-oss:20b",
                "gpt-oss:120b",
                "deepseek-r1",
                "deepseek-coder",
            ]

            if thinking_budget in ["low", "medium", "high"]:
                if any(model in self.model.lower() for model in reasoning_supported_models):
                    options["reasoning"] = {"effort": thinking_budget, "exclude": False}
                    logger.info(f"Applied reasoning effort={thinking_budget} to model {self.model}")
                else:
                    logger.warning(
                        f"Thinking budget '{thinking_budget}' requested but model '{self.model}' may not support reasoning parameters. Proceeding without reasoning."
                    )

        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,  # Keep it simple for now
            "options": options,
        }
        logger.debug(f"Sending request to Ollama: {payload}")
        try:
            logger.info("Sending request to Ollama and waiting for response...")
            response = requests.post(full_url, data=json.dumps(payload))
            response.raise_for_status()
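For reference, a minimal usage sketch of the updated OllamaChat interface. The constructor and ask() signature come from the diff above; the import path is an assumption, since this diff does not show the module name.

# Hedged sketch -- import path is hypothetical, adjust to the actual leann module.
from leann.llm_interface import OllamaChat  # assumption

chat = OllamaChat(model="deepseek-r1")  # host=None falls back to resolve_ollama_host()
# thinking_budget is stripped from the Ollama options and only mapped to a
# reasoning effort for models in the supported list above.
answer = chat.ask("Summarize the retrieved chunks.", temperature=0.2, thinking_budget="low")
print(answer)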
@@ -513,8 +557,8 @@ class HFChat(LLMInterface):
            raise ValueError(model_error)

        try:
            import torch
            from transformers import AutoModelForCausalLM, AutoTokenizer
        except ImportError:
            raise ImportError(
                "The 'transformers' and 'torch' libraries are required for Hugging Face models. Please install them with 'pip install transformers torch'."
@@ -531,14 +575,41 @@ class HFChat(LLMInterface):
            self.device = "cpu"
            logger.info("No GPU detected. Using CPU.")

        # Load tokenizer and model with timeout protection
        try:
            import signal

            def timeout_handler(signum, frame):
                raise TimeoutError("Model download/loading timed out")

            # Set timeout for model loading (60 seconds)
            old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(60)

            try:
                logger.info(f"Loading tokenizer for {model_name}...")
                self.tokenizer = AutoTokenizer.from_pretrained(model_name)

                logger.info(f"Loading model {model_name}...")
                self.model = AutoModelForCausalLM.from_pretrained(
                    model_name,
                    torch_dtype=torch.float16 if self.device != "cpu" else torch.float32,
                    device_map="auto" if self.device != "cpu" else None,
                    trust_remote_code=True,
                )
                logger.info(f"Successfully loaded {model_name}")
            finally:
                signal.alarm(0)  # Cancel the alarm
                signal.signal(signal.SIGALRM, old_handler)  # Restore old handler

        except TimeoutError:
            logger.error(f"Model loading timed out for {model_name}")
            raise RuntimeError(
                f"Model loading timed out for {model_name}. Please check your internet connection or try a smaller model."
            )
        except Exception as e:
            logger.error(f"Failed to load model {model_name}: {e}")
            raise

        # Move model to device if not using device_map
        if self.device != "cpu" and "device_map" not in str(self.model):
@@ -549,7 +620,7 @@ class HFChat(LLMInterface):
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def ask(self, prompt: str, **kwargs) -> str:
        print("kwargs in HF: ", kwargs)
        # Check if this is a Qwen model and add /no_think by default
        is_qwen_model = "qwen" in self.model.config._name_or_path.lower()

@@ -564,9 +635,7 @@ class HFChat(LLMInterface):
        if hasattr(self.tokenizer, "apply_chat_template"):
            try:
                formatted_prompt = self.tokenizer.apply_chat_template(
                    messages, tokenize=False, add_generation_prompt=True
                )
            except Exception as e:
                logger.warning(f"Chat template failed, using raw prompt: {e}")
@@ -581,7 +650,7 @@ class HFChat(LLMInterface):
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=2048,
        )

        # Move inputs to device
@@ -607,36 +676,97 @@ class HFChat(LLMInterface):

        # Generate
        with torch.no_grad():
            outputs = self.model.generate(**inputs, **generation_config)

        # Decode response
        generated_tokens = outputs[0][inputs["input_ids"].shape[1] :]
        response = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)

        return response.strip()


class GeminiChat(LLMInterface):
    """LLM interface for Google Gemini models."""

    def __init__(self, model: str = "gemini-2.5-flash", api_key: Optional[str] = None):
        self.model = model
        self.api_key = api_key or os.getenv("GEMINI_API_KEY")

        if not self.api_key:
            raise ValueError(
                "Gemini API key is required. Set GEMINI_API_KEY environment variable or pass api_key parameter."
            )

        logger.info(f"Initializing Gemini Chat with model='{model}'")

        try:
            import google.genai as genai

            self.client = genai.Client(api_key=self.api_key)
        except ImportError:
            raise ImportError(
                "The 'google-genai' library is required for Gemini models. Please install it with 'uv pip install google-genai'."
            )

    def ask(self, prompt: str, **kwargs) -> str:
        logger.info(f"Sending request to Gemini with model {self.model}")

        try:
            from google.genai.types import GenerateContentConfig

            generation_config = GenerateContentConfig(
                temperature=kwargs.get("temperature", 0.7),
                max_output_tokens=kwargs.get("max_tokens", 1000),
            )

            # Handle top_p parameter
            if "top_p" in kwargs:
                generation_config.top_p = kwargs["top_p"]

            response = self.client.models.generate_content(
                model=self.model,
                contents=prompt,
                config=generation_config,
            )
            # Handle potential None response text
            response_text = response.text
            if response_text is None:
                logger.warning("Gemini returned None response text")
                return ""
            return response_text.strip()
        except Exception as e:
            logger.error(f"Error communicating with Gemini: {e}")
            return f"Error: Could not get a response from Gemini. Details: {e}"


class OpenAIChat(LLMInterface):
    """LLM interface for OpenAI models."""

    def __init__(
        self,
        model: str = "gpt-4o",
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
    ):
        self.model = model
        self.base_url = resolve_openai_base_url(base_url)
        self.api_key = resolve_openai_api_key(api_key)

        if not self.api_key:
            raise ValueError(
                "OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass api_key parameter."
            )

        logger.info(
            "Initializing OpenAI Chat with model='%s' and base_url='%s'",
            model,
            self.base_url,
        )

        try:
            import openai

            self.client = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)
        except ImportError:
            raise ImportError(
                "The 'openai' library is required for OpenAI models. Please install it with 'pip install openai'."
@@ -647,15 +777,38 @@ class OpenAIChat(LLMInterface):
        params = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": kwargs.get("temperature", 0.7),
        }

        # Handle max_tokens vs max_completion_tokens based on model
        max_tokens = kwargs.get("max_tokens", 1000)
        if "o3" in self.model or "o4" in self.model or "o1" in self.model:
            # o-series models use max_completion_tokens
            params["max_completion_tokens"] = max_tokens
            params["temperature"] = 1.0
        else:
            # Other models use max_tokens
            params["max_tokens"] = max_tokens

        # Handle thinking budget for reasoning models
        thinking_budget = kwargs.get("thinking_budget")
        if thinking_budget and thinking_budget in ["low", "medium", "high"]:
            # Check if this is an o-series model (partial match for model names)
            o_series_models = ["o3", "o3-mini", "o4-mini", "o1", "o3-pro", "o3-deep-research"]
            if any(model in self.model for model in o_series_models):
                # Use the correct OpenAI reasoning parameter format
                params["reasoning_effort"] = thinking_budget
                logger.info(f"Applied reasoning_effort={thinking_budget} to model {self.model}")
            else:
                logger.warning(
                    f"Thinking budget '{thinking_budget}' requested but model '{self.model}' may not support reasoning parameters. Proceeding without reasoning."
                )

        # Add other kwargs (excluding thinking_budget as it's handled above)
        for k, v in kwargs.items():
            if k not in ["max_tokens", "temperature", "thinking_budget"]:
                params[k] = v

        logger.info(f"Sending request to OpenAI with model {self.model}")

        try:
@@ -675,7 +828,7 @@ class SimulatedChat(LLMInterface):
        return "This is a simulated answer from the LLM based on the retrieved context."


def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
    """
    Factory function to get an LLM interface based on configuration.

@@ -703,12 +856,18 @@ def get_llm(llm_config: Optional[Dict[str, Any]] = None) -> LLMInterface:
    if llm_type == "ollama":
        return OllamaChat(
            model=model or "llama3:8b",
            host=llm_config.get("host"),
        )
    elif llm_type == "hf":
        return HFChat(model_name=model or "deepseek-ai/deepseek-llm-7b-chat")
    elif llm_type == "openai":
        return OpenAIChat(
            model=model or "gpt-4o",
            api_key=llm_config.get("api_key"),
            base_url=llm_config.get("base_url"),
        )
    elif llm_type == "gemini":
        return GeminiChat(model=model or "gemini-2.5-flash", api_key=llm_config.get("api_key"))
    elif llm_type == "simulated":
        return SimulatedChat()
    else:
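A hedged sketch of driving the expanded factory from a config dict. The "host", "api_key", and "base_url" keys are read verbatim above; the "type"/"model" key names and the import path are assumptions not shown in this diff.

from leann.llm_interface import get_llm  # hypothetical import path

llm = get_llm({
    "type": "openai",                          # assumed key name for llm_type
    "model": "gpt-4o",
    "base_url": "http://localhost:8000/v1",    # any OpenAI-compatible endpoint
})
print(llm.ask("Hello", max_tokens=64))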
220
packages/leann-core/src/leann/chunking_utils.py
Normal file
@@ -0,0 +1,220 @@
"""
Enhanced chunking utilities with AST-aware code chunking support.
Packaged within leann-core so installed wheels can import it reliably.
"""

import logging
from pathlib import Path
from typing import Optional

from llama_index.core.node_parser import SentenceSplitter

logger = logging.getLogger(__name__)

# Code file extensions supported by astchunk
CODE_EXTENSIONS = {
    ".py": "python",
    ".java": "java",
    ".cs": "csharp",
    ".ts": "typescript",
    ".tsx": "typescript",
    ".js": "typescript",
    ".jsx": "typescript",
}


def detect_code_files(documents, code_extensions=None) -> tuple[list, list]:
    """Separate documents into code files and regular text files."""
    if code_extensions is None:
        code_extensions = CODE_EXTENSIONS

    code_docs = []
    text_docs = []

    for doc in documents:
        file_path = doc.metadata.get("file_path", "") or doc.metadata.get("file_name", "")
        if file_path:
            file_ext = Path(file_path).suffix.lower()
            if file_ext in code_extensions:
                doc.metadata["language"] = code_extensions[file_ext]
                doc.metadata["is_code"] = True
                code_docs.append(doc)
            else:
                doc.metadata["is_code"] = False
                text_docs.append(doc)
        else:
            doc.metadata["is_code"] = False
            text_docs.append(doc)

    logger.info(f"Detected {len(code_docs)} code files and {len(text_docs)} text files")
    return code_docs, text_docs


def get_language_from_extension(file_path: str) -> Optional[str]:
    """Return language string from a filename/extension using CODE_EXTENSIONS."""
    ext = Path(file_path).suffix.lower()
    return CODE_EXTENSIONS.get(ext)


def create_ast_chunks(
    documents,
    max_chunk_size: int = 512,
    chunk_overlap: int = 64,
    metadata_template: str = "default",
) -> list[str]:
    """Create AST-aware chunks from code documents using astchunk.

    Falls back to traditional chunking if astchunk is unavailable.
    """
    try:
        from astchunk import ASTChunkBuilder  # optional dependency
    except ImportError as e:
        logger.error(f"astchunk not available: {e}")
        logger.info("Falling back to traditional chunking for code files")
        return create_traditional_chunks(documents, max_chunk_size, chunk_overlap)

    all_chunks = []
    for doc in documents:
        language = doc.metadata.get("language")
        if not language:
            logger.warning("No language detected; falling back to traditional chunking")
            all_chunks.extend(create_traditional_chunks([doc], max_chunk_size, chunk_overlap))
            continue

        try:
            configs = {
                "max_chunk_size": max_chunk_size,
                "language": language,
                "metadata_template": metadata_template,
                "chunk_overlap": chunk_overlap if chunk_overlap > 0 else 0,
            }

            repo_metadata = {
                "file_path": doc.metadata.get("file_path", ""),
                "file_name": doc.metadata.get("file_name", ""),
                "creation_date": doc.metadata.get("creation_date", ""),
                "last_modified_date": doc.metadata.get("last_modified_date", ""),
            }
            configs["repo_level_metadata"] = repo_metadata

            chunk_builder = ASTChunkBuilder(**configs)
            code_content = doc.get_content()
            if not code_content or not code_content.strip():
                logger.warning("Empty code content, skipping")
                continue

            chunks = chunk_builder.chunkify(code_content)
            for chunk in chunks:
                if hasattr(chunk, "text"):
                    chunk_text = chunk.text
                elif isinstance(chunk, dict) and "text" in chunk:
                    chunk_text = chunk["text"]
                elif isinstance(chunk, str):
                    chunk_text = chunk
                else:
                    chunk_text = str(chunk)

                if chunk_text and chunk_text.strip():
                    all_chunks.append(chunk_text.strip())

            logger.info(
                f"Created {len(chunks)} AST chunks from {language} file: {doc.metadata.get('file_name', 'unknown')}"
            )
        except Exception as e:
            logger.warning(f"AST chunking failed for {language} file: {e}")
            logger.info("Falling back to traditional chunking")
            all_chunks.extend(create_traditional_chunks([doc], max_chunk_size, chunk_overlap))

    return all_chunks


def create_traditional_chunks(
    documents, chunk_size: int = 256, chunk_overlap: int = 128
) -> list[str]:
    """Create traditional text chunks using LlamaIndex SentenceSplitter."""
    if chunk_size <= 0:
        logger.warning(f"Invalid chunk_size={chunk_size}, using default value of 256")
        chunk_size = 256
    if chunk_overlap < 0:
        chunk_overlap = 0
    if chunk_overlap >= chunk_size:
        chunk_overlap = chunk_size // 2

    node_parser = SentenceSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        separator=" ",
        paragraph_separator="\n\n",
    )

    all_texts = []
    for doc in documents:
        try:
            nodes = node_parser.get_nodes_from_documents([doc])
            if nodes:
                all_texts.extend(node.get_content() for node in nodes)
        except Exception as e:
            logger.error(f"Traditional chunking failed for document: {e}")
            content = doc.get_content()
            if content and content.strip():
                all_texts.append(content.strip())

    return all_texts


def create_text_chunks(
    documents,
    chunk_size: int = 256,
    chunk_overlap: int = 128,
    use_ast_chunking: bool = False,
    ast_chunk_size: int = 512,
    ast_chunk_overlap: int = 64,
    code_file_extensions: Optional[list[str]] = None,
    ast_fallback_traditional: bool = True,
) -> list[str]:
    """Create text chunks from documents with optional AST support for code files."""
    if not documents:
        logger.warning("No documents provided for chunking")
        return []

    local_code_extensions = CODE_EXTENSIONS.copy()
    if code_file_extensions:
        ext_mapping = {
            ".py": "python",
            ".java": "java",
            ".cs": "c_sharp",
            ".ts": "typescript",
            ".tsx": "typescript",
        }
        for ext in code_file_extensions:
            if ext.lower() not in local_code_extensions:
                if ext.lower() in ext_mapping:
                    local_code_extensions[ext.lower()] = ext_mapping[ext.lower()]
                else:
                    logger.warning(f"Unsupported extension {ext}, will use traditional chunking")

    all_chunks = []
    if use_ast_chunking:
        code_docs, text_docs = detect_code_files(documents, local_code_extensions)
        if code_docs:
            try:
                all_chunks.extend(
                    create_ast_chunks(
                        code_docs, max_chunk_size=ast_chunk_size, chunk_overlap=ast_chunk_overlap
                    )
                )
            except Exception as e:
                logger.error(f"AST chunking failed: {e}")
                if ast_fallback_traditional:
                    all_chunks.extend(
                        create_traditional_chunks(code_docs, chunk_size, chunk_overlap)
                    )
                else:
                    raise
        if text_docs:
            all_chunks.extend(create_traditional_chunks(text_docs, chunk_size, chunk_overlap))
    else:
        all_chunks = create_traditional_chunks(documents, chunk_size, chunk_overlap)

    logger.info(f"Total chunks created: {len(all_chunks)}")
    return all_chunks
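A short usage sketch of the new chunking entry point. create_text_chunks and its parameters are defined in the file above; loading documents through llama_index's SimpleDirectoryReader is an assumption about the caller, not something shown in this diff.

from llama_index.core import SimpleDirectoryReader
from leann.chunking_utils import create_text_chunks

docs = SimpleDirectoryReader("./my_repo", recursive=True).load_data()
chunks = create_text_chunks(
    docs,
    use_ast_chunking=True,          # route .py/.java/.ts/... files through astchunk
    ast_chunk_size=512,
    ast_chunk_overlap=64,
    ast_fallback_traditional=True,  # fall back to SentenceSplitter on failure
)
print(f"{len(chunks)} chunks")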
File diff suppressed because it is too large
@@ -4,11 +4,15 @@ Consolidates all embedding computation logic using SentenceTransformer
Preserves all optimization parameters to ensure performance
"""

import logging
import os
import time
from typing import Any, Optional

import numpy as np
import torch

from .settings import resolve_ollama_host, resolve_openai_api_key, resolve_openai_base_url

# Set up logger with proper level
logger = logging.getLogger(__name__)
@@ -17,16 +21,19 @@ log_level = getattr(logging, LOG_LEVEL, logging.WARNING)
logger.setLevel(log_level)

# Global model cache to avoid repeated loading
_model_cache: dict[str, Any] = {}


def compute_embeddings(
    texts: list[str],
    model_name: str,
    mode: str = "sentence-transformers",
    is_build: bool = False,
    batch_size: int = 32,
    adaptive_optimization: bool = True,
    manual_tokenize: bool = False,
    max_length: int = 512,
    provider_options: Optional[dict[str, Any]] = None,
) -> np.ndarray:
    """
    Unified embedding computation entry point
@@ -34,7 +41,7 @@ def compute_embeddings(
    Args:
        texts: List of texts to compute embeddings for
        model_name: Model name
        mode: Computation mode ('sentence-transformers', 'openai', 'mlx', 'ollama')
        is_build: Whether this is a build operation (shows progress bar)
        batch_size: Batch size for processing
        adaptive_optimization: Whether to use adaptive optimization based on batch size
@@ -42,6 +49,8 @@ def compute_embeddings(
    Returns:
        Normalized embeddings array, shape: (len(texts), embedding_dim)
    """
    provider_options = provider_options or {}

    if mode == "sentence-transformers":
        return compute_embeddings_sentence_transformers(
            texts,
@@ -49,23 +58,41 @@ def compute_embeddings(
            is_build=is_build,
            batch_size=batch_size,
            adaptive_optimization=adaptive_optimization,
            manual_tokenize=manual_tokenize,
            max_length=max_length,
        )
    elif mode == "openai":
        return compute_embeddings_openai(
            texts,
            model_name,
            base_url=provider_options.get("base_url"),
            api_key=provider_options.get("api_key"),
        )
    elif mode == "mlx":
        return compute_embeddings_mlx(texts, model_name)
    elif mode == "ollama":
        return compute_embeddings_ollama(
            texts,
            model_name,
            is_build=is_build,
            host=provider_options.get("host"),
        )
    elif mode == "gemini":
        return compute_embeddings_gemini(texts, model_name, is_build=is_build)
    else:
        raise ValueError(f"Unsupported embedding mode: {mode}")

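A hedged sketch of calling the unified entry point with the new provider_options argument. The function name and parameters are as defined above; the module path is an assumption, since the diff header does not show the file name.

from leann.embedding_compute import compute_embeddings  # hypothetical import path

emb = compute_embeddings(
    ["hello world", "leann builds compact indexes"],
    model_name="nomic-embed-text",
    mode="ollama",
    provider_options={"host": "http://127.0.0.1:11434"},  # forwarded as host=...
)
print(emb.shape)  # (2, embedding_dim)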
def compute_embeddings_sentence_transformers(
    texts: list[str],
    model_name: str,
    use_fp16: bool = True,
    device: str = "auto",
    batch_size: int = 32,
    is_build: bool = False,
    adaptive_optimization: bool = True,
    manual_tokenize: bool = False,
    max_length: int = 512,
) -> np.ndarray:
    """
    Compute embeddings using SentenceTransformer with model caching and adaptive optimization
@@ -114,9 +141,7 @@ def compute_embeddings_sentence_transformers(
        logger.info(f"Using cached optimized model: {model_name}")
        model = _model_cache[cache_key]
    else:
        logger.info(f"Loading and caching optimized SentenceTransformer model: {model_name}")
        from sentence_transformers import SentenceTransformer

        logger.info(f"Using device: {device}")
@@ -134,9 +159,7 @@ def compute_embeddings_sentence_transformers(
                if hasattr(torch.mps, "set_per_process_memory_fraction"):
                    torch.mps.set_per_process_memory_fraction(0.9)
            except AttributeError:
                logger.warning("Some MPS optimizations not available in this PyTorch version")
        elif device == "cpu":
            # TODO: Haven't tested this yet
            torch.set_num_threads(min(8, os.cpu_count() or 4))
@@ -213,9 +236,13 @@ def compute_embeddings_sentence_transformers(
        logger.info(f"Model cached: {cache_key}")

    # Compute embeddings with optimized inference mode
    logger.info(
        f"Starting embedding computation... (batch_size: {batch_size}, manual_tokenize={manual_tokenize})"
    )

    start_time = time.time()
    if not manual_tokenize:
        # Use SentenceTransformer's optimized encode path (default)
        with torch.inference_mode():
            embeddings = model.encode(
                texts,
@@ -225,49 +252,176 @@ def compute_embeddings_sentence_transformers(
                normalize_embeddings=False,
                device=device,
            )
        # Synchronize if CUDA to measure accurate wall time
        try:
            if torch.cuda.is_available():
                torch.cuda.synchronize()
        except Exception:
            pass
    else:
        # Manual tokenization + forward pass using HF AutoTokenizer/AutoModel
        try:
            from transformers import AutoModel, AutoTokenizer  # type: ignore
        except Exception as e:
            raise ImportError(f"transformers is required for manual_tokenize=True: {e}")

        # Cache tokenizer and model
        tok_cache_key = f"hf_tokenizer_{model_name}"
        mdl_cache_key = f"hf_model_{model_name}_{device}_{use_fp16}"
        if tok_cache_key in _model_cache and mdl_cache_key in _model_cache:
            hf_tokenizer = _model_cache[tok_cache_key]
            hf_model = _model_cache[mdl_cache_key]
            logger.info("Using cached HF tokenizer/model for manual path")
        else:
            logger.info("Loading HF tokenizer/model for manual tokenization path")
            hf_tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
            torch_dtype = torch.float16 if (use_fp16 and device == "cuda") else torch.float32
            hf_model = AutoModel.from_pretrained(model_name, torch_dtype=torch_dtype)
            hf_model.to(device)
            hf_model.eval()
            # Optional compile on supported devices
            if device in ["cuda", "mps"]:
                try:
                    hf_model = torch.compile(hf_model, mode="reduce-overhead", dynamic=True)  # type: ignore
                except Exception:
                    pass
            _model_cache[tok_cache_key] = hf_tokenizer
            _model_cache[mdl_cache_key] = hf_model

        all_embeddings: list[np.ndarray] = []
        # Progress bar when building or for large inputs
        show_progress = is_build or len(texts) > 32
        try:
            if show_progress:
                from tqdm import tqdm  # type: ignore

                batch_iter = tqdm(
                    range(0, len(texts), batch_size),
                    desc="Embedding (manual)",
                    unit="batch",
                )
            else:
                batch_iter = range(0, len(texts), batch_size)
        except Exception:
            batch_iter = range(0, len(texts), batch_size)

        start_time_manual = time.time()
        with torch.inference_mode():
            for start_index in batch_iter:
                end_index = min(start_index + batch_size, len(texts))
                batch_texts = texts[start_index:end_index]
                tokenize_start_time = time.time()
                inputs = hf_tokenizer(
                    batch_texts,
                    padding=True,
                    truncation=True,
                    max_length=max_length,
                    return_tensors="pt",
                )
                tokenize_end_time = time.time()
                logger.info(
                    f"Tokenize time taken: {tokenize_end_time - tokenize_start_time} seconds"
                )
                # Print shapes of all input tensors for debugging
                for k, v in inputs.items():
                    print(f"inputs[{k!r}] shape: {getattr(v, 'shape', type(v))}")
                to_device_start_time = time.time()
                inputs = {k: v.to(device) for k, v in inputs.items()}
                to_device_end_time = time.time()
                logger.info(
                    f"To device time taken: {to_device_end_time - to_device_start_time} seconds"
                )
                forward_start_time = time.time()
                outputs = hf_model(**inputs)
                forward_end_time = time.time()
                logger.info(f"Forward time taken: {forward_end_time - forward_start_time} seconds")
                last_hidden_state = outputs.last_hidden_state  # (B, L, H)
                attention_mask = inputs.get("attention_mask")
                if attention_mask is None:
                    # Fallback: assume all tokens are valid
                    pooled = last_hidden_state.mean(dim=1)
                else:
                    mask = attention_mask.unsqueeze(-1).to(last_hidden_state.dtype)
                    masked = last_hidden_state * mask
                    lengths = mask.sum(dim=1).clamp(min=1)
                    pooled = masked.sum(dim=1) / lengths
                # Move to CPU float32
                batch_embeddings = pooled.detach().to("cpu").float().numpy()
                all_embeddings.append(batch_embeddings)

        embeddings = np.vstack(all_embeddings).astype(np.float32, copy=False)
        try:
            if torch.cuda.is_available():
                torch.cuda.synchronize()
        except Exception:
            pass
        end_time = time.time()
        logger.info(f"Manual tokenize time taken: {end_time - start_time_manual} seconds")
    end_time = time.time()
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
    logger.info(f"Time taken: {end_time - start_time} seconds")

    # Validate results
    if np.isnan(embeddings).any() or np.isinf(embeddings).any():
        raise RuntimeError(f"Detected NaN or Inf values in embeddings, model: {model_name}")

    return embeddings

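The manual path above mean-pools the last hidden state under the attention mask. A self-contained illustration of that pooling step on a dummy tensor; the shapes are arbitrary and chosen only for illustration.

import torch

last_hidden_state = torch.randn(2, 4, 8)                    # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]])  # second sequence is padded

mask = attention_mask.unsqueeze(-1).to(last_hidden_state.dtype)               # (B, L, 1)
pooled = (last_hidden_state * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1)
print(pooled.shape)  # torch.Size([2, 8]) -- padding positions do not contribute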
def compute_embeddings_openai(
    texts: list[str],
    model_name: str,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> np.ndarray:
    # TODO: @yichuan-w add progress bar only in build mode
    """Compute embeddings using OpenAI API"""
    try:
        import openai
    except ImportError as e:
        raise ImportError(f"OpenAI package not installed: {e}")

    # Validate input list
    if not texts:
        raise ValueError("Cannot compute embeddings for empty text list")
    # Extra validation: abort early if any item is empty/whitespace
    invalid_count = sum(1 for t in texts if not isinstance(t, str) or not t.strip())
    if invalid_count > 0:
        raise ValueError(
            f"Found {invalid_count} empty/invalid text(s) in input. Upstream should filter before calling OpenAI."
        )

    resolved_base_url = resolve_openai_base_url(base_url)
    resolved_api_key = resolve_openai_api_key(api_key)

    if not resolved_api_key:
        raise RuntimeError("OPENAI_API_KEY environment variable not set")

    # Cache OpenAI client
    cache_key = f"openai_client::{resolved_base_url}"
    if cache_key in _model_cache:
        client = _model_cache[cache_key]
    else:
        client = openai.OpenAI(api_key=resolved_api_key, base_url=resolved_base_url)
        _model_cache[cache_key] = client
        logger.info("OpenAI client cached")

    logger.info(
        f"Computing embeddings for {len(texts)} texts using OpenAI API, model: '{model_name}'"
    )
    print(f"len of texts: {len(texts)}")

    # OpenAI has limits on batch size and input length
    max_batch_size = 800  # Conservative batch size because the token limit is 300K
    all_embeddings = []
    # get the avg len of texts
    avg_len = sum(len(text) for text in texts) / len(texts)
    print(f"avg len of texts: {avg_len}")
    # if avg len is large, shrink the batch size
    if avg_len > 300:
        max_batch_size = 500

    try:
        from tqdm import tqdm
@@ -293,15 +447,12 @@ def compute_embeddings_openai(texts: List[str], model_name: str) -> np.ndarray:
        raise

    embeddings = np.array(all_embeddings, dtype=np.float32)
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
    print(f"len of embeddings: {len(embeddings)}")
    return embeddings

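The batch-size values above work backwards from the roughly 300K-token-per-request limit mentioned in the comment. A hedged sketch of the same heuristic as a standalone helper; the 4-characters-per-token estimate and the caps are illustrative assumptions, not values taken from this diff.

def pick_openai_batch_size(texts: list[str], token_budget: int = 300_000) -> int:
    """Keep the estimated total tokens per request under the budget."""
    avg_chars = sum(len(t) for t in texts) / max(len(texts), 1)
    est_tokens_per_text = max(avg_chars / 4, 1)  # rough ~4 chars/token assumption
    return max(1, min(800, int(token_budget // est_tokens_per_text)))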
def compute_embeddings_mlx(
|
def compute_embeddings_mlx(chunks: list[str], model_name: str, batch_size: int = 16) -> np.ndarray:
|
||||||
chunks: List[str], model_name: str, batch_size: int = 16
|
|
||||||
) -> np.ndarray:
|
|
||||||
# TODO: @yichuan-w add progress bar only in build mode
|
# TODO: @yichuan-w add progress bar only in build mode
|
||||||
"""Computes embeddings using an MLX model."""
|
"""Computes embeddings using an MLX model."""
|
||||||
try:
|
try:
|
||||||
@@ -373,3 +524,373 @@ def compute_embeddings_mlx(
|
|||||||
|
|
||||||
# Stack numpy arrays
|
# Stack numpy arrays
|
||||||
return np.stack(all_embeddings)
|
return np.stack(all_embeddings)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_embeddings_ollama(
|
||||||
|
texts: list[str],
|
||||||
|
model_name: str,
|
||||||
|
is_build: bool = False,
|
||||||
|
host: Optional[str] = None,
|
||||||
|
) -> np.ndarray:
|
||||||
|
"""
|
||||||
|
Compute embeddings using Ollama API with simplified batch processing.
|
||||||
|
|
||||||
|
Uses batch size of 32 for MPS/CPU and 128 for CUDA to optimize performance.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
texts: List of texts to compute embeddings for
|
||||||
|
model_name: Ollama model name (e.g., "nomic-embed-text", "mxbai-embed-large")
|
||||||
|
is_build: Whether this is a build operation (shows progress bar)
|
||||||
|
host: Ollama host URL (defaults to environment or http://localhost:11434)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Normalized embeddings array, shape: (len(texts), embedding_dim)
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import requests
|
||||||
|
except ImportError:
|
||||||
|
raise ImportError(
|
||||||
|
"The 'requests' library is required for Ollama embeddings. Install with: uv pip install requests"
|
||||||
|
)
|
||||||
|
|
||||||
|
if not texts:
|
||||||
|
raise ValueError("Cannot compute embeddings for empty text list")
|
||||||
|
|
||||||
|
resolved_host = resolve_ollama_host(host)
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Computing embeddings for {len(texts)} texts using Ollama API, model: '{model_name}', host: '{resolved_host}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check if Ollama is running
|
||||||
|
try:
|
||||||
|
response = requests.get(f"{resolved_host}/api/version", timeout=5)
|
||||||
|
response.raise_for_status()
|
||||||
|
except requests.exceptions.ConnectionError:
|
||||||
|
error_msg = (
|
||||||
|
f"❌ Could not connect to Ollama at {resolved_host}.\n\n"
|
||||||
|
"Please ensure Ollama is running:\n"
|
||||||
|
" • macOS/Linux: ollama serve\n"
|
||||||
|
" • Windows: Make sure Ollama is running in the system tray\n\n"
|
||||||
|
"Installation: https://ollama.com/download"
|
||||||
|
)
|
||||||
|
raise RuntimeError(error_msg)
|
||||||
|
except Exception as e:
|
||||||
|
raise RuntimeError(f"Unexpected error connecting to Ollama: {e}")
|
||||||
|
|
||||||
|
# Check if model exists and provide helpful suggestions
|
||||||
|
try:
|
||||||
|
response = requests.get(f"{resolved_host}/api/tags", timeout=5)
|
||||||
|
response.raise_for_status()
|
||||||
|
models = response.json()
|
||||||
|
model_names = [model["name"] for model in models.get("models", [])]
|
||||||
|
|
||||||
|
# Filter for embedding models (models that support embeddings)
|
||||||
|
embedding_models = []
|
||||||
|
suggested_embedding_models = [
|
||||||
|
"nomic-embed-text",
|
||||||
|
"mxbai-embed-large",
|
||||||
|
"bge-m3",
|
||||||
|
"all-minilm",
|
||||||
|
"snowflake-arctic-embed",
|
||||||
|
]
|
||||||
|
|
||||||
|
for model in model_names:
|
||||||
|
# Check if it's an embedding model (by name patterns or known models)
|
||||||
|
base_name = model.split(":")[0]
|
||||||
|
if any(emb in base_name for emb in ["embed", "bge", "minilm", "e5"]):
|
||||||
|
embedding_models.append(model)
|
||||||
|
|
||||||
|
# Check if model exists (handle versioned names) and resolve to full name
|
||||||
|
resolved_model_name = None
|
||||||
|
for name in model_names:
|
||||||
|
# Exact match
|
||||||
|
if model_name == name:
|
||||||
|
resolved_model_name = name
|
||||||
|
break
|
||||||
|
# Match without version tag (use the versioned name)
|
||||||
|
elif model_name == name.split(":")[0]:
|
||||||
|
resolved_model_name = name
|
||||||
|
break
|
||||||
|
|
||||||
|
if not resolved_model_name:
|
||||||
|
error_msg = f"❌ Model '{model_name}' not found in local Ollama.\n\n"
|
||||||
|
|
||||||
|
# Suggest pulling the model
|
||||||
|
error_msg += "📦 To install this embedding model:\n"
|
||||||
|
error_msg += f" ollama pull {model_name}\n\n"
|
||||||
|
|
||||||
|
# Show available embedding models
|
||||||
|
if embedding_models:
|
||||||
|
error_msg += "✅ Available embedding models:\n"
|
||||||
|
for model in embedding_models[:5]:
|
||||||
|
error_msg += f" • {model}\n"
|
||||||
|
if len(embedding_models) > 5:
|
||||||
|
error_msg += f" ... and {len(embedding_models) - 5} more\n"
|
||||||
|
else:
|
||||||
|
error_msg += "💡 Popular embedding models to install:\n"
|
||||||
|
for model in suggested_embedding_models[:3]:
|
||||||
|
error_msg += f" • ollama pull {model}\n"
|
||||||
|
|
||||||
|
error_msg += "\n📚 Browse more: https://ollama.com/library"
|
||||||
|
raise ValueError(error_msg)
|
||||||
|
|
||||||
|
# Use the resolved model name for all subsequent operations
|
||||||
|
if resolved_model_name != model_name:
|
||||||
|
logger.info(f"Resolved model name '{model_name}' to '{resolved_model_name}'")
|
||||||
|
model_name = resolved_model_name
|
||||||
|
|
||||||
|
# Verify the model supports embeddings by testing it
|
||||||
|
try:
|
||||||
|
test_response = requests.post(
|
||||||
|
f"{resolved_host}/api/embeddings",
|
||||||
|
json={"model": model_name, "prompt": "test"},
|
||||||
|
timeout=10,
|
||||||
|
)
|
||||||
|
if test_response.status_code != 200:
|
||||||
|
error_msg = (
|
||||||
|
f"⚠️ Model '{model_name}' exists but may not support embeddings.\n\n"
|
||||||
|
f"Please use an embedding model like:\n"
|
||||||
|
)
|
||||||
|
for model in suggested_embedding_models[:3]:
|
||||||
|
error_msg += f" • {model}\n"
|
||||||
|
raise ValueError(error_msg)
|
||||||
|
except requests.exceptions.RequestException:
|
||||||
|
# If test fails, continue anyway - model might still work
|
||||||
|
pass
|
||||||
|
|
||||||
|
except requests.exceptions.RequestException as e:
|
||||||
|
logger.warning(f"Could not verify model existence: {e}")
|
||||||
|
|
||||||
|
# Determine batch size based on device availability
|
||||||
|
# Check for CUDA/MPS availability using torch if available
|
||||||
|
batch_size = 32 # Default for MPS/CPU
|
||||||
|
try:
|
||||||
|
import torch
|
||||||
|
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
batch_size = 128 # CUDA gets larger batch size
|
||||||
|
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
||||||
|
batch_size = 32 # MPS gets smaller batch size
|
||||||
|
except ImportError:
|
||||||
|
# If torch is not available, use conservative batch size
|
||||||
|
batch_size = 32
|
||||||
|
|
||||||
|
logger.info(f"Using batch size: {batch_size}")
|
||||||
|
|
||||||
|
def get_batch_embeddings(batch_texts):
|
||||||
|
"""Get embeddings for a batch of texts."""
|
||||||
|
all_embeddings = []
|
||||||
|
failed_indices = []
|
||||||
|
|
||||||
|
for i, text in enumerate(batch_texts):
|
||||||
|
max_retries = 3
|
||||||
|
retry_count = 0
|
||||||
|
|
||||||
|
# Truncate very long texts to avoid API issues
|
||||||
|
truncated_text = text[:8000] if len(text) > 8000 else text
|
||||||
|
while retry_count < max_retries:
|
||||||
|
try:
|
||||||
|
response = requests.post(
|
||||||
|
f"{resolved_host}/api/embeddings",
|
||||||
|
json={"model": model_name, "prompt": truncated_text},
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
                    response.raise_for_status()
                    result = response.json()
                    embedding = result.get("embedding")

                    if embedding is None:
                        raise ValueError(f"No embedding returned for text {i}")

                    if not isinstance(embedding, list) or len(embedding) == 0:
                        raise ValueError(f"Invalid embedding format for text {i}")

                    all_embeddings.append(embedding)
                    break

                except requests.exceptions.Timeout:
                    retry_count += 1
                    if retry_count >= max_retries:
                        logger.warning(f"Timeout for text {i} after {max_retries} retries")
                        failed_indices.append(i)
                        all_embeddings.append(None)
                        break

                except Exception as e:
                    retry_count += 1
                    if retry_count >= max_retries:
                        logger.error(f"Failed to get embedding for text {i}: {e}")
                        failed_indices.append(i)
                        all_embeddings.append(None)
                        break

        return all_embeddings, failed_indices

    # Process texts in batches
    all_embeddings = []
    all_failed_indices = []

    # Setup progress bar if needed
    show_progress = is_build or len(texts) > 10
    try:
        if show_progress:
            from tqdm import tqdm
    except ImportError:
        show_progress = False

    # Process batches
    num_batches = (len(texts) + batch_size - 1) // batch_size

    if show_progress:
        batch_iterator = tqdm(range(num_batches), desc="Computing Ollama embeddings")
    else:
        batch_iterator = range(num_batches)

    for batch_idx in batch_iterator:
        start_idx = batch_idx * batch_size
        end_idx = min(start_idx + batch_size, len(texts))
        batch_texts = texts[start_idx:end_idx]

        batch_embeddings, batch_failed = get_batch_embeddings(batch_texts)

        # Adjust failed indices to global indices
        global_failed = [start_idx + idx for idx in batch_failed]
        all_failed_indices.extend(global_failed)
        all_embeddings.extend(batch_embeddings)

    # Handle failed embeddings
    if all_failed_indices:
        if len(all_failed_indices) == len(texts):
            raise RuntimeError("Failed to compute any embeddings")

        logger.warning(
            f"Failed to compute embeddings for {len(all_failed_indices)}/{len(texts)} texts"
        )

        # Use zero embeddings as fallback for failed ones
        valid_embedding = next((e for e in all_embeddings if e is not None), None)
        if valid_embedding:
            embedding_dim = len(valid_embedding)
            for i, embedding in enumerate(all_embeddings):
                if embedding is None:
                    all_embeddings[i] = [0.0] * embedding_dim

    # Remove None values
    all_embeddings = [e for e in all_embeddings if e is not None]

    if not all_embeddings:
        raise RuntimeError("No valid embeddings were computed")

    # Validate embedding dimensions
    expected_dim = len(all_embeddings[0])
    inconsistent_dims = []
    for i, embedding in enumerate(all_embeddings):
        if len(embedding) != expected_dim:
            inconsistent_dims.append((i, len(embedding)))

    if inconsistent_dims:
        error_msg = f"Ollama returned inconsistent embedding dimensions. Expected {expected_dim}, but got:\n"
        for idx, dim in inconsistent_dims[:10]:  # Show first 10 inconsistent ones
            error_msg += f"  - Text {idx}: {dim} dimensions\n"
        if len(inconsistent_dims) > 10:
            error_msg += f"  ... and {len(inconsistent_dims) - 10} more\n"
        error_msg += f"\nThis is likely an Ollama API bug with model '{model_name}'. Please try:\n"
        error_msg += "1. Restart Ollama service: 'ollama serve'\n"
        error_msg += f"2. Re-pull the model: 'ollama pull {model_name}'\n"
        error_msg += (
            "3. Use sentence-transformers instead: --embedding-mode sentence-transformers\n"
        )
        error_msg += "4. Report this issue to Ollama: https://github.com/ollama/ollama/issues"
        raise ValueError(error_msg)

    # Convert to numpy array and normalize
    embeddings = np.array(all_embeddings, dtype=np.float32)

    # Normalize embeddings (L2 normalization)
    norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
    embeddings = embeddings / (norms + 1e-8)  # Add small epsilon to avoid division by zero

    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

    return embeddings
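
The final step above is plain row-wise L2 normalization; the epsilon only guards the all-zero fallback rows against division by zero. A minimal standalone sketch of the same arithmetic (not part of the diff):

import numpy as np

vecs = np.array([[3.0, 4.0], [0.0, 0.0]], dtype=np.float32)  # second row mimics a zero-vector fallback
norms = np.linalg.norm(vecs, axis=1, keepdims=True)          # [[5.0], [0.0]]
unit = vecs / (norms + 1e-8)                                  # [[0.6, 0.8], [0.0, 0.0]]
print(unit)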

def compute_embeddings_gemini(
    texts: list[str], model_name: str = "text-embedding-004", is_build: bool = False
) -> np.ndarray:
    """
    Compute embeddings using Google Gemini API.

    Args:
        texts: List of texts to compute embeddings for
        model_name: Gemini model name (default: "text-embedding-004")
        is_build: Whether this is a build operation (shows progress bar)

    Returns:
        Embeddings array, shape: (len(texts), embedding_dim)
    """
    try:
        import os

        import google.genai as genai
    except ImportError as e:
        raise ImportError(f"Google GenAI package not installed: {e}")

    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise RuntimeError("GEMINI_API_KEY environment variable not set")

    # Cache Gemini client
    cache_key = "gemini_client"
    if cache_key in _model_cache:
        client = _model_cache[cache_key]
    else:
        client = genai.Client(api_key=api_key)
        _model_cache[cache_key] = client
        logger.info("Gemini client cached")

    logger.info(
        f"Computing embeddings for {len(texts)} texts using Gemini API, model: '{model_name}'"
    )

    # Gemini supports batch embedding
    max_batch_size = 100  # Conservative batch size for Gemini
    all_embeddings = []

    try:
        from tqdm import tqdm

        total_batches = (len(texts) + max_batch_size - 1) // max_batch_size
        batch_range = range(0, len(texts), max_batch_size)
        batch_iterator = tqdm(
            batch_range, desc="Computing embeddings", unit="batch", total=total_batches
        )
    except ImportError:
        # Fallback when tqdm is not available
        batch_iterator = range(0, len(texts), max_batch_size)

    for i in batch_iterator:
        batch_texts = texts[i : i + max_batch_size]

        try:
            # Use the embed_content method from the new Google GenAI SDK
            response = client.models.embed_content(
                model=model_name,
                contents=batch_texts,
                config=genai.types.EmbedContentConfig(
                    task_type="RETRIEVAL_DOCUMENT"  # For document embedding
                ),
            )

            # Extract embeddings from response
            for embedding_data in response.embeddings:
                all_embeddings.append(embedding_data.values)
        except Exception as e:
            logger.error(f"Batch {i} failed: {e}")
            raise

    embeddings = np.array(all_embeddings, dtype=np.float32)
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

    return embeddings
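
For orientation, a hypothetical call site for the function above (the enclosing module's name and the `_model_cache`/`logger` globals are defined elsewhere in the file and not shown in this excerpt):

import os

os.environ["GEMINI_API_KEY"] = "<your-key>"  # required, or the function raises RuntimeError

embeddings = compute_embeddings_gemini(
    ["LEANN keeps passages on disk.", "Embeddings come from the Gemini API."],
    model_name="text-embedding-004",
)
print(embeddings.shape)  # (len(texts), embedding_dim), float32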
@@ -1,13 +1,16 @@
-import time
 import atexit
+import logging
+import os
 import socket
 import subprocess
 import sys
-import os
-import logging
+import time
 
 from pathlib import Path
 from typing import Optional
-import psutil
+
+from .settings import encode_provider_options
+
+# Lightweight, self-contained server manager with no cross-process inspection
 
 # Set up logging based on environment variable
 LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
@@ -18,136 +21,31 @@ logging.basicConfig(
 logger = logging.getLogger(__name__)
 
 
+def _is_colab_environment() -> bool:
+    """Check if we're running in Google Colab environment."""
+    return "COLAB_GPU" in os.environ or "COLAB_TPU" in os.environ
+
+
+def _get_available_port(start_port: int = 5557) -> int:
+    """Get an available port starting from start_port."""
+    port = start_port
+    while port < start_port + 100:  # Try up to 100 ports
+        try:
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.bind(("localhost", port))
+                return port
+        except OSError:
+            port += 1
+    raise RuntimeError(f"No available ports found in range {start_port}-{start_port + 100}")
+
+
 def _check_port(port: int) -> bool:
     """Check if a port is in use"""
     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
         return s.connect_ex(("localhost", port)) == 0
 
 
-def _check_process_matches_config(
-    port: int, expected_model: str, expected_passages_file: str
-) -> bool:
-    """
-    Check if the process using the port matches our expected model and passages file.
-    Returns True if matches, False otherwise.
-    """
-    try:
-        for proc in psutil.process_iter(["pid", "cmdline"]):
-            if not _is_process_listening_on_port(proc, port):
-                continue
-
-            cmdline = proc.info["cmdline"]
-            if not cmdline:
-                continue
-
-            return _check_cmdline_matches_config(
-                cmdline, port, expected_model, expected_passages_file
-            )
-
-        logger.debug(f"No process found listening on port {port}")
-        return False
-
-    except Exception as e:
-        logger.warning(f"Could not check process on port {port}: {e}")
-        return False
-
-
-def _is_process_listening_on_port(proc, port: int) -> bool:
-    """Check if a process is listening on the given port."""
-    try:
-        connections = proc.net_connections()
-        for conn in connections:
-            if conn.laddr.port == port and conn.status == psutil.CONN_LISTEN:
-                return True
-        return False
-    except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
-        return False
-
-
-def _check_cmdline_matches_config(
-    cmdline: list, port: int, expected_model: str, expected_passages_file: str
-) -> bool:
-    """Check if command line matches our expected configuration."""
-    cmdline_str = " ".join(cmdline)
-    logger.debug(f"Found process on port {port}: {cmdline_str}")
-
-    # Check if it's our embedding server
-    is_embedding_server = any(
-        server_type in cmdline_str
-        for server_type in [
-            "embedding_server",
-            "leann_backend_diskann.embedding_server",
-            "leann_backend_hnsw.hnsw_embedding_server",
-        ]
-    )
-
-    if not is_embedding_server:
-        logger.debug(f"Process on port {port} is not our embedding server")
-        return False
-
-    # Check model name
-    model_matches = _check_model_in_cmdline(cmdline, expected_model)
-
-    # Check passages file if provided
-    passages_matches = _check_passages_in_cmdline(cmdline, expected_passages_file)
-
-    result = model_matches and passages_matches
-    logger.debug(
-        f"model_matches: {model_matches}, passages_matches: {passages_matches}, overall: {result}"
-    )
-    return result
-
-
-def _check_model_in_cmdline(cmdline: list, expected_model: str) -> bool:
-    """Check if the command line contains the expected model."""
-    if "--model-name" not in cmdline:
-        return False
-
-    model_idx = cmdline.index("--model-name")
-    if model_idx + 1 >= len(cmdline):
-        return False
-
-    actual_model = cmdline[model_idx + 1]
-    return actual_model == expected_model
-
-
-def _check_passages_in_cmdline(cmdline: list, expected_passages_file: str) -> bool:
-    """Check if the command line contains the expected passages file."""
-    if "--passages-file" not in cmdline:
-        return False  # Expected but not found
-
-    passages_idx = cmdline.index("--passages-file")
-    if passages_idx + 1 >= len(cmdline):
-        return False
-
-    actual_passages = cmdline[passages_idx + 1]
-    expected_path = Path(expected_passages_file).resolve()
-    actual_path = Path(actual_passages).resolve()
-    return actual_path == expected_path
-
-
-def _find_compatible_port_or_next_available(
-    start_port: int, model_name: str, passages_file: str, max_attempts: int = 100
-) -> tuple[int, bool]:
-    """
-    Find a port that either has a compatible server or is available.
-    Returns (port, is_compatible) where is_compatible indicates if we found a matching server.
-    """
-    for port in range(start_port, start_port + max_attempts):
-        if not _check_port(port):
-            # Port is available
-            return port, False
-
-        # Port is in use, check if it's compatible
-        if _check_process_matches_config(port, model_name, passages_file):
-            logger.info(f"Found compatible server on port {port}")
-            return port, True
-        else:
-            logger.info(f"Port {port} has incompatible server, trying next port...")
-
-    raise RuntimeError(
-        f"Could not find compatible or available port in range {start_port}-{start_port + max_attempts}"
-    )
+# Note: All cross-process scanning helpers removed for simplicity
 
 
 class EmbeddingServerManager:
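
The new `_get_available_port` helper uses a bind probe (binding succeeds only on a free port), while the retained `_check_port` uses a connect probe (a successful connect means something is already listening). A self-contained sketch of the bind-probe idea, independent of this diff:

import socket

def pick_free_port(start: int = 5557, attempts: int = 100) -> int:
    # Same pattern as _get_available_port: try to bind, advance on OSError.
    for port in range(start, start + attempts):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind(("localhost", port))
                return port
            except OSError:
                continue
    raise RuntimeError(f"no free port in {start}-{start + attempts}")

print(pick_free_port())

Both probes are inherently racy: another process can take the port between the probe and the actual listen, which is presumably why startup failures are still handled downstream.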
@@ -166,7 +64,16 @@ class EmbeddingServerManager:
         self.backend_module_name = backend_module_name
         self.server_process: Optional[subprocess.Popen] = None
         self.server_port: Optional[int] = None
+        # Track last-started config for in-process reuse only
+        self._server_config: Optional[dict] = None
         self._atexit_registered = False
+        # Also register a weakref finalizer to ensure cleanup when manager is GC'ed
+        try:
+            import weakref
+
+            self._finalizer = weakref.finalize(self, self._finalize_process)
+        except Exception:
+            self._finalizer = None
 
     def start_server(
         self,
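
The `weakref.finalize` registration is a belt-and-braces cleanup: the callback fires when the manager is garbage-collected or at interpreter exit, whichever comes first. One caveat worth noting: because the callable passed above is a bound method (`self._finalize_process`), the finalizer itself holds a strong reference back to the manager, so in practice it fires at exit rather than on collection. A minimal illustration of the pattern (the `Owner` class is hypothetical):

import weakref

class Owner:
    def __init__(self, name: str):
        # print is used as the callback here to avoid the bound-method reference noted above
        self._finalizer = weakref.finalize(self, print, f"cleanup for {name}")

o = Owner("demo")
del o  # the finalizer fires here; it would also fire at interpreter exit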
@@ -175,72 +82,111 @@ class EmbeddingServerManager:
         embedding_mode: str = "sentence-transformers",
         **kwargs,
     ) -> tuple[bool, int]:
-        """
-        Starts the embedding server process.
-
-        Args:
-            port (int): The preferred ZMQ port for the server.
-            model_name (str): The name of the embedding model to use.
-            **kwargs: Additional arguments for the server.
-
-        Returns:
-            tuple[bool, int]: (success, actual_port_used)
-        """
-        passages_file = kwargs.get("passages_file")
-        assert isinstance(passages_file, str), "passages_file must be a string"
-
-        # Check if we have a compatible running server
-        if self._has_compatible_running_server(model_name, passages_file):
-            assert self.server_port is not None, (
-                "a compatible running server should set server_port"
-            )
-            return True, self.server_port
-
-        # Find available port (compatible or free)
-        try:
-            actual_port, is_compatible = _find_compatible_port_or_next_available(
-                port, model_name, passages_file
-            )
-        except RuntimeError as e:
-            logger.error(str(e))
-            return False, port
-
-        if is_compatible:
-            logger.info(f"Using existing compatible server on port {actual_port}")
-            self.server_port = actual_port
-            self.server_process = None  # We don't own this process
-            return True, actual_port
-
-        if actual_port != port:
-            logger.info(f"Using port {actual_port} instead of {port}")
-
-        # Start new server
-        return self._start_new_server(actual_port, model_name, embedding_mode, **kwargs)
-
-    def _has_compatible_running_server(
-        self, model_name: str, passages_file: str
-    ) -> bool:
-        """Check if we have a compatible running server."""
-        if not (
+        """Start the embedding server."""
+        # passages_file may be present in kwargs for server CLI, but we don't need it here
+        provider_options = kwargs.pop("provider_options", None)
+
+        config_signature = {
+            "model_name": model_name,
+            "passages_file": kwargs.get("passages_file", ""),
+            "embedding_mode": embedding_mode,
+            "provider_options": provider_options or {},
+        }
+
+        # If this manager already has a live server, just reuse it
+        if (
             self.server_process
             and self.server_process.poll() is None
             and self.server_port
+            and self._server_config == config_signature
         ):
-            return False
-
-        if _check_process_matches_config(self.server_port, model_name, passages_file):
-            logger.info(
-                f"Existing server process (PID {self.server_process.pid}) is compatible"
-            )
-            return True
-
-        logger.info(
-            "Existing server process is incompatible. Should start a new server."
-        )
-        return False
+            logger.info("Reusing in-process server")
+            return True, self.server_port
+
+        # Configuration changed, stop existing server before starting a new one
+        if self.server_process and self.server_process.poll() is None:
+            logger.info("Existing server configuration differs; restarting embedding server")
+            self.stop_server()
+
+        # For Colab environment, use a different strategy
+        if _is_colab_environment():
+            logger.info("Detected Colab environment, using alternative startup strategy")
+            return self._start_server_colab(
+                port,
+                model_name,
+                embedding_mode,
+                provider_options=provider_options,
+                **kwargs,
+            )
+
+        # Always pick a fresh available port
+        try:
+            actual_port = _get_available_port(port)
+        except RuntimeError:
+            logger.error("No available ports found")
+            return False, port
+
+        # Start a new server
+        return self._start_new_server(
+            actual_port,
+            model_name,
+            embedding_mode,
+            provider_options=provider_options,
+            config_signature=config_signature,
+            **kwargs,
+        )
+
+    def _start_server_colab(
+        self,
+        port: int,
+        model_name: str,
+        embedding_mode: str = "sentence-transformers",
+        provider_options: Optional[dict] = None,
+        **kwargs,
+    ) -> tuple[bool, int]:
+        """Start server with Colab-specific configuration."""
+        # Try to find an available port
+        try:
+            actual_port = _get_available_port(port)
+        except RuntimeError:
+            logger.error("No available ports found")
+            return False, port
+
+        logger.info(f"Starting server on port {actual_port} for Colab environment")
+
+        # Use a simpler startup strategy for Colab
+        command = self._build_server_command(actual_port, model_name, embedding_mode, **kwargs)
+
+        try:
+            # In Colab, we'll use a more direct approach
+            self._launch_server_process_colab(
+                command,
+                actual_port,
+                provider_options=provider_options,
+            )
+            started, ready_port = self._wait_for_server_ready_colab(actual_port)
+            if started:
+                self._server_config = {
+                    "model_name": model_name,
+                    "passages_file": kwargs.get("passages_file", ""),
+                    "embedding_mode": embedding_mode,
+                    "provider_options": provider_options or {},
+                }
+            return started, ready_port
+        except Exception as e:
+            logger.error(f"Failed to start embedding server in Colab: {e}")
+            return False, actual_port
+
+    # Note: No compatibility check needed; manager is per-searcher and configs are stable per instance
 
     def _start_new_server(
-        self, port: int, model_name: str, embedding_mode: str, **kwargs
+        self,
+        port: int,
+        model_name: str,
+        embedding_mode: str,
+        provider_options: Optional[dict] = None,
+        config_signature: Optional[dict] = None,
+        **kwargs,
    ) -> tuple[bool, int]:
         """Start a new embedding server on the given port."""
         logger.info(f"Starting embedding server on port {port}...")
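
Server reuse is now decided by plain dict equality over `config_signature`; Python compares the nested `provider_options` dict by value, so any change to the model, passages file, mode, or options forces a restart. A small illustration of that test (the field values below are made up):

sig_a = {
    "model_name": "facebook/contriever",
    "passages_file": "docs.passages.jsonl",
    "embedding_mode": "sentence-transformers",
    "provider_options": {},
}
sig_b = {**sig_a, "provider_options": {"host": "http://localhost:11434"}}

print(sig_a == dict(sig_a))  # True  -> reuse the live server
print(sig_a == sig_b)        # False -> stop_server(), then start fresh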
@@ -248,8 +194,20 @@ class EmbeddingServerManager:
         command = self._build_server_command(port, model_name, embedding_mode, **kwargs)
 
         try:
-            self._launch_server_process(command, port)
-            return self._wait_for_server_ready(port)
+            self._launch_server_process(
+                command,
+                port,
+                provider_options=provider_options,
+            )
+            started, ready_port = self._wait_for_server_ready(port)
+            if started:
+                self._server_config = config_signature or {
+                    "model_name": model_name,
+                    "passages_file": kwargs.get("passages_file", ""),
+                    "embedding_mode": embedding_mode,
+                    "provider_options": provider_options or {},
+                }
+            return started, ready_port
         except Exception as e:
             logger.error(f"Failed to start embedding server: {e}")
             return False, port
@@ -274,30 +232,85 @@ class EmbeddingServerManager:
             command.extend(["--passages-file", str(passages_file)])
         if embedding_mode != "sentence-transformers":
             command.extend(["--embedding-mode", embedding_mode])
+        if kwargs.get("distance_metric"):
+            command.extend(["--distance-metric", kwargs["distance_metric"]])
 
         return command
 
-    def _launch_server_process(self, command: list, port: int) -> None:
+    def _launch_server_process(
+        self,
+        command: list,
+        port: int,
+        provider_options: Optional[dict] = None,
+    ) -> None:
         """Launch the server process."""
         project_root = Path(__file__).parent.parent.parent.parent.parent
         logger.info(f"Command: {' '.join(command)}")
 
-        # Let server output go directly to console
-        # The server will respect LEANN_LOG_LEVEL environment variable
+        # In CI environment, redirect stdout to avoid buffer deadlock but keep stderr for debugging
+        # Embedding servers use many print statements that can fill stdout buffers
+        is_ci = os.environ.get("CI") == "true"
+        if is_ci:
+            stdout_target = subprocess.DEVNULL
+            stderr_target = None  # Keep stderr for error debugging in CI
+            logger.info(
+                "CI environment detected, redirecting embedding server stdout to DEVNULL, keeping stderr"
+            )
+        else:
+            stdout_target = None  # Direct to console for visible logs
+            stderr_target = None  # Direct to console for visible logs
+
+        # Start embedding server subprocess
+        logger.info(f"Starting server process with command: {' '.join(command)}")
+        env = os.environ.copy()
+        encoded_options = encode_provider_options(provider_options)
+        if encoded_options:
+            env["LEANN_EMBEDDING_OPTIONS"] = encoded_options
+
         self.server_process = subprocess.Popen(
             command,
             cwd=project_root,
-            stdout=None,  # Direct to console
-            stderr=None,  # Direct to console
+            stdout=stdout_target,
+            stderr=stderr_target,
+            env=env,
         )
         self.server_port = port
+        # Record config for in-process reuse (best effort; refined later when ready)
+        try:
+            self._server_config = {
+                "model_name": command[command.index("--model-name") + 1]
+                if "--model-name" in command
+                else "",
+                "passages_file": command[command.index("--passages-file") + 1]
+                if "--passages-file" in command
+                else "",
+                "embedding_mode": command[command.index("--embedding-mode") + 1]
+                if "--embedding-mode" in command
+                else "sentence-transformers",
+                "provider_options": provider_options or {},
+            }
+        except Exception:
+            self._server_config = {
+                "model_name": "",
+                "passages_file": "",
+                "embedding_mode": "sentence-transformers",
+                "provider_options": provider_options or {},
+            }
         logger.info(f"Server process started with PID: {self.server_process.pid}")
 
         # Register atexit callback only when we actually start a process
         if not self._atexit_registered:
-            # Use a lambda to avoid issues with bound methods
-            atexit.register(lambda: self.stop_server() if self.server_process else None)
+            # Always attempt best-effort finalize at interpreter exit
+            atexit.register(self._finalize_process)
             self._atexit_registered = True
+            # Touch finalizer so it knows there is a live process
+            if getattr(self, "_finalizer", None) is not None and not self._finalizer.alive:
+                try:
+                    import weakref
+
+                    self._finalizer = weakref.finalize(self, self._finalize_process)
+                except Exception:
+                    pass
 
     def _wait_for_server_ready(self, port: int) -> tuple[bool, int]:
         """Wait for the server to be ready."""
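
`encode_provider_options` comes from `.settings` and its implementation is not shown in this diff; assuming it serializes the options dict to a string (JSON is used below purely as a stand-in), the environment-variable handoff to the child process looks roughly like this:

import json
import os

provider_options = {"base_url": "http://localhost:11434"}  # hypothetical option

env = os.environ.copy()
env["LEANN_EMBEDDING_OPTIONS"] = json.dumps(provider_options)  # then passed via subprocess.Popen(..., env=env)

# The child server can recover the same options from its environment:
assert json.loads(env["LEANN_EMBEDDING_OPTIONS"]) == provider_options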
@@ -322,29 +335,126 @@ class EmbeddingServerManager:
         if not self.server_process:
             return
 
-        if self.server_process.poll() is not None:
+        if self.server_process and self.server_process.poll() is not None:
             # Process already terminated
             self.server_process = None
+            self.server_port = None
+            self._server_config = None
             return
 
         logger.info(
             f"Terminating server process (PID: {self.server_process.pid}) for backend {self.backend_module_name}..."
         )
 
-        self.server_process.terminate()
-        try:
-            self.server_process.wait(timeout=5)
-            logger.info(f"Server process {self.server_process.pid} terminated.")
-        except subprocess.TimeoutExpired:
-            logger.warning(
-                f"Server process {self.server_process.pid} did not terminate gracefully, killing it."
-            )
-            self.server_process.kill()
-
-        # Clean up process resources to prevent resource tracker warnings
-        try:
-            self.server_process.wait()  # Ensure process is fully cleaned up
-        except Exception:
-            pass
-        self.server_process = None
+        # Use simple termination first; if the server installed signal handlers,
+        # it will exit cleanly. Otherwise escalate to kill after a short wait.
+        try:
+            self.server_process.terminate()
+        except Exception:
+            pass
+
+        try:
+            self.server_process.wait(timeout=5)  # Give more time for graceful shutdown
+            logger.info(f"Server process {self.server_process.pid} terminated gracefully.")
+        except subprocess.TimeoutExpired:
+            logger.warning(
+                f"Server process {self.server_process.pid} did not terminate within 5 seconds, force killing..."
+            )
+            try:
+                self.server_process.kill()
+            except Exception:
+                pass
+            try:
+                self.server_process.wait(timeout=2)
+                logger.info(f"Server process {self.server_process.pid} killed successfully.")
+            except subprocess.TimeoutExpired:
+                logger.error(
+                    f"Failed to kill server process {self.server_process.pid} - it may be hung"
+                )
+
+        # Clean up process resources with timeout to avoid CI hang
+        try:
+            # Use shorter timeout in CI environments
+            is_ci = os.environ.get("CI") == "true"
+            timeout = 3 if is_ci else 10
+            self.server_process.wait(timeout=timeout)
+            logger.info(f"Server process {self.server_process.pid} cleanup completed")
+        except subprocess.TimeoutExpired:
+            logger.warning(f"Process cleanup timeout after {timeout}s, proceeding anyway")
+        except Exception as e:
+            logger.warning(f"Error during process cleanup: {e}")
+        finally:
+            self.server_process = None
+            self.server_port = None
+            self._server_config = None
+
+    def _finalize_process(self) -> None:
+        """Best-effort cleanup used by weakref.finalize/atexit."""
+        try:
+            self.stop_server()
+        except Exception:
+            pass
+
+    def _adopt_existing_server(self, *args, **kwargs) -> None:
+        # Removed: cross-process adoption no longer supported
+        return
+
+    def _launch_server_process_colab(
+        self,
+        command: list,
+        port: int,
+        provider_options: Optional[dict] = None,
+    ) -> None:
+        """Launch the server process with Colab-specific settings."""
+        logger.info(f"Colab Command: {' '.join(command)}")
+
+        # In Colab, we need to be more careful about process management
+        env = os.environ.copy()
+        encoded_options = encode_provider_options(provider_options)
+        if encoded_options:
+            env["LEANN_EMBEDDING_OPTIONS"] = encoded_options
+
+        self.server_process = subprocess.Popen(
+            command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            env=env,
+        )
+        self.server_port = port
+        logger.info(f"Colab server process started with PID: {self.server_process.pid}")
+
+        # Register atexit callback (unified)
+        if not self._atexit_registered:
+            atexit.register(self._finalize_process)
+            self._atexit_registered = True
+        # Record config for in-process reuse is best-effort in Colab mode
+        self._server_config = {
+            "model_name": "",
+            "passages_file": "",
+            "embedding_mode": "sentence-transformers",
+            "provider_options": provider_options or {},
+        }
+
+    def _wait_for_server_ready_colab(self, port: int) -> tuple[bool, int]:
+        """Wait for the server to be ready with Colab-specific timeout."""
+        max_wait, wait_interval = 30, 0.5  # Shorter timeout for Colab
+
+        for _ in range(int(max_wait / wait_interval)):
+            if _check_port(port):
+                logger.info("Colab embedding server is ready!")
+                return True, port
+
+            if self.server_process and self.server_process.poll() is not None:
+                # Check for error output
+                stdout, stderr = self.server_process.communicate()
+                logger.error("Colab server terminated during startup.")
+                logger.error(f"stdout: {stdout}")
+                logger.error(f"stderr: {stderr}")
+                return False, port
+
+            time.sleep(wait_interval)
+
+        logger.error(f"Colab server failed to start within {max_wait} seconds.")
+        self.stop_server()
+        return False, port
Some files were not shown because too many files have changed in this diff