Compare commits
4 commits: feature/im...cli_fix

| Author | SHA1 | Date |
|---|---|---|
| | 00c44e3980 | |
| | e6a542bf4b | |
| | 7e84dae02e | |
| | 2f05ed4535 | |
.github/ISSUE_TEMPLATE/bug_report.yml (deleted, 50 lines, vendored)
@@ -1,50 +0,0 @@
name: Bug Report
description: Report a bug in LEANN
labels: ["bug"]

body:
  - type: textarea
    id: description
    attributes:
      label: What happened?
      description: A clear description of the bug
    validations:
      required: true

  - type: textarea
    id: reproduce
    attributes:
      label: How to reproduce
      placeholder: |
        1. Install with...
        2. Run command...
        3. See error
    validations:
      required: true

  - type: textarea
    id: error
    attributes:
      label: Error message
      description: Paste any error messages
      render: shell

  - type: input
    id: version
    attributes:
      label: LEANN Version
      placeholder: "0.1.0"
    validations:
      required: true

  - type: dropdown
    id: os
    attributes:
      label: Operating System
      options:
        - macOS
        - Linux
        - Windows
        - Docker
    validations:
      required: true
.github/ISSUE_TEMPLATE/config.yml (deleted, 8 lines, vendored)
@@ -1,8 +0,0 @@
blank_issues_enabled: true
contact_links:
  - name: Documentation
    url: https://github.com/LEANN-RAG/LEANN-RAG/tree/main/docs
    about: Read the docs first
  - name: Discussions
    url: https://github.com/LEANN-RAG/LEANN-RAG/discussions
    about: Ask questions and share ideas
.github/ISSUE_TEMPLATE/feature_request.yml (deleted, 27 lines, vendored)
@@ -1,27 +0,0 @@
name: Feature Request
description: Suggest a new feature for LEANN
labels: ["enhancement"]

body:
  - type: textarea
    id: problem
    attributes:
      label: What problem does this solve?
      description: Describe the problem or need
    validations:
      required: true

  - type: textarea
    id: solution
    attributes:
      label: Proposed solution
      description: How would you like this to work?
    validations:
      required: true

  - type: textarea
    id: example
    attributes:
      label: Example usage
      description: Show how the API might look
      render: python
.github/pull_request_template.md (deleted, 13 lines, vendored)
@@ -1,13 +0,0 @@
## What does this PR do?

<!-- Brief description of your changes -->

## Related Issues

Fixes #

## Checklist

- [ ] Tests pass (`uv run pytest`)
- [ ] Code formatted (`ruff format` and `ruff check`)
- [ ] Pre-commit hooks pass (`pre-commit run --all-files`)
.github/workflows/build-reusable.yml (58 changes, vendored)
@@ -54,17 +54,6 @@ jobs:
           python: '3.12'
         - os: ubuntu-22.04
           python: '3.13'
-        # ARM64 Linux builds
-        - os: ubuntu-24.04-arm
-          python: '3.9'
-        - os: ubuntu-24.04-arm
-          python: '3.10'
-        - os: ubuntu-24.04-arm
-          python: '3.11'
-        - os: ubuntu-24.04-arm
-          python: '3.12'
-        - os: ubuntu-24.04-arm
-          python: '3.13'
         - os: macos-14
           python: '3.9'
         - os: macos-14
@@ -119,46 +108,13 @@ jobs:
             pkg-config libabsl-dev libaio-dev libprotobuf-dev \
             patchelf

-          # Debug: Show system information
-          echo "🔍 System Information:"
-          echo "Architecture: $(uname -m)"
-          echo "OS: $(uname -a)"
-          echo "CPU info: $(lscpu | head -5)"
-
-          # Install math library based on architecture
-          ARCH=$(uname -m)
-          echo "🔍 Setting up math library for architecture: $ARCH"
-
-          if [[ "$ARCH" == "x86_64" ]]; then
-            # Install Intel MKL for DiskANN on x86_64
-            echo "📦 Installing Intel MKL for x86_64..."
-            wget -q https://registrationcenter-download.intel.com/akdlm/IRC_NAS/79153e0f-74d7-45af-b8c2-258941adf58a/intel-onemkl-2025.0.0.940.sh
-            sudo sh intel-onemkl-2025.0.0.940.sh -a --components intel.oneapi.lin.mkl.devel --action install --eula accept -s
-            source /opt/intel/oneapi/setvars.sh
-            echo "MKLROOT=/opt/intel/oneapi/mkl/latest" >> $GITHUB_ENV
-            echo "LD_LIBRARY_PATH=/opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin" >> $GITHUB_ENV
-            echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/mkl/latest/lib/intel64" >> $GITHUB_ENV
-            echo "✅ Intel MKL installed for x86_64"
-
-            # Debug: Check MKL installation
-            echo "🔍 MKL Installation Check:"
-            ls -la /opt/intel/oneapi/mkl/latest/ || echo "MKL directory not found"
-            ls -la /opt/intel/oneapi/mkl/latest/lib/ || echo "MKL lib directory not found"
-
-          elif [[ "$ARCH" == "aarch64" ]]; then
-            # Use OpenBLAS for ARM64 (MKL installer not compatible with ARM64)
-            echo "📦 Installing OpenBLAS for ARM64..."
-            sudo apt-get install -y libopenblas-dev liblapack-dev liblapacke-dev
-            echo "✅ OpenBLAS installed for ARM64"
-
-            # Debug: Check OpenBLAS installation
-            echo "🔍 OpenBLAS Installation Check:"
-            dpkg -l | grep openblas || echo "OpenBLAS package not found"
-            ls -la /usr/lib/aarch64-linux-gnu/openblas/ || echo "OpenBLAS directory not found"
-          fi
-
-          # Debug: Show final library paths
-          echo "🔍 Final LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
+          # Install Intel MKL for DiskANN
+          wget -q https://registrationcenter-download.intel.com/akdlm/IRC_NAS/79153e0f-74d7-45af-b8c2-258941adf58a/intel-onemkl-2025.0.0.940.sh
+          sudo sh intel-onemkl-2025.0.0.940.sh -a --components intel.oneapi.lin.mkl.devel --action install --eula accept -s
+          source /opt/intel/oneapi/setvars.sh
+          echo "MKLROOT=/opt/intel/oneapi/mkl/latest" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=/opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/mkl/latest/lib/intel64" >> $GITHUB_ENV

       - name: Install system dependencies (macOS)
         if: runner.os == 'macOS'
.gitignore (1 change, vendored)
@@ -22,7 +22,6 @@ demo/experiment_results/**/*.json
 *.sh
 *.txt
 !CMakeLists.txt
-!llms.txt
 latency_breakdown*.json
 experiment_results/eval_results/diskann/*.json
 aws/
.gitmodules (3 changes, vendored)
@@ -14,6 +14,3 @@
 [submodule "packages/leann-backend-hnsw/third_party/libzmq"]
 	path = packages/leann-backend-hnsw/third_party/libzmq
 	url = https://github.com/zeromq/libzmq.git
-[submodule "packages/astchunk-leann"]
-	path = packages/astchunk-leann
-	url = https://github.com/yichuan-w/astchunk-leann.git
README.md (249 changes)
@@ -20,7 +20,7 @@ LEANN is an innovative vector database that democratizes personal AI. Transform
 
 LEANN achieves this through *graph-based selective recomputation* with *high-degree preserving pruning*, computing embeddings on-demand instead of storing them all. [Illustration Fig →](#️-architecture--how-it-works) | [Paper →](https://arxiv.org/abs/2506.08276)
 
-**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can semantic search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)** ([WeChat](#-wechat-detective-unlock-your-golden-memories), [iMessage](#-imessage-history-your-personal-conversation-archive)), **[agent memory](#-chatgpt-chat-history-your-personal-ai-conversation-archive)** ([ChatGPT](#-chatgpt-chat-history-your-personal-ai-conversation-archive), [Claude](#-claude-chat-history-your-personal-ai-conversation-archive)), **[codebase](#-claude-code-integration-transform-your-development-workflow)**\* , or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
+**Ready to RAG Everything?** Transform your laptop into a personal AI assistant that can semantic search your **[file system](#-personal-data-manager-process-any-documents-pdf-txt-md)**, **[emails](#-your-personal-email-secretary-rag-on-apple-mail)**, **[browser history](#-time-machine-for-the-web-rag-your-entire-browser-history)**, **[chat history](#-wechat-detective-unlock-your-golden-memories)**, **[codebase](#-claude-code-integration-transform-your-development-workflow)**\* , or external knowledge bases (i.e., 60M documents) - all on your laptop, with zero cloud costs and complete privacy.
 
 
 \* Claude Code only supports basic `grep`-style keyword search. **LEANN** is a drop-in **semantic search MCP service fully compatible with Claude Code**, unlocking intelligent retrieval without changing your workflow. 🔥 Check out [the easy setup →](packages/leann-mcp/README.md)
@@ -176,7 +176,7 @@ response = chat.ask("How much storage does LEANN save?", top_k=1)
 
 ## RAG on Everything!
 
-LEANN supports RAG on various data sources including documents (`.pdf`, `.txt`, `.md`), Apple Mail, Google Search History, WeChat, ChatGPT conversations, Claude conversations, iMessage conversations, and more.
+LEANN supports RAG on various data sources including documents (`.pdf`, `.txt`, `.md`), Apple Mail, Google Search History, WeChat, and more.
 
 
 
@@ -477,238 +477,6 @@ Once the index is built, you can ask questions like:
 
 </details>
 
-### 🤖 ChatGPT Chat History: Your Personal AI Conversation Archive!
-
-Transform your ChatGPT conversations into a searchable knowledge base! Search through all your ChatGPT discussions about coding, research, brainstorming, and more.
-
-```bash
-python -m apps.chatgpt_rag --export-path chatgpt_export.html --query "How do I create a list in Python?"
-```
-
-**Unlock your AI conversation history.** Never lose track of valuable insights from your ChatGPT discussions again.
-
-<details>
-<summary><strong>📋 Click to expand: How to Export ChatGPT Data</strong></summary>
-
-**Step-by-step export process:**
-
-1. **Sign in to ChatGPT**
-2. **Click your profile icon** in the top right corner
-3. **Navigate to Settings** → **Data Controls**
-4. **Click "Export"** under Export Data
-5. **Confirm the export** request
-6. **Download the ZIP file** from the email link (expires in 24 hours)
-7. **Extract or use directly** with LEANN
-
-**Supported formats:**
-- `.html` files from ChatGPT exports
-- `.zip` archives from ChatGPT
-- Directories with multiple export files
-
-</details>
-
-<details>
-<summary><strong>📋 Click to expand: ChatGPT-Specific Arguments</strong></summary>
-
-#### Parameters
-```bash
---export-path PATH    # Path to ChatGPT export file (.html/.zip) or directory (default: ./chatgpt_export)
---separate-messages   # Process each message separately instead of concatenated conversations
---chunk-size N        # Text chunk size (default: 512)
---chunk-overlap N     # Overlap between chunks (default: 128)
-```
-
-#### Example Commands
-```bash
-# Basic usage with HTML export
-python -m apps.chatgpt_rag --export-path conversations.html
-
-# Process ZIP archive from ChatGPT
-python -m apps.chatgpt_rag --export-path chatgpt_export.zip
-
-# Search with specific query
-python -m apps.chatgpt_rag --export-path chatgpt_data.html --query "Python programming help"
-
-# Process individual messages for fine-grained search
-python -m apps.chatgpt_rag --separate-messages --export-path chatgpt_export.html
-
-# Process directory containing multiple exports
-python -m apps.chatgpt_rag --export-path ./chatgpt_exports/ --max-items 1000
-```
-
-</details>
-
-<details>
-<summary><strong>💡 Click to expand: Example queries you can try</strong></summary>
-
-Once your ChatGPT conversations are indexed, you can search with queries like:
-- "What did I ask ChatGPT about Python programming?"
-- "Show me conversations about machine learning algorithms"
-- "Find discussions about web development frameworks"
-- "What coding advice did ChatGPT give me?"
-- "Search for conversations about debugging techniques"
-- "Find ChatGPT's recommendations for learning resources"
-
-</details>
-
-### 🤖 Claude Chat History: Your Personal AI Conversation Archive!
-
-Transform your Claude conversations into a searchable knowledge base! Search through all your Claude discussions about coding, research, brainstorming, and more.
-
-```bash
-python -m apps.claude_rag --export-path claude_export.json --query "What did I ask about Python dictionaries?"
-```
-
-**Unlock your AI conversation history.** Never lose track of valuable insights from your Claude discussions again.
-
-<details>
-<summary><strong>📋 Click to expand: How to Export Claude Data</strong></summary>
-
-**Step-by-step export process:**
-
-1. **Open Claude** in your browser
-2. **Navigate to Settings** (look for gear icon or settings menu)
-3. **Find Export/Download** options in your account settings
-4. **Download conversation data** (usually in JSON format)
-5. **Place the file** in your project directory
-
-*Note: Claude export methods may vary depending on the interface you're using. Check Claude's help documentation for the most current export instructions.*
-
-**Supported formats:**
-- `.json` files (recommended)
-- `.zip` archives containing JSON data
-- Directories with multiple export files
-
-</details>
-
-<details>
-<summary><strong>📋 Click to expand: Claude-Specific Arguments</strong></summary>
-
-#### Parameters
-```bash
---export-path PATH    # Path to Claude export file (.json/.zip) or directory (default: ./claude_export)
---separate-messages   # Process each message separately instead of concatenated conversations
---chunk-size N        # Text chunk size (default: 512)
---chunk-overlap N     # Overlap between chunks (default: 128)
-```
-
-#### Example Commands
-```bash
-# Basic usage with JSON export
-python -m apps.claude_rag --export-path my_claude_conversations.json
-
-# Process ZIP archive from Claude
-python -m apps.claude_rag --export-path claude_export.zip
-
-# Search with specific query
-python -m apps.claude_rag --export-path claude_data.json --query "machine learning advice"
-
-# Process individual messages for fine-grained search
-python -m apps.claude_rag --separate-messages --export-path claude_export.json
-
-# Process directory containing multiple exports
-python -m apps.claude_rag --export-path ./claude_exports/ --max-items 1000
-```
-
-</details>
-
-<details>
-<summary><strong>💡 Click to expand: Example queries you can try</strong></summary>
-
-Once your Claude conversations are indexed, you can search with queries like:
-- "What did I ask Claude about Python programming?"
-- "Show me conversations about machine learning algorithms"
-- "Find discussions about software architecture patterns"
-- "What debugging advice did Claude give me?"
-- "Search for conversations about data structures"
-- "Find Claude's recommendations for learning resources"
-
-</details>
-
-### 💬 iMessage History: Your Personal Conversation Archive!
-
-Transform your iMessage conversations into a searchable knowledge base! Search through all your text messages, group chats, and conversations with friends, family, and colleagues.
-
-```bash
-python -m apps.imessage_rag --query "What did we discuss about the weekend plans?"
-```
-
-**Unlock your message history.** Never lose track of important conversations, shared links, or memorable moments from your iMessage history.
-
-<details>
-<summary><strong>📋 Click to expand: How to Access iMessage Data</strong></summary>
-
-**iMessage data location:**
-
-iMessage conversations are stored in a SQLite database on your Mac at:
-```
-~/Library/Messages/chat.db
-```
-
-**Important setup requirements:**
-
-1. **Grant Full Disk Access** to your terminal or IDE:
-   - Open **System Preferences** → **Security & Privacy** → **Privacy**
-   - Select **Full Disk Access** from the left sidebar
-   - Click the **+** button and add your terminal app (Terminal, iTerm2) or IDE (VS Code, etc.)
-   - Restart your terminal/IDE after granting access
-
-2. **Alternative: Use a backup database**
-   - If you have Time Machine backups or manual copies of the database
-   - Use `--db-path` to specify a custom location
-
-**Supported formats:**
-- Direct access to `~/Library/Messages/chat.db` (default)
-- Custom database path with `--db-path`
-- Works with backup copies of the database
-
-</details>
-
-<details>
-<summary><strong>📋 Click to expand: iMessage-Specific Arguments</strong></summary>
-
-#### Parameters
-```bash
---db-path PATH                  # Path to chat.db file (default: ~/Library/Messages/chat.db)
---concatenate-conversations     # Group messages by conversation (default: True)
---no-concatenate-conversations  # Process each message individually
---chunk-size N                  # Text chunk size (default: 1000)
---chunk-overlap N               # Overlap between chunks (default: 200)
-```
-
-#### Example Commands
-```bash
-# Basic usage (requires Full Disk Access)
-python -m apps.imessage_rag
-
-# Search with specific query
-python -m apps.imessage_rag --query "family dinner plans"
-
-# Use custom database path
-python -m apps.imessage_rag --db-path /path/to/backup/chat.db
-
-# Process individual messages instead of conversations
-python -m apps.imessage_rag --no-concatenate-conversations
-
-# Limit processing for testing
-python -m apps.imessage_rag --max-items 100 --query "weekend"
-```
-
-</details>
-
-<details>
-<summary><strong>💡 Click to expand: Example queries you can try</strong></summary>
-
-Once your iMessage conversations are indexed, you can search with queries like:
-- "What did we discuss about vacation plans?"
-- "Find messages about restaurant recommendations"
-- "Show me conversations with John about the project"
-- "Search for shared links about technology"
-- "Find group chat discussions about weekend events"
-- "What did mom say about the family gathering?"
-
-</details>
-
 ### 🚀 Claude Code Integration: Transform Your Development Workflow!
 
 <details>
@@ -888,19 +656,6 @@ results = searcher.search(
 
 📖 **[Complete Metadata filtering guide →](docs/metadata_filtering.md)**
 
-### 🔍 Grep Search
-
-For exact text matching instead of semantic search, use the `use_grep` parameter:
-
-```python
-# Exact text search
-results = searcher.search("banana‑crocodile", use_grep=True, top_k=1)
-```
-
-**Use cases**: Finding specific code patterns, error messages, function names, or exact phrases where semantic similarity isn't needed.
-
-📖 **[Complete grep search guide →](docs/grep_search.md)**
-
 ## 🏗️ Architecture & How It Works
 
 <p align="center">
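To make the search options touched by this README diff concrete, here is a small sketch contrasting semantic retrieval with the exact-match `use_grep` path shown in the removed "Grep Search" section. Only `searcher.search(...)`, `use_grep=True`, and `top_k` come from the README itself; the `LeannSearcher` import, constructor, and index path are assumptions for illustration.

```python
# Sketch only: not the project's documented setup.
# Assumption: the searcher is built as in the earlier README quick-start;
# the class name and index path below are illustrative.
from leann import LeannSearcher  # assumed import path

searcher = LeannSearcher("./my_docs.leann")  # hypothetical index path

# Semantic search: returns the top-k most similar chunks.
semantic_hits = searcher.search("How much storage does LEANN save?", top_k=3)

# Exact text matching instead of semantic similarity (from the removed section).
exact_hits = searcher.search("banana‑crocodile", use_grep=True, top_k=1)

for hit in list(semantic_hits) + list(exact_hits):
    print(hit)
```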
(deleted file)
@@ -1,413 +0,0 @@
"""
ChatGPT export data reader.

Reads and processes ChatGPT export data from chat.html files.
"""

import re
from pathlib import Path
from typing import Any
from zipfile import ZipFile

from bs4 import BeautifulSoup
from llama_index.core import Document
from llama_index.core.readers.base import BaseReader


class ChatGPTReader(BaseReader):
    """
    ChatGPT export data reader.

    Reads ChatGPT conversation data from exported chat.html files or zip archives.
    Processes conversations into structured documents with metadata.
    """

    def __init__(self, concatenate_conversations: bool = True) -> None:
        """
        Initialize.

        Args:
            concatenate_conversations: Whether to concatenate messages within conversations for better context
        """
        try:
            from bs4 import BeautifulSoup  # noqa
        except ImportError:
            raise ImportError("`beautifulsoup4` package not found: `pip install beautifulsoup4`")

        self.concatenate_conversations = concatenate_conversations

    def _extract_html_from_zip(self, zip_path: Path) -> str | None:
        """
        Extract chat.html from ChatGPT export zip file.

        Args:
            zip_path: Path to the ChatGPT export zip file

        Returns:
            HTML content as string, or None if not found
        """
        try:
            with ZipFile(zip_path, "r") as zip_file:
                # Look for chat.html or conversations.html
                html_files = [
                    f
                    for f in zip_file.namelist()
                    if f.endswith(".html") and ("chat" in f.lower() or "conversation" in f.lower())
                ]

                if not html_files:
                    print(f"No HTML chat file found in {zip_path}")
                    return None

                # Use the first HTML file found
                html_file = html_files[0]
                print(f"Found HTML file: {html_file}")

                with zip_file.open(html_file) as f:
                    return f.read().decode("utf-8", errors="ignore")

        except Exception as e:
            print(f"Error extracting HTML from zip {zip_path}: {e}")
            return None

    def _parse_chatgpt_html(self, html_content: str) -> list[dict]:
        """
        Parse ChatGPT HTML export to extract conversations.

        Args:
            html_content: HTML content from ChatGPT export

        Returns:
            List of conversation dictionaries
        """
        soup = BeautifulSoup(html_content, "html.parser")
        conversations = []

        # Try different possible structures for ChatGPT exports
        # Structure 1: Look for conversation containers
        conversation_containers = soup.find_all(
            ["div", "section"], class_=re.compile(r"conversation|chat", re.I)
        )

        if not conversation_containers:
            # Structure 2: Look for message containers directly
            conversation_containers = [soup]  # Use the entire document as one conversation

        for container in conversation_containers:
            conversation = self._extract_conversation_from_container(container)
            if conversation and conversation.get("messages"):
                conversations.append(conversation)

        # If no structured conversations found, try to extract all text as one conversation
        if not conversations:
            all_text = soup.get_text(separator="\n", strip=True)
            if all_text:
                conversations.append(
                    {
                        "title": "ChatGPT Conversation",
                        "messages": [{"role": "mixed", "content": all_text, "timestamp": None}],
                        "timestamp": None,
                    }
                )

        return conversations

    def _extract_conversation_from_container(self, container) -> dict | None:
        """
        Extract conversation data from a container element.

        Args:
            container: BeautifulSoup element containing conversation

        Returns:
            Dictionary with conversation data or None
        """
        messages = []

        # Look for message elements with various possible structures
        message_selectors = ['[class*="message"]', '[class*="chat"]', "[data-message]", "p", "div"]

        for selector in message_selectors:
            message_elements = container.select(selector)
            if message_elements:
                break
        else:
            message_elements = []

        # If no structured messages found, treat the entire container as one message
        if not message_elements:
            text_content = container.get_text(separator="\n", strip=True)
            if text_content:
                messages.append({"role": "mixed", "content": text_content, "timestamp": None})
        else:
            for element in message_elements:
                message = self._extract_message_from_element(element)
                if message:
                    messages.append(message)

        if not messages:
            return None

        # Try to extract conversation title
        title_element = container.find(["h1", "h2", "h3", "title"])
        title = title_element.get_text(strip=True) if title_element else "ChatGPT Conversation"

        # Try to extract timestamp from various possible locations
        timestamp = self._extract_timestamp_from_container(container)

        return {"title": title, "messages": messages, "timestamp": timestamp}

    def _extract_message_from_element(self, element) -> dict | None:
        """
        Extract message data from an element.

        Args:
            element: BeautifulSoup element containing message

        Returns:
            Dictionary with message data or None
        """
        text_content = element.get_text(separator=" ", strip=True)

        # Skip empty or very short messages
        if not text_content or len(text_content.strip()) < 3:
            return None

        # Try to determine role (user/assistant) from class names or content
        role = "mixed"  # Default role

        class_names = " ".join(element.get("class", [])).lower()
        if "user" in class_names or "human" in class_names:
            role = "user"
        elif "assistant" in class_names or "ai" in class_names or "gpt" in class_names:
            role = "assistant"
        elif text_content.lower().startswith(("you:", "user:", "me:")):
            role = "user"
            text_content = re.sub(r"^(you|user|me):\s*", "", text_content, flags=re.IGNORECASE)
        elif text_content.lower().startswith(("chatgpt:", "assistant:", "ai:")):
            role = "assistant"
            text_content = re.sub(
                r"^(chatgpt|assistant|ai):\s*", "", text_content, flags=re.IGNORECASE
            )

        # Try to extract timestamp
        timestamp = self._extract_timestamp_from_element(element)

        return {"role": role, "content": text_content, "timestamp": timestamp}

    def _extract_timestamp_from_element(self, element) -> str | None:
        """Extract timestamp from element."""
        # Look for timestamp in various attributes and child elements
        timestamp_attrs = ["data-timestamp", "timestamp", "datetime"]
        for attr in timestamp_attrs:
            if element.get(attr):
                return element.get(attr)

        # Look for time elements
        time_element = element.find("time")
        if time_element:
            return time_element.get("datetime") or time_element.get_text(strip=True)

        # Look for date-like text patterns
        text = element.get_text()
        date_patterns = [r"\d{4}-\d{2}-\d{2}", r"\d{1,2}/\d{1,2}/\d{4}", r"\w+ \d{1,2}, \d{4}"]

        for pattern in date_patterns:
            match = re.search(pattern, text)
            if match:
                return match.group()

        return None

    def _extract_timestamp_from_container(self, container) -> str | None:
        """Extract timestamp from conversation container."""
        return self._extract_timestamp_from_element(container)

    def _create_concatenated_content(self, conversation: dict) -> str:
        """
        Create concatenated content from conversation messages.

        Args:
            conversation: Dictionary containing conversation data

        Returns:
            Formatted concatenated content
        """
        title = conversation.get("title", "ChatGPT Conversation")
        messages = conversation.get("messages", [])
        timestamp = conversation.get("timestamp", "Unknown")

        # Build message content
        message_parts = []
        for message in messages:
            role = message.get("role", "mixed")
            content = message.get("content", "")
            msg_timestamp = message.get("timestamp", "")

            if role == "user":
                prefix = "[You]"
            elif role == "assistant":
                prefix = "[ChatGPT]"
            else:
                prefix = "[Message]"

            # Add timestamp if available
            if msg_timestamp:
                prefix += f" ({msg_timestamp})"

            message_parts.append(f"{prefix}: {content}")

        concatenated_text = "\n\n".join(message_parts)

        # Create final document content
        doc_content = f"""Conversation: {title}
Date: {timestamp}
Messages ({len(messages)} messages):

{concatenated_text}
"""
        return doc_content

    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
        """
        Load ChatGPT export data.

        Args:
            input_dir: Directory containing ChatGPT export files or path to specific file
            **load_kwargs:
                max_count (int): Maximum number of conversations to process
                chatgpt_export_path (str): Specific path to ChatGPT export file/directory
                include_metadata (bool): Whether to include metadata in documents
        """
        docs: list[Document] = []
        max_count = load_kwargs.get("max_count", -1)
        chatgpt_export_path = load_kwargs.get("chatgpt_export_path", input_dir)
        include_metadata = load_kwargs.get("include_metadata", True)

        if not chatgpt_export_path:
            print("No ChatGPT export path provided")
            return docs

        export_path = Path(chatgpt_export_path)

        if not export_path.exists():
            print(f"ChatGPT export path not found: {export_path}")
            return docs

        html_content = None

        # Handle different input types
        if export_path.is_file():
            if export_path.suffix.lower() == ".zip":
                # Extract HTML from zip file
                html_content = self._extract_html_from_zip(export_path)
            elif export_path.suffix.lower() == ".html":
                # Read HTML file directly
                try:
                    with open(export_path, encoding="utf-8", errors="ignore") as f:
                        html_content = f.read()
                except Exception as e:
                    print(f"Error reading HTML file {export_path}: {e}")
                    return docs
            else:
                print(f"Unsupported file type: {export_path.suffix}")
                return docs

        elif export_path.is_dir():
            # Look for HTML files in directory
            html_files = list(export_path.glob("*.html"))
            zip_files = list(export_path.glob("*.zip"))

            if html_files:
                # Use first HTML file found
                html_file = html_files[0]
                print(f"Found HTML file: {html_file}")
                try:
                    with open(html_file, encoding="utf-8", errors="ignore") as f:
                        html_content = f.read()
                except Exception as e:
                    print(f"Error reading HTML file {html_file}: {e}")
                    return docs

            elif zip_files:
                # Use first zip file found
                zip_file = zip_files[0]
                print(f"Found zip file: {zip_file}")
                html_content = self._extract_html_from_zip(zip_file)

            else:
                print(f"No HTML or zip files found in {export_path}")
                return docs

        if not html_content:
            print("No HTML content found to process")
            return docs

        # Parse conversations from HTML
        print("Parsing ChatGPT conversations from HTML...")
        conversations = self._parse_chatgpt_html(html_content)

        if not conversations:
            print("No conversations found in HTML content")
            return docs

        print(f"Found {len(conversations)} conversations")

        # Process conversations into documents
        count = 0
        for conversation in conversations:
            if max_count > 0 and count >= max_count:
                break

            if self.concatenate_conversations:
                # Create one document per conversation with concatenated messages
                doc_content = self._create_concatenated_content(conversation)

                metadata = {}
                if include_metadata:
                    metadata = {
                        "title": conversation.get("title", "ChatGPT Conversation"),
                        "timestamp": conversation.get("timestamp", "Unknown"),
                        "message_count": len(conversation.get("messages", [])),
                        "source": "ChatGPT Export",
                    }

                doc = Document(text=doc_content, metadata=metadata)
                docs.append(doc)
                count += 1

            else:
                # Create separate documents for each message
                for message in conversation.get("messages", []):
                    if max_count > 0 and count >= max_count:
                        break

                    role = message.get("role", "mixed")
                    content = message.get("content", "")
                    msg_timestamp = message.get("timestamp", "")

                    if not content.strip():
                        continue

                    # Create document content with context
                    doc_content = f"""Conversation: {conversation.get("title", "ChatGPT Conversation")}
Role: {role}
Timestamp: {msg_timestamp or conversation.get("timestamp", "Unknown")}
Message: {content}
"""

                    metadata = {}
                    if include_metadata:
                        metadata = {
                            "conversation_title": conversation.get("title", "ChatGPT Conversation"),
                            "role": role,
                            "timestamp": msg_timestamp or conversation.get("timestamp", "Unknown"),
                            "source": "ChatGPT Export",
                        }

                    doc = Document(text=doc_content, metadata=metadata)
                    docs.append(doc)
                    count += 1

        print(f"Created {len(docs)} documents from ChatGPT export")
        return docs
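For reference, a minimal sketch of driving the deleted reader directly, using only the constructor argument and `load_data` keyword arguments documented in its own docstrings above. The import path is an assumption based on the `from .chatgpt_data.chatgpt_reader import ChatGPTReader` line in the companion script below; the exact package location was not captured in this compare view.

```python
# Sketch only, assuming the module is importable as apps.chatgpt_data.chatgpt_reader.
from apps.chatgpt_data.chatgpt_reader import ChatGPTReader

reader = ChatGPTReader(concatenate_conversations=True)

# load_data accepts the keyword arguments described in its docstring above.
documents = reader.load_data(
    chatgpt_export_path="chatgpt_export.zip",  # a .zip or .html export
    max_count=10,                              # limit conversations; -1 means all
    include_metadata=True,
)

for doc in documents:
    # Each item is a llama_index Document carrying title/timestamp metadata.
    print(doc.metadata.get("title"), len(doc.text))
```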
(deleted file)
@@ -1,186 +0,0 @@
"""
ChatGPT RAG example using the unified interface.
Supports ChatGPT export data from chat.html files.
"""

import sys
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))

from base_rag_example import BaseRAGExample
from chunking import create_text_chunks

from .chatgpt_data.chatgpt_reader import ChatGPTReader


class ChatGPTRAG(BaseRAGExample):
    """RAG example for ChatGPT conversation data."""

    def __init__(self):
        # Set default values BEFORE calling super().__init__
        self.max_items_default = -1  # Process all conversations by default
        self.embedding_model_default = (
            "sentence-transformers/all-MiniLM-L6-v2"  # Fast 384-dim model
        )

        super().__init__(
            name="ChatGPT",
            description="Process and query ChatGPT conversation exports with LEANN",
            default_index_name="chatgpt_conversations_index",
        )

    def _add_specific_arguments(self, parser):
        """Add ChatGPT-specific arguments."""
        chatgpt_group = parser.add_argument_group("ChatGPT Parameters")
        chatgpt_group.add_argument(
            "--export-path",
            type=str,
            default="./chatgpt_export",
            help="Path to ChatGPT export file (.zip or .html) or directory containing exports (default: ./chatgpt_export)",
        )
        chatgpt_group.add_argument(
            "--concatenate-conversations",
            action="store_true",
            default=True,
            help="Concatenate messages within conversations for better context (default: True)",
        )
        chatgpt_group.add_argument(
            "--separate-messages",
            action="store_true",
            help="Process each message as a separate document (overrides --concatenate-conversations)",
        )
        chatgpt_group.add_argument(
            "--chunk-size", type=int, default=512, help="Text chunk size (default: 512)"
        )
        chatgpt_group.add_argument(
            "--chunk-overlap", type=int, default=128, help="Text chunk overlap (default: 128)"
        )

    def _find_chatgpt_exports(self, export_path: Path) -> list[Path]:
        """
        Find ChatGPT export files in the given path.

        Args:
            export_path: Path to search for exports

        Returns:
            List of paths to ChatGPT export files
        """
        export_files = []

        if export_path.is_file():
            if export_path.suffix.lower() in [".zip", ".html"]:
                export_files.append(export_path)
        elif export_path.is_dir():
            # Look for zip and html files
            export_files.extend(export_path.glob("*.zip"))
            export_files.extend(export_path.glob("*.html"))

        return export_files

    async def load_data(self, args) -> list[str]:
        """Load ChatGPT export data and convert to text chunks."""
        export_path = Path(args.export_path)

        if not export_path.exists():
            print(f"ChatGPT export path not found: {export_path}")
            print(
                "Please ensure you have exported your ChatGPT data and placed it in the correct location."
            )
            print("\nTo export your ChatGPT data:")
            print("1. Sign in to ChatGPT")
            print("2. Click on your profile icon → Settings → Data Controls")
            print("3. Click 'Export' under Export Data")
            print("4. Download the zip file from the email link")
            print("5. Extract or place the file/directory at the specified path")
            return []

        # Find export files
        export_files = self._find_chatgpt_exports(export_path)

        if not export_files:
            print(f"No ChatGPT export files (.zip or .html) found in: {export_path}")
            return []

        print(f"Found {len(export_files)} ChatGPT export files")

        # Create reader with appropriate settings
        concatenate = args.concatenate_conversations and not args.separate_messages
        reader = ChatGPTReader(concatenate_conversations=concatenate)

        # Process each export file
        all_documents = []
        total_processed = 0

        for i, export_file in enumerate(export_files):
            print(f"\nProcessing export file {i + 1}/{len(export_files)}: {export_file.name}")

            try:
                # Apply max_items limit per file
                max_per_file = -1
                if args.max_items > 0:
                    remaining = args.max_items - total_processed
                    if remaining <= 0:
                        break
                    max_per_file = remaining

                # Load conversations
                documents = reader.load_data(
                    chatgpt_export_path=str(export_file),
                    max_count=max_per_file,
                    include_metadata=True,
                )

                if documents:
                    all_documents.extend(documents)
                    total_processed += len(documents)
                    print(f"Processed {len(documents)} conversations from this file")
                else:
                    print(f"No conversations loaded from {export_file}")

            except Exception as e:
                print(f"Error processing {export_file}: {e}")
                continue

        if not all_documents:
            print("No conversations found to process!")
            print("\nTroubleshooting:")
            print("- Ensure the export file is a valid ChatGPT export")
            print("- Check that the HTML file contains conversation data")
            print("- Try extracting the zip file and pointing to the HTML file directly")
            return []

        print(f"\nTotal conversations processed: {len(all_documents)}")
        print("Now starting to split into text chunks... this may take some time")

        # Convert to text chunks
        all_texts = create_text_chunks(
            all_documents, chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
        )

        print(f"Created {len(all_texts)} text chunks from {len(all_documents)} conversations")
        return all_texts


if __name__ == "__main__":
    import asyncio

    # Example queries for ChatGPT RAG
    print("\n🤖 ChatGPT RAG Example")
    print("=" * 50)
    print("\nExample queries you can try:")
    print("- 'What did I ask about Python programming?'")
    print("- 'Show me conversations about machine learning'")
    print("- 'Find discussions about travel planning'")
    print("- 'What advice did ChatGPT give me about career development?'")
    print("- 'Search for conversations about cooking recipes'")
    print("\nTo get started:")
    print("1. Export your ChatGPT data from Settings → Data Controls → Export")
    print("2. Place the downloaded zip file or extracted HTML in ./chatgpt_export/")
    print("3. Run this script to build your personal ChatGPT knowledge base!")
    print("\nOr run without --query for interactive mode\n")

    rag = ChatGPTRAG()
    asyncio.run(rag.run())
@@ -1,38 +1,16 @@
-"""Unified chunking utilities facade.
-
-This module re-exports the packaged utilities from `leann.chunking_utils` so
-that both repo apps (importing `chunking`) and installed wheels share one
-single implementation. When running from the repo without installation, it
-adds the `packages/leann-core/src` directory to `sys.path` as a fallback.
-"""
-
-import sys
-from pathlib import Path
-
-try:
-    from leann.chunking_utils import (
-        CODE_EXTENSIONS,
-        create_ast_chunks,
-        create_text_chunks,
-        create_traditional_chunks,
-        detect_code_files,
-        get_language_from_extension,
-    )
-except Exception:  # pragma: no cover - best-effort fallback for dev environment
-    repo_root = Path(__file__).resolve().parents[2]
-    leann_src = repo_root / "packages" / "leann-core" / "src"
-    if leann_src.exists():
-        sys.path.insert(0, str(leann_src))
-        from leann.chunking_utils import (
-            CODE_EXTENSIONS,
-            create_ast_chunks,
-            create_text_chunks,
-            create_traditional_chunks,
-            detect_code_files,
-            get_language_from_extension,
-        )
-    else:
-        raise
+"""
+Chunking utilities for LEANN RAG applications.
+Provides AST-aware and traditional text chunking functionality.
+"""
+
+from .utils import (
+    CODE_EXTENSIONS,
+    create_ast_chunks,
+    create_text_chunks,
+    create_traditional_chunks,
+    detect_code_files,
+    get_language_from_extension,
+)
 
 __all__ = [
     "CODE_EXTENSIONS",
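A short sketch of how an app consumes this chunking module: both branches of the diff above expose the same helper names, so caller code is unchanged. The sample documents below are illustrative, and the `"python"` mapping for `.py` files is an assumption about `CODE_EXTENSIONS` (only the `.jsx` entry is visible in this diff).

```python
# Sketch only: exercising the helpers re-exported by the chunking module above.
from llama_index.core import Document

from chunking import create_text_chunks, detect_code_files, get_language_from_extension

docs = [
    Document(text="def add(a, b):\n    return a + b\n", metadata={"file_name": "math_utils.py"}),
    Document(text="LEANN notes: recompute embeddings on demand.", metadata={"file_name": "notes.md"}),
]

# Split documents into code vs. plain text based on file extension metadata.
code_docs, text_docs = detect_code_files(docs)
print(get_language_from_extension("math_utils.py"))  # assumed to map to "python"

# Chunk everything; the sizes mirror the defaults used by the ChatGPT example above.
chunks = create_text_chunks(docs, chunk_size=512, chunk_overlap=128)
print(f"{len(chunks)} chunks")
```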
@@ -1,6 +1,6 @@
 """
 Enhanced chunking utilities with AST-aware code chunking support.
-Packaged within leann-core so installed wheels can import it reliably.
+Provides unified interface for both traditional and AST-based text chunking.
 """
 
 import logging
@@ -22,9 +22,30 @@ CODE_EXTENSIONS = {
     ".jsx": "typescript",
 }
 
+# Default chunk parameters for different content types
+DEFAULT_CHUNK_PARAMS = {
+    "code": {
+        "max_chunk_size": 512,
+        "chunk_overlap": 64,
+    },
+    "text": {
+        "chunk_size": 256,
+        "chunk_overlap": 128,
+    },
+}
+
 
 def detect_code_files(documents, code_extensions=None) -> tuple[list, list]:
-    """Separate documents into code files and regular text files."""
+    """
+    Separate documents into code files and regular text files.
+
+    Args:
+        documents: List of LlamaIndex Document objects
+        code_extensions: Dict mapping file extensions to languages (defaults to CODE_EXTENSIONS)
+
+    Returns:
+        Tuple of (code_documents, text_documents)
+    """
     if code_extensions is None:
         code_extensions = CODE_EXTENSIONS
@@ -32,10 +53,16 @@ def detect_code_files(documents, code_extensions=None) -> tuple[list, list]:
     text_docs = []
 
     for doc in documents:
-        file_path = doc.metadata.get("file_path", "") or doc.metadata.get("file_name", "")
+        # Get file path from metadata
+        file_path = doc.metadata.get("file_path", "")
+        if not file_path:
+            # Fallback to file_name
+            file_path = doc.metadata.get("file_name", "")
+
         if file_path:
             file_ext = Path(file_path).suffix.lower()
             if file_ext in code_extensions:
+                # Add language info to metadata
                 doc.metadata["language"] = code_extensions[file_ext]
                 doc.metadata["is_code"] = True
                 code_docs.append(doc)
@@ -43,6 +70,7 @@ def detect_code_files(documents, code_extensions=None) -> tuple[list, list]:
                 doc.metadata["is_code"] = False
                 text_docs.append(doc)
         else:
+            # If no file path, treat as text
             doc.metadata["is_code"] = False
             text_docs.append(doc)
 
@@ -51,7 +79,7 @@ def detect_code_files(documents, code_extensions=None) -> tuple[list, list]:
 
 
 def get_language_from_extension(file_path: str) -> Optional[str]:
-    """Return language string from a filename/extension using CODE_EXTENSIONS."""
+    """Get the programming language from file extension."""
     ext = Path(file_path).suffix.lower()
     return CODE_EXTENSIONS.get(ext)
 
@@ -62,26 +90,40 @@ def create_ast_chunks(
     chunk_overlap: int = 64,
     metadata_template: str = "default",
 ) -> list[str]:
-    """Create AST-aware chunks from code documents using astchunk.
-
-    Falls back to traditional chunking if astchunk is unavailable.
+    """
+    Create AST-aware chunks from code documents using astchunk.
+
+    Args:
+        documents: List of code documents
+        max_chunk_size: Maximum characters per chunk
+        chunk_overlap: Number of AST nodes to overlap between chunks
+        metadata_template: Template for chunk metadata
+
+    Returns:
+        List of text chunks with preserved code structure
     """
     try:
-        from astchunk import ASTChunkBuilder  # optional dependency
+        from astchunk import ASTChunkBuilder
     except ImportError as e:
         logger.error(f"astchunk not available: {e}")
         logger.info("Falling back to traditional chunking for code files")
         return create_traditional_chunks(documents, max_chunk_size, chunk_overlap)
 
     all_chunks = []
 
     for doc in documents:
+        # Get language from metadata (set by detect_code_files)
        language = doc.metadata.get("language")
        if not language:
-            logger.warning("No language detected; falling back to traditional chunking")
-            all_chunks.extend(create_traditional_chunks([doc], max_chunk_size, chunk_overlap))
+            logger.warning(
+                "No language detected for document, falling back to traditional chunking"
+            )
+            traditional_chunks = create_traditional_chunks([doc], max_chunk_size, chunk_overlap)
+            all_chunks.extend(traditional_chunks)
            continue
 
        try:
+            # Configure astchunk
            configs = {
                "max_chunk_size": max_chunk_size,
                "language": language,
@@ -89,6 +131,7 @@ def create_ast_chunks(
                "chunk_overlap": chunk_overlap if chunk_overlap > 0 else 0,
            }
 
+            # Add repository-level metadata if available
            repo_metadata = {
                "file_path": doc.metadata.get("file_path", ""),
                "file_name": doc.metadata.get("file_name", ""),
@@ -97,13 +140,17 @@ def create_ast_chunks(
            }
            configs["repo_level_metadata"] = repo_metadata
 
+            # Create chunk builder and process
            chunk_builder = ASTChunkBuilder(**configs)
            code_content = doc.get_content()
 
            if not code_content or not code_content.strip():
                logger.warning("Empty code content, skipping")
                continue
 
            chunks = chunk_builder.chunkify(code_content)
 
+            # Extract text content from chunks
            for chunk in chunks:
                if hasattr(chunk, "text"):
                    chunk_text = chunk.text
@@ -112,6 +159,7 @@ def create_ast_chunks(
                elif isinstance(chunk, str):
                    chunk_text = chunk
                else:
+                    # Try to convert to string
                    chunk_text = str(chunk)
 
                if chunk_text and chunk_text.strip():
@@ -120,10 +168,12 @@ def create_ast_chunks(
            logger.info(
                f"Created {len(chunks)} AST chunks from {language} file: {doc.metadata.get('file_name', 'unknown')}"
            )
 
        except Exception as e:
            logger.warning(f"AST chunking failed for {language} file: {e}")
            logger.info("Falling back to traditional chunking")
-            all_chunks.extend(create_traditional_chunks([doc], max_chunk_size, chunk_overlap))
+            traditional_chunks = create_traditional_chunks([doc], max_chunk_size, chunk_overlap)
+            all_chunks.extend(traditional_chunks)
 
     return all_chunks
 
@@ -131,10 +181,23 @@ def create_ast_chunks(
 def create_traditional_chunks(
     documents, chunk_size: int = 256, chunk_overlap: int = 128
 ) -> list[str]:
-    """Create traditional text chunks using LlamaIndex SentenceSplitter."""
+    """
+    Create traditional text chunks using LlamaIndex SentenceSplitter.
+
+    Args:
+        documents: List of documents to chunk
+        chunk_size: Size of each chunk in characters
+        chunk_overlap: Overlap between chunks
+
+    Returns:
+        List of text chunks
+    """
+    # Handle invalid chunk_size values
     if chunk_size <= 0:
         logger.warning(f"Invalid chunk_size={chunk_size}, using default value of 256")
         chunk_size = 256
 
+    # Ensure chunk_overlap is not negative and not larger than chunk_size
     if chunk_overlap < 0:
         chunk_overlap = 0
     if chunk_overlap >= chunk_size:
@@ -152,9 +215,12 @@ def create_traditional_chunks(
         try:
             nodes = node_parser.get_nodes_from_documents([doc])
             if nodes:
-                all_texts.extend(node.get_content() for node in nodes)
+                chunk_texts = [node.get_content() for node in nodes]
+                all_texts.extend(chunk_texts)
+                logger.debug(f"Created {len(chunk_texts)} traditional chunks from document")
         except Exception as e:
             logger.error(f"Traditional chunking failed for document: {e}")
+            # As last resort, add the raw content
             content = doc.get_content()
             if content and content.strip():
                 all_texts.append(content.strip())
@@ -172,13 +238,32 @@
     code_file_extensions: Optional[list[str]] = None,
     ast_fallback_traditional: bool = True,
|
ast_fallback_traditional: bool = True,
|
||||||
) -> list[str]:
|
) -> list[str]:
|
||||||
"""Create text chunks from documents with optional AST support for code files."""
|
"""
|
||||||
|
Create text chunks from documents with optional AST support for code files.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
documents: List of LlamaIndex Document objects
|
||||||
|
chunk_size: Size for traditional text chunks
|
||||||
|
chunk_overlap: Overlap for traditional text chunks
|
||||||
|
use_ast_chunking: Whether to use AST chunking for code files
|
||||||
|
ast_chunk_size: Size for AST chunks
|
||||||
|
ast_chunk_overlap: Overlap for AST chunks
|
||||||
|
code_file_extensions: Custom list of code file extensions
|
||||||
|
ast_fallback_traditional: Fall back to traditional chunking on AST errors
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of text chunks
|
||||||
|
"""
|
||||||
if not documents:
|
if not documents:
|
||||||
logger.warning("No documents provided for chunking")
|
logger.warning("No documents provided for chunking")
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
# Create a local copy of supported extensions for this function call
|
||||||
local_code_extensions = CODE_EXTENSIONS.copy()
|
local_code_extensions = CODE_EXTENSIONS.copy()
|
||||||
|
|
||||||
|
# Update supported extensions if provided
|
||||||
if code_file_extensions:
|
if code_file_extensions:
|
||||||
|
# Map extensions to languages (simplified mapping)
|
||||||
ext_mapping = {
|
ext_mapping = {
|
||||||
".py": "python",
|
".py": "python",
|
||||||
".java": "java",
|
".java": "java",
|
||||||
@@ -188,32 +273,47 @@ def create_text_chunks(
|
|||||||
}
|
}
|
||||||
for ext in code_file_extensions:
|
for ext in code_file_extensions:
|
||||||
if ext.lower() not in local_code_extensions:
|
if ext.lower() not in local_code_extensions:
|
||||||
|
# Try to guess language from extension
|
||||||
if ext.lower() in ext_mapping:
|
if ext.lower() in ext_mapping:
|
||||||
local_code_extensions[ext.lower()] = ext_mapping[ext.lower()]
|
local_code_extensions[ext.lower()] = ext_mapping[ext.lower()]
|
||||||
else:
|
else:
|
||||||
logger.warning(f"Unsupported extension {ext}, will use traditional chunking")
|
logger.warning(f"Unsupported extension {ext}, will use traditional chunking")
|
||||||
|
|
||||||
all_chunks = []
|
all_chunks = []
|
||||||
|
|
||||||
if use_ast_chunking:
|
if use_ast_chunking:
|
||||||
|
# Separate code and text documents using local extensions
|
||||||
code_docs, text_docs = detect_code_files(documents, local_code_extensions)
|
code_docs, text_docs = detect_code_files(documents, local_code_extensions)
|
||||||
|
|
||||||
|
# Process code files with AST chunking
|
||||||
if code_docs:
|
if code_docs:
|
||||||
|
logger.info(f"Processing {len(code_docs)} code files with AST chunking")
|
||||||
try:
|
try:
|
||||||
all_chunks.extend(
|
ast_chunks = create_ast_chunks(
|
||||||
create_ast_chunks(
|
code_docs, max_chunk_size=ast_chunk_size, chunk_overlap=ast_chunk_overlap
|
||||||
code_docs, max_chunk_size=ast_chunk_size, chunk_overlap=ast_chunk_overlap
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
all_chunks.extend(ast_chunks)
|
||||||
|
logger.info(f"Created {len(ast_chunks)} AST chunks from code files")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"AST chunking failed: {e}")
|
logger.error(f"AST chunking failed: {e}")
|
||||||
if ast_fallback_traditional:
|
if ast_fallback_traditional:
|
||||||
all_chunks.extend(
|
logger.info("Falling back to traditional chunking for code files")
|
||||||
create_traditional_chunks(code_docs, chunk_size, chunk_overlap)
|
traditional_code_chunks = create_traditional_chunks(
|
||||||
|
code_docs, chunk_size, chunk_overlap
|
||||||
)
|
)
|
||||||
|
all_chunks.extend(traditional_code_chunks)
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
# Process text files with traditional chunking
|
||||||
if text_docs:
|
if text_docs:
|
||||||
all_chunks.extend(create_traditional_chunks(text_docs, chunk_size, chunk_overlap))
|
logger.info(f"Processing {len(text_docs)} text files with traditional chunking")
|
||||||
|
text_chunks = create_traditional_chunks(text_docs, chunk_size, chunk_overlap)
|
||||||
|
all_chunks.extend(text_chunks)
|
||||||
|
logger.info(f"Created {len(text_chunks)} traditional chunks from text files")
|
||||||
else:
|
else:
|
||||||
|
# Use traditional chunking for all files
|
||||||
|
logger.info(f"Processing {len(documents)} documents with traditional chunking")
|
||||||
all_chunks = create_traditional_chunks(documents, chunk_size, chunk_overlap)
|
all_chunks = create_traditional_chunks(documents, chunk_size, chunk_overlap)
|
||||||
|
|
||||||
logger.info(f"Total chunks created: {len(all_chunks)}")
|
logger.info(f"Total chunks created: {len(all_chunks)}")
|
||||||
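Note: a minimal usage sketch of the `create_text_chunks` entry point changed above. The import path (`apps.chunking`) follows the dynamic-update example later in this diff and may differ in your checkout; the sample `Document` and its metadata are illustrative only.

```python
from llama_index.core import Document

from apps.chunking import create_text_chunks  # import path as used elsewhere in this diff

# A tiny illustrative document; real callers pass documents produced by a reader.
docs = [
    Document(
        text="def add(a, b):\n    return a + b\n",
        metadata={"file_name": "math_utils.py", "file_path": "src/math_utils.py"},
    )
]

chunks = create_text_chunks(
    docs,
    chunk_size=256,
    chunk_overlap=64,
    use_ast_chunking=True,          # AST chunking for code files, traditional otherwise
    ast_chunk_size=512,
    ast_chunk_overlap=64,
    ast_fallback_traditional=True,  # fall back to SentenceSplitter if astchunk fails
)
print(f"Created {len(chunks)} chunks")
```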
@@ -1,420 +0,0 @@
"""
Claude export data reader.

Reads and processes Claude conversation data from exported JSON files.
"""

import json
from pathlib import Path
from typing import Any
from zipfile import ZipFile

from llama_index.core import Document
from llama_index.core.readers.base import BaseReader


class ClaudeReader(BaseReader):
    """
    Claude export data reader.

    Reads Claude conversation data from exported JSON files or zip archives.
    Processes conversations into structured documents with metadata.
    """

    def __init__(self, concatenate_conversations: bool = True) -> None:
        """
        Initialize.

        Args:
            concatenate_conversations: Whether to concatenate messages within conversations for better context
        """
        self.concatenate_conversations = concatenate_conversations

    def _extract_json_from_zip(self, zip_path: Path) -> list[str]:
        """
        Extract JSON files from Claude export zip file.

        Args:
            zip_path: Path to the Claude export zip file

        Returns:
            List of JSON content strings, or empty list if not found
        """
        json_contents = []
        try:
            with ZipFile(zip_path, "r") as zip_file:
                # Look for JSON files
                json_files = [f for f in zip_file.namelist() if f.endswith(".json")]

                if not json_files:
                    print(f"No JSON files found in {zip_path}")
                    return []

                print(f"Found {len(json_files)} JSON files in archive")

                for json_file in json_files:
                    with zip_file.open(json_file) as f:
                        content = f.read().decode("utf-8", errors="ignore")
                        json_contents.append(content)

        except Exception as e:
            print(f"Error extracting JSON from zip {zip_path}: {e}")

        return json_contents

    def _parse_claude_json(self, json_content: str) -> list[dict]:
        """
        Parse Claude JSON export to extract conversations.

        Args:
            json_content: JSON content from Claude export

        Returns:
            List of conversation dictionaries
        """
        try:
            data = json.loads(json_content)
        except json.JSONDecodeError as e:
            print(f"Error parsing JSON: {e}")
            return []

        conversations = []

        # Handle different possible JSON structures
        if isinstance(data, list):
            # If data is a list of conversations
            for item in data:
                conversation = self._extract_conversation_from_json(item)
                if conversation:
                    conversations.append(conversation)
        elif isinstance(data, dict):
            # Check for common structures
            if "conversations" in data:
                # Structure: {"conversations": [...]}
                for item in data["conversations"]:
                    conversation = self._extract_conversation_from_json(item)
                    if conversation:
                        conversations.append(conversation)
            elif "messages" in data:
                # Single conversation with messages
                conversation = self._extract_conversation_from_json(data)
                if conversation:
                    conversations.append(conversation)
            else:
                # Try to treat the whole object as a conversation
                conversation = self._extract_conversation_from_json(data)
                if conversation:
                    conversations.append(conversation)

        return conversations

    def _extract_conversation_from_json(self, conv_data: dict) -> dict | None:
        """
        Extract conversation data from a JSON object.

        Args:
            conv_data: Dictionary containing conversation data

        Returns:
            Dictionary with conversation data or None
        """
        if not isinstance(conv_data, dict):
            return None

        messages = []

        # Look for messages in various possible structures
        message_sources = []
        if "messages" in conv_data:
            message_sources = conv_data["messages"]
        elif "chat" in conv_data:
            message_sources = conv_data["chat"]
        elif "conversation" in conv_data:
            message_sources = conv_data["conversation"]
        else:
            # If no clear message structure, try to extract from the object itself
            if "content" in conv_data and "role" in conv_data:
                message_sources = [conv_data]

        for msg_data in message_sources:
            message = self._extract_message_from_json(msg_data)
            if message:
                messages.append(message)

        if not messages:
            return None

        # Extract conversation metadata
        title = self._extract_title_from_conversation(conv_data, messages)
        timestamp = self._extract_timestamp_from_conversation(conv_data)

        return {"title": title, "messages": messages, "timestamp": timestamp}

    def _extract_message_from_json(self, msg_data: dict) -> dict | None:
        """
        Extract message data from a JSON message object.

        Args:
            msg_data: Dictionary containing message data

        Returns:
            Dictionary with message data or None
        """
        if not isinstance(msg_data, dict):
            return None

        # Extract content from various possible fields
        content = ""
        content_fields = ["content", "text", "message", "body"]
        for field in content_fields:
            if msg_data.get(field):
                content = str(msg_data[field])
                break

        if not content or len(content.strip()) < 3:
            return None

        # Extract role (user/assistant/human/ai/claude)
        role = "mixed"  # Default role
        role_fields = ["role", "sender", "from", "author", "type"]
        for field in role_fields:
            if msg_data.get(field):
                role_value = str(msg_data[field]).lower()
                if role_value in ["user", "human", "person"]:
                    role = "user"
                elif role_value in ["assistant", "ai", "claude", "bot"]:
                    role = "assistant"
                break

        # Extract timestamp
        timestamp = self._extract_timestamp_from_message(msg_data)

        return {"role": role, "content": content, "timestamp": timestamp}

    def _extract_timestamp_from_message(self, msg_data: dict) -> str | None:
        """Extract timestamp from message data."""
        timestamp_fields = ["timestamp", "created_at", "date", "time"]
        for field in timestamp_fields:
            if msg_data.get(field):
                return str(msg_data[field])
        return None

    def _extract_timestamp_from_conversation(self, conv_data: dict) -> str | None:
        """Extract timestamp from conversation data."""
        timestamp_fields = ["timestamp", "created_at", "date", "updated_at", "last_updated"]
        for field in timestamp_fields:
            if conv_data.get(field):
                return str(conv_data[field])
        return None

    def _extract_title_from_conversation(self, conv_data: dict, messages: list) -> str:
        """Extract or generate title for conversation."""
        # Try to find explicit title
        title_fields = ["title", "name", "subject", "topic"]
        for field in title_fields:
            if conv_data.get(field):
                return str(conv_data[field])

        # Generate title from first user message
        for message in messages:
            if message.get("role") == "user":
                content = message.get("content", "")
                if content:
                    # Use first 50 characters as title
                    title = content[:50].strip()
                    if len(content) > 50:
                        title += "..."
                    return title

        return "Claude Conversation"

    def _create_concatenated_content(self, conversation: dict) -> str:
        """
        Create concatenated content from conversation messages.

        Args:
            conversation: Dictionary containing conversation data

        Returns:
            Formatted concatenated content
        """
        title = conversation.get("title", "Claude Conversation")
        messages = conversation.get("messages", [])
        timestamp = conversation.get("timestamp", "Unknown")

        # Build message content
        message_parts = []
        for message in messages:
            role = message.get("role", "mixed")
            content = message.get("content", "")
            msg_timestamp = message.get("timestamp", "")

            if role == "user":
                prefix = "[You]"
            elif role == "assistant":
                prefix = "[Claude]"
            else:
                prefix = "[Message]"

            # Add timestamp if available
            if msg_timestamp:
                prefix += f" ({msg_timestamp})"

            message_parts.append(f"{prefix}: {content}")

        concatenated_text = "\n\n".join(message_parts)

        # Create final document content
        doc_content = f"""Conversation: {title}
Date: {timestamp}
Messages ({len(messages)} messages):

{concatenated_text}
"""
        return doc_content

    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
        """
        Load Claude export data.

        Args:
            input_dir: Directory containing Claude export files or path to specific file
            **load_kwargs:
                max_count (int): Maximum number of conversations to process
                claude_export_path (str): Specific path to Claude export file/directory
                include_metadata (bool): Whether to include metadata in documents
        """
        docs: list[Document] = []
        max_count = load_kwargs.get("max_count", -1)
        claude_export_path = load_kwargs.get("claude_export_path", input_dir)
        include_metadata = load_kwargs.get("include_metadata", True)

        if not claude_export_path:
            print("No Claude export path provided")
            return docs

        export_path = Path(claude_export_path)

        if not export_path.exists():
            print(f"Claude export path not found: {export_path}")
            return docs

        json_contents = []

        # Handle different input types
        if export_path.is_file():
            if export_path.suffix.lower() == ".zip":
                # Extract JSON from zip file
                json_contents = self._extract_json_from_zip(export_path)
            elif export_path.suffix.lower() == ".json":
                # Read JSON file directly
                try:
                    with open(export_path, encoding="utf-8", errors="ignore") as f:
                        json_contents.append(f.read())
                except Exception as e:
                    print(f"Error reading JSON file {export_path}: {e}")
                    return docs
            else:
                print(f"Unsupported file type: {export_path.suffix}")
                return docs

        elif export_path.is_dir():
            # Look for JSON files in directory
            json_files = list(export_path.glob("*.json"))
            zip_files = list(export_path.glob("*.zip"))

            if json_files:
                print(f"Found {len(json_files)} JSON files in directory")
                for json_file in json_files:
                    try:
                        with open(json_file, encoding="utf-8", errors="ignore") as f:
                            json_contents.append(f.read())
                    except Exception as e:
                        print(f"Error reading JSON file {json_file}: {e}")
                        continue

            if zip_files:
                print(f"Found {len(zip_files)} ZIP files in directory")
                for zip_file in zip_files:
                    zip_contents = self._extract_json_from_zip(zip_file)
                    json_contents.extend(zip_contents)

            if not json_files and not zip_files:
                print(f"No JSON or ZIP files found in {export_path}")
                return docs

        if not json_contents:
            print("No JSON content found to process")
            return docs

        # Parse conversations from JSON content
        print("Parsing Claude conversations from JSON...")
        all_conversations = []
        for json_content in json_contents:
            conversations = self._parse_claude_json(json_content)
            all_conversations.extend(conversations)

        if not all_conversations:
            print("No conversations found in JSON content")
            return docs

        print(f"Found {len(all_conversations)} conversations")

        # Process conversations into documents
        count = 0
        for conversation in all_conversations:
            if max_count > 0 and count >= max_count:
                break

            if self.concatenate_conversations:
                # Create one document per conversation with concatenated messages
                doc_content = self._create_concatenated_content(conversation)

                metadata = {}
                if include_metadata:
                    metadata = {
                        "title": conversation.get("title", "Claude Conversation"),
                        "timestamp": conversation.get("timestamp", "Unknown"),
                        "message_count": len(conversation.get("messages", [])),
                        "source": "Claude Export",
                    }

                doc = Document(text=doc_content, metadata=metadata)
                docs.append(doc)
                count += 1

            else:
                # Create separate documents for each message
                for message in conversation.get("messages", []):
                    if max_count > 0 and count >= max_count:
                        break

                    role = message.get("role", "mixed")
                    content = message.get("content", "")
                    msg_timestamp = message.get("timestamp", "")

                    if not content.strip():
                        continue

                    # Create document content with context
                    doc_content = f"""Conversation: {conversation.get("title", "Claude Conversation")}
Role: {role}
Timestamp: {msg_timestamp or conversation.get("timestamp", "Unknown")}
Message: {content}
"""

                    metadata = {}
                    if include_metadata:
                        metadata = {
                            "conversation_title": conversation.get("title", "Claude Conversation"),
                            "role": role,
                            "timestamp": msg_timestamp or conversation.get("timestamp", "Unknown"),
                            "source": "Claude Export",
                        }

                    doc = Document(text=doc_content, metadata=metadata)
                    docs.append(doc)
                    count += 1

        print(f"Created {len(docs)} documents from Claude export")
        return docs
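Note: a minimal, hypothetical sketch of driving the reader above directly; the import prefix and the export path are assumptions, not part of the removed file.

```python
from apps.claude_data.claude_reader import ClaudeReader  # import prefix is an assumption

reader = ClaudeReader(concatenate_conversations=True)
docs = reader.load_data(
    claude_export_path="./claude_export/conversations.json",  # illustrative path
    max_count=10,
    include_metadata=True,
)
for doc in docs:
    print(doc.metadata.get("title"), "-", doc.metadata.get("message_count"), "messages")
```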
@@ -1,189 +0,0 @@
"""
Claude RAG example using the unified interface.
Supports Claude export data from JSON files.
"""

import sys
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))

from base_rag_example import BaseRAGExample
from chunking import create_text_chunks

from .claude_data.claude_reader import ClaudeReader


class ClaudeRAG(BaseRAGExample):
    """RAG example for Claude conversation data."""

    def __init__(self):
        # Set default values BEFORE calling super().__init__
        self.max_items_default = -1  # Process all conversations by default
        self.embedding_model_default = (
            "sentence-transformers/all-MiniLM-L6-v2"  # Fast 384-dim model
        )

        super().__init__(
            name="Claude",
            description="Process and query Claude conversation exports with LEANN",
            default_index_name="claude_conversations_index",
        )

    def _add_specific_arguments(self, parser):
        """Add Claude-specific arguments."""
        claude_group = parser.add_argument_group("Claude Parameters")
        claude_group.add_argument(
            "--export-path",
            type=str,
            default="./claude_export",
            help="Path to Claude export file (.json or .zip) or directory containing exports (default: ./claude_export)",
        )
        claude_group.add_argument(
            "--concatenate-conversations",
            action="store_true",
            default=True,
            help="Concatenate messages within conversations for better context (default: True)",
        )
        claude_group.add_argument(
            "--separate-messages",
            action="store_true",
            help="Process each message as a separate document (overrides --concatenate-conversations)",
        )
        claude_group.add_argument(
            "--chunk-size", type=int, default=512, help="Text chunk size (default: 512)"
        )
        claude_group.add_argument(
            "--chunk-overlap", type=int, default=128, help="Text chunk overlap (default: 128)"
        )

    def _find_claude_exports(self, export_path: Path) -> list[Path]:
        """
        Find Claude export files in the given path.

        Args:
            export_path: Path to search for exports

        Returns:
            List of paths to Claude export files
        """
        export_files = []

        if export_path.is_file():
            if export_path.suffix.lower() in [".zip", ".json"]:
                export_files.append(export_path)
        elif export_path.is_dir():
            # Look for zip and json files
            export_files.extend(export_path.glob("*.zip"))
            export_files.extend(export_path.glob("*.json"))

        return export_files

    async def load_data(self, args) -> list[str]:
        """Load Claude export data and convert to text chunks."""
        export_path = Path(args.export_path)

        if not export_path.exists():
            print(f"Claude export path not found: {export_path}")
            print(
                "Please ensure you have exported your Claude data and placed it in the correct location."
            )
            print("\nTo export your Claude data:")
            print("1. Open Claude in your browser")
            print("2. Look for export/download options in settings or conversation menu")
            print("3. Download the conversation data (usually in JSON format)")
            print("4. Place the file/directory at the specified path")
            print(
                "\nNote: Claude export methods may vary. Check Claude's help documentation for current instructions."
            )
            return []

        # Find export files
        export_files = self._find_claude_exports(export_path)

        if not export_files:
            print(f"No Claude export files (.json or .zip) found in: {export_path}")
            return []

        print(f"Found {len(export_files)} Claude export files")

        # Create reader with appropriate settings
        concatenate = args.concatenate_conversations and not args.separate_messages
        reader = ClaudeReader(concatenate_conversations=concatenate)

        # Process each export file
        all_documents = []
        total_processed = 0

        for i, export_file in enumerate(export_files):
            print(f"\nProcessing export file {i + 1}/{len(export_files)}: {export_file.name}")

            try:
                # Apply max_items limit per file
                max_per_file = -1
                if args.max_items > 0:
                    remaining = args.max_items - total_processed
                    if remaining <= 0:
                        break
                    max_per_file = remaining

                # Load conversations
                documents = reader.load_data(
                    claude_export_path=str(export_file),
                    max_count=max_per_file,
                    include_metadata=True,
                )

                if documents:
                    all_documents.extend(documents)
                    total_processed += len(documents)
                    print(f"Processed {len(documents)} conversations from this file")
                else:
                    print(f"No conversations loaded from {export_file}")

            except Exception as e:
                print(f"Error processing {export_file}: {e}")
                continue

        if not all_documents:
            print("No conversations found to process!")
            print("\nTroubleshooting:")
            print("- Ensure the export file is a valid Claude export")
            print("- Check that the JSON file contains conversation data")
            print("- Try using a different export format or method")
            print("- Check Claude's documentation for current export procedures")
            return []

        print(f"\nTotal conversations processed: {len(all_documents)}")
        print("Now starting to split into text chunks... this may take some time")

        # Convert to text chunks
        all_texts = create_text_chunks(
            all_documents, chunk_size=args.chunk_size, chunk_overlap=args.chunk_overlap
        )

        print(f"Created {len(all_texts)} text chunks from {len(all_documents)} conversations")
        return all_texts


if __name__ == "__main__":
    import asyncio

    # Example queries for Claude RAG
    print("\n🤖 Claude RAG Example")
    print("=" * 50)
    print("\nExample queries you can try:")
    print("- 'What did I ask Claude about Python programming?'")
    print("- 'Show me conversations about machine learning'")
    print("- 'Find discussions about code optimization'")
    print("- 'What advice did Claude give me about software design?'")
    print("- 'Search for conversations about debugging techniques'")
    print("\nTo get started:")
    print("1. Export your Claude conversation data")
    print("2. Place the JSON/ZIP file in ./claude_export/")
    print("3. Run this script to build your personal Claude knowledge base!")
    print("\nOr run without --query for interactive mode\n")

    rag = ClaudeRAG()
    asyncio.run(rag.run())
@@ -74,7 +74,7 @@ class ChromeHistoryReader(BaseReader):
            if count >= max_count and max_count > 0:
                break

-            last_visit, url, title, visit_count, typed_count, _hidden = row
+            last_visit, url, title, visit_count, typed_count, hidden = row

            # Create document content with metadata embedded in text
            doc_content = f"""
@@ -1 +0,0 @@
"""iMessage data processing module."""
@@ -1,342 +0,0 @@
"""
iMessage data reader.

Reads and processes iMessage conversation data from the macOS Messages database.
"""

import sqlite3
from datetime import datetime
from pathlib import Path
from typing import Any

from llama_index.core import Document
from llama_index.core.readers.base import BaseReader


class IMessageReader(BaseReader):
    """
    iMessage data reader.

    Reads iMessage conversation data from the macOS Messages database (chat.db).
    Processes conversations into structured documents with metadata.
    """

    def __init__(self, concatenate_conversations: bool = True) -> None:
        """
        Initialize.

        Args:
            concatenate_conversations: Whether to concatenate messages within conversations for better context
        """
        self.concatenate_conversations = concatenate_conversations

    def _get_default_chat_db_path(self) -> Path:
        """
        Get the default path to the iMessage chat database.

        Returns:
            Path to the chat.db file
        """
        home = Path.home()
        return home / "Library" / "Messages" / "chat.db"

    def _convert_cocoa_timestamp(self, cocoa_timestamp: int) -> str:
        """
        Convert Cocoa timestamp to readable format.

        Args:
            cocoa_timestamp: Timestamp in Cocoa format (nanoseconds since 2001-01-01)

        Returns:
            Formatted timestamp string
        """
        if cocoa_timestamp == 0:
            return "Unknown"

        try:
            # Cocoa timestamp is nanoseconds since 2001-01-01 00:00:00 UTC
            # Convert to seconds and add to Unix epoch
            cocoa_epoch = datetime(2001, 1, 1)
            unix_timestamp = cocoa_timestamp / 1_000_000_000  # Convert nanoseconds to seconds
            message_time = cocoa_epoch.timestamp() + unix_timestamp
            return datetime.fromtimestamp(message_time).strftime("%Y-%m-%d %H:%M:%S")
        except (ValueError, OSError):
            return "Unknown"

    def _get_contact_name(self, handle_id: str) -> str:
        """
        Get a readable contact name from handle ID.

        Args:
            handle_id: The handle ID (phone number or email)

        Returns:
            Formatted contact name
        """
        if not handle_id:
            return "Unknown"

        # Clean up phone numbers and emails for display
        if "@" in handle_id:
            return handle_id  # Email address
        elif handle_id.startswith("+"):
            return handle_id  # International phone number
        else:
            # Try to format as phone number
            digits = "".join(filter(str.isdigit, handle_id))
            if len(digits) == 10:
                return f"({digits[:3]}) {digits[3:6]}-{digits[6:]}"
            elif len(digits) == 11 and digits[0] == "1":
                return f"+1 ({digits[1:4]}) {digits[4:7]}-{digits[7:]}"
            else:
                return handle_id

    def _read_messages_from_db(self, db_path: Path) -> list[dict]:
        """
        Read messages from the iMessage database.

        Args:
            db_path: Path to the chat.db file

        Returns:
            List of message dictionaries
        """
        if not db_path.exists():
            print(f"iMessage database not found at: {db_path}")
            return []

        try:
            # Connect to the database
            conn = sqlite3.connect(str(db_path))
            cursor = conn.cursor()

            # Query to get messages with chat and handle information
            query = """
                SELECT
                    m.ROWID as message_id,
                    m.text,
                    m.date,
                    m.is_from_me,
                    m.service,
                    c.chat_identifier,
                    c.display_name as chat_display_name,
                    h.id as handle_id,
                    c.ROWID as chat_id
                FROM message m
                LEFT JOIN chat_message_join cmj ON m.ROWID = cmj.message_id
                LEFT JOIN chat c ON cmj.chat_id = c.ROWID
                LEFT JOIN handle h ON m.handle_id = h.ROWID
                WHERE m.text IS NOT NULL AND m.text != ''
                ORDER BY c.ROWID, m.date
            """

            cursor.execute(query)
            rows = cursor.fetchall()

            messages = []
            for row in rows:
                (
                    message_id,
                    text,
                    date,
                    is_from_me,
                    service,
                    chat_identifier,
                    chat_display_name,
                    handle_id,
                    chat_id,
                ) = row

                message = {
                    "message_id": message_id,
                    "text": text,
                    "timestamp": self._convert_cocoa_timestamp(date),
                    "is_from_me": bool(is_from_me),
                    "service": service or "iMessage",
                    "chat_identifier": chat_identifier or "Unknown",
                    "chat_display_name": chat_display_name or "Unknown Chat",
                    "handle_id": handle_id or "Unknown",
                    "contact_name": self._get_contact_name(handle_id or ""),
                    "chat_id": chat_id,
                }
                messages.append(message)

            conn.close()
            print(f"Found {len(messages)} messages in database")
            return messages

        except sqlite3.Error as e:
            print(f"Error reading iMessage database: {e}")
            return []
        except Exception as e:
            print(f"Unexpected error reading iMessage database: {e}")
            return []

    def _group_messages_by_chat(self, messages: list[dict]) -> dict[int, list[dict]]:
        """
        Group messages by chat ID.

        Args:
            messages: List of message dictionaries

        Returns:
            Dictionary mapping chat_id to list of messages
        """
        chats = {}
        for message in messages:
            chat_id = message["chat_id"]
            if chat_id not in chats:
                chats[chat_id] = []
            chats[chat_id].append(message)

        return chats

    def _create_concatenated_content(self, chat_id: int, messages: list[dict]) -> str:
        """
        Create concatenated content from chat messages.

        Args:
            chat_id: The chat ID
            messages: List of messages in the chat

        Returns:
            Concatenated text content
        """
        if not messages:
            return ""

        # Get chat info from first message
        first_msg = messages[0]
        chat_name = first_msg["chat_display_name"]
        chat_identifier = first_msg["chat_identifier"]

        # Build message content
        message_parts = []
        for message in messages:
            timestamp = message["timestamp"]
            is_from_me = message["is_from_me"]
            text = message["text"]
            contact_name = message["contact_name"]

            if is_from_me:
                prefix = "[You]"
            else:
                prefix = f"[{contact_name}]"

            if timestamp != "Unknown":
                prefix += f" ({timestamp})"

            message_parts.append(f"{prefix}: {text}")

        concatenated_text = "\n\n".join(message_parts)

        doc_content = f"""Chat: {chat_name}
Identifier: {chat_identifier}
Messages ({len(messages)} messages):

{concatenated_text}
"""
        return doc_content

    def _create_individual_content(self, message: dict) -> str:
        """
        Create content for individual message.

        Args:
            message: Message dictionary

        Returns:
            Formatted message content
        """
        timestamp = message["timestamp"]
        is_from_me = message["is_from_me"]
        text = message["text"]
        contact_name = message["contact_name"]
        chat_name = message["chat_display_name"]

        sender = "You" if is_from_me else contact_name

        return f"""Message from {sender} in chat "{chat_name}"
Time: {timestamp}
Content: {text}
"""

    def load_data(self, input_dir: str | None = None, **load_kwargs: Any) -> list[Document]:
        """
        Load iMessage data and return as documents.

        Args:
            input_dir: Optional path to directory containing chat.db file.
                If not provided, uses default macOS location.
            **load_kwargs: Additional arguments (unused)

        Returns:
            List of Document objects containing iMessage data
        """
        docs = []

        # Determine database path
        if input_dir:
            db_path = Path(input_dir) / "chat.db"
        else:
            db_path = self._get_default_chat_db_path()

        print(f"Reading iMessage database from: {db_path}")

        # Read messages from database
        messages = self._read_messages_from_db(db_path)
        if not messages:
            return docs

        if self.concatenate_conversations:
            # Group messages by chat and create concatenated documents
            chats = self._group_messages_by_chat(messages)

            for chat_id, chat_messages in chats.items():
                if not chat_messages:
                    continue

                content = self._create_concatenated_content(chat_id, chat_messages)

                # Create metadata
                first_msg = chat_messages[0]
                last_msg = chat_messages[-1]

                metadata = {
                    "source": "iMessage",
                    "chat_id": chat_id,
                    "chat_name": first_msg["chat_display_name"],
                    "chat_identifier": first_msg["chat_identifier"],
                    "message_count": len(chat_messages),
                    "first_message_date": first_msg["timestamp"],
                    "last_message_date": last_msg["timestamp"],
                    "participants": list(
                        {msg["contact_name"] for msg in chat_messages if not msg["is_from_me"]}
                    ),
                }

                doc = Document(text=content, metadata=metadata)
                docs.append(doc)

        else:
            # Create individual documents for each message
            for message in messages:
                content = self._create_individual_content(message)

                metadata = {
                    "source": "iMessage",
                    "message_id": message["message_id"],
                    "chat_id": message["chat_id"],
                    "chat_name": message["chat_display_name"],
                    "chat_identifier": message["chat_identifier"],
                    "timestamp": message["timestamp"],
                    "is_from_me": message["is_from_me"],
                    "contact_name": message["contact_name"],
                    "service": message["service"],
                }

                doc = Document(text=content, metadata=metadata)
                docs.append(doc)

        print(f"Created {len(docs)} documents from iMessage data")
        return docs
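Note: a minimal sketch of using the reader above against the default macOS database; the terminal running it needs Full Disk Access, as the example script below also notes.

```python
from apps.imessage_data.imessage_reader import IMessageReader  # import path as used in the example below

reader = IMessageReader(concatenate_conversations=True)
docs = reader.load_data()  # defaults to ~/Library/Messages/chat.db
for doc in docs[:3]:
    print(doc.metadata["chat_name"], "-", doc.metadata["message_count"], "messages")
```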
@@ -1,125 +0,0 @@
"""
iMessage RAG Example.

This example demonstrates how to build a RAG system on your iMessage conversation history.
"""

import asyncio
from pathlib import Path

from leann.chunking_utils import create_text_chunks

from apps.base_rag_example import BaseRAGExample
from apps.imessage_data.imessage_reader import IMessageReader


class IMessageRAG(BaseRAGExample):
    """RAG example for iMessage conversation history."""

    def __init__(self):
        super().__init__(
            name="iMessage",
            description="RAG on your iMessage conversation history",
            default_index_name="imessage_index",
        )

    def _add_specific_arguments(self, parser):
        """Add iMessage-specific arguments."""
        imessage_group = parser.add_argument_group("iMessage Parameters")
        imessage_group.add_argument(
            "--db-path",
            type=str,
            default=None,
            help="Path to iMessage chat.db file (default: ~/Library/Messages/chat.db)",
        )
        imessage_group.add_argument(
            "--concatenate-conversations",
            action="store_true",
            default=True,
            help="Concatenate messages within conversations for better context (default: True)",
        )
        imessage_group.add_argument(
            "--no-concatenate-conversations",
            action="store_true",
            help="Process each message individually instead of concatenating by conversation",
        )
        imessage_group.add_argument(
            "--chunk-size",
            type=int,
            default=1000,
            help="Maximum characters per text chunk (default: 1000)",
        )
        imessage_group.add_argument(
            "--chunk-overlap",
            type=int,
            default=200,
            help="Overlap between text chunks (default: 200)",
        )

    async def load_data(self, args) -> list[str]:
        """Load iMessage history and convert to text chunks."""
        print("Loading iMessage conversation history...")

        # Determine concatenation setting
        concatenate = args.concatenate_conversations and not args.no_concatenate_conversations

        # Initialize iMessage reader
        reader = IMessageReader(concatenate_conversations=concatenate)

        # Load documents
        try:
            if args.db_path:
                # Use custom database path
                db_dir = str(Path(args.db_path).parent)
                documents = reader.load_data(input_dir=db_dir)
            else:
                # Use default macOS location
                documents = reader.load_data()

        except Exception as e:
            print(f"Error loading iMessage data: {e}")
            print("\nTroubleshooting tips:")
            print("1. Make sure you have granted Full Disk Access to your terminal/IDE")
            print("2. Check that the iMessage database exists at ~/Library/Messages/chat.db")
            print("3. Try specifying a custom path with --db-path if you have a backup")
            return []

        if not documents:
            print("No iMessage conversations found!")
            return []

        print(f"Loaded {len(documents)} iMessage documents")

        # Show some statistics
        total_messages = sum(doc.metadata.get("message_count", 1) for doc in documents)
        print(f"Total messages: {total_messages}")

        if concatenate:
            # Show chat statistics
            chat_names = [doc.metadata.get("chat_name", "Unknown") for doc in documents]
            unique_chats = len(set(chat_names))
            print(f"Unique conversations: {unique_chats}")

        # Convert to text chunks
        all_texts = create_text_chunks(
            documents,
            chunk_size=args.chunk_size,
            chunk_overlap=args.chunk_overlap,
        )

        # Apply max_items limit if specified
        if args.max_items > 0:
            all_texts = all_texts[: args.max_items]
            print(f"Limited to {len(all_texts)} text chunks (max_items={args.max_items})")

        return all_texts


async def main():
    """Main entry point."""
    app = IMessageRAG()
    await app.run()


if __name__ == "__main__":
    asyncio.run(main())
@@ -26,21 +26,6 @@ leann build my-code-index --docs ./src --use-ast-chunking
uv pip install -e "."
```

-#### For normal users (PyPI install)
-- Use `pip install leann` or `uv pip install leann`.
-- `astchunk` is pulled automatically from PyPI as a dependency; no extra steps.
-
-#### For developers (from source, editable)
-```bash
-git clone https://github.com/yichuan-w/LEANN.git leann
-cd leann
-git submodule update --init --recursive
-uv sync
-```
-- This repo vendors `astchunk` as a git submodule at `packages/astchunk-leann` (our fork).
-- `[tool.uv.sources]` maps the `astchunk` package to that path in editable mode.
-- You can edit code under `packages/astchunk-leann` and Python will use your changes immediately (no separate `pip install astchunk` needed).

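Note: a quick, hedged way to confirm which `astchunk` the environment actually resolves after either install path described in the removed section above:

```python
import astchunk

# An editable install from source should resolve into packages/astchunk-leann;
# a PyPI install resolves into site-packages.
print(astchunk.__file__)
```
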
## Best Practices

### When to Use AST Chunking
@@ -1,149 +0,0 @@
# LEANN Grep Search Usage Guide

## Overview

LEANN's grep search functionality provides exact text matching for finding specific code patterns, error messages, function names, or exact phrases in your indexed documents.

## Basic Usage

### Simple Grep Search

```python
from leann.api import LeannSearcher

searcher = LeannSearcher("your_index_path")

# Exact text search
results = searcher.search("def authenticate_user", use_grep=True, top_k=5)

for result in results:
    print(f"Score: {result.score}")
    print(f"Text: {result.text[:100]}...")
    print("-" * 40)
```

### Comparison: Semantic vs Grep Search

```python
# Semantic search - finds conceptually similar content
semantic_results = searcher.search("machine learning algorithms", top_k=3)

# Grep search - finds exact text matches
grep_results = searcher.search("def train_model", use_grep=True, top_k=3)
```

## When to Use Grep Search

### Use Cases

- **Code Search**: Finding specific function definitions, class names, or variable references
- **Error Debugging**: Locating exact error messages or stack traces
- **Documentation**: Finding specific API endpoints or exact terminology

### Examples

```python
# Find function definitions
functions = searcher.search("def __init__", use_grep=True)

# Find import statements
imports = searcher.search("from sklearn import", use_grep=True)

# Find specific error types
errors = searcher.search("FileNotFoundError", use_grep=True)

# Find TODO comments
todos = searcher.search("TODO:", use_grep=True)

# Find configuration entries
configs = searcher.search("server_port=", use_grep=True)
```

## Technical Details

### How It Works

1. **File Location**: Grep search operates on the raw text stored in `.jsonl` files
2. **Command Execution**: Uses the system `grep` command with case-insensitive search
3. **Result Processing**: Parses JSON lines and extracts text and metadata
4. **Scoring**: Simple frequency-based scoring based on query term occurrences

### Search Process

```
Query: "def train_model"
    ↓
grep -i -n "def train_model" documents.leann.passages.jsonl
    ↓
Parse matching JSON lines
    ↓
Calculate scores based on term frequency
    ↓
Return top_k results
```

### Scoring Algorithm

```python
# Term frequency in document
score = text.lower().count(query.lower())
```

Results are ranked by score (highest first), with higher scores indicating more occurrences of the search term.

## Error Handling

### Common Issues

#### Grep Command Not Found
```
RuntimeError: grep command not found. Please install grep or use semantic search.
```

**Solution**: Install grep on your system:
- **Ubuntu/Debian**: `sudo apt-get install grep`
- **macOS**: grep is pre-installed
- **Windows**: Use WSL or install grep via Git Bash/MSYS2

#### No Results Found
```python
# Check if your query exists in the raw data
results = searcher.search("your_query", use_grep=True)
if not results:
    print("No exact matches found. Try:")
    print("1. Check spelling and case")
    print("2. Use partial terms")
    print("3. Switch to semantic search")
```

## Complete Example

```python
#!/usr/bin/env python3
"""
Grep Search Example
Demonstrates grep search for exact text matching.
"""

from leann.api import LeannSearcher


def demonstrate_grep_search():
    # Initialize searcher
    searcher = LeannSearcher("my_index")

    print("=== Function Search ===")
    functions = searcher.search("def __init__", use_grep=True, top_k=5)
    for i, result in enumerate(functions, 1):
        print(f"{i}. Score: {result.score}")
        print(f"   Preview: {result.text[:60]}...")
        print()

    print("=== Error Search ===")
    errors = searcher.search("FileNotFoundError", use_grep=True, top_k=3)
    for result in errors:
        print(f"Content: {result.text.strip()}")
        print("-" * 40)


if __name__ == "__main__":
    demonstrate_grep_search()
```
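Note: the removed guide above describes grep search as `grep -i -n` over the passages `.jsonl` plus frequency scoring. The standalone sketch below re-implements that flow for illustration; the file name and JSON field names are assumptions based on the guide, not the library's internal API.

```python
import json
import subprocess


def grep_passages(jsonl_path: str, query: str, top_k: int = 5) -> list[dict]:
    """Exact-match search over a passages .jsonl file, scored by term frequency."""
    proc = subprocess.run(
        ["grep", "-i", "-n", query, jsonl_path],
        capture_output=True,
        text=True,
    )
    results = []
    for line in proc.stdout.splitlines():
        _, _, payload = line.partition(":")  # strip the "lineno:" prefix added by -n
        try:
            record = json.loads(payload)
        except json.JSONDecodeError:
            continue
        text = record.get("text", "")  # field name is an assumption
        score = text.lower().count(query.lower())  # frequency-based scoring, as described above
        results.append({"text": text, "score": score, "metadata": record.get("metadata", {})})
    return sorted(results, key=lambda r: r["score"], reverse=True)[:top_k]


# Usage (path is illustrative):
# hits = grep_passages("documents.leann.passages.jsonl", "def train_model", top_k=3)
```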
@@ -1,404 +0,0 @@
"""Dynamic HNSW update demo without compact storage.

This script reproduces the minimal scenario we used while debugging on-the-fly
recompute:

1. Build a non-compact HNSW index from the first few paragraphs of a text file.
2. Print the top results with `recompute_embeddings=True`.
3. Append additional paragraphs with :meth:`LeannBuilder.update_index`.
4. Run the same query again to show the newly inserted passages.

Run it with ``uv`` (optionally pointing LEANN_HNSW_LOG_PATH at a file to inspect
ZMQ activity)::

    LEANN_HNSW_LOG_PATH=embedding_fetch.log \
        uv run -m examples.dynamic_update_no_recompute \
        --index-path .leann/examples/leann-demo.leann

By default the script builds an index from ``data/2501.14312v1 (1).pdf`` and
then updates it with LEANN-related material from ``data/2506.08276v1.pdf``.
It issues the query "What's LEANN?" before and after the update to show how the
new passages become immediately searchable. The script uses the
``sentence-transformers/all-MiniLM-L6-v2`` model with ``is_recompute=True`` so
Faiss pulls existing vectors on demand via the ZMQ embedding server, while
freshly added passages are embedded locally just like the initial build.

To make storage comparisons easy, the script can also build a matching
``is_recompute=False`` baseline (enabled by default) and report the index size
delta after the update. Disable the baseline run with
``--skip-compare-no-recompute`` if you only need the recompute flow.
"""
|
|
||||||
import argparse
|
|
||||||
import json
|
|
||||||
from collections.abc import Iterable
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
from leann.api import LeannBuilder, LeannSearcher
|
|
||||||
from leann.registry import register_project_directory
|
|
||||||
|
|
||||||
from apps.chunking import create_text_chunks
|
|
||||||
|
|
||||||
REPO_ROOT = Path(__file__).resolve().parents[1]
|
|
||||||
|
|
||||||
DEFAULT_QUERY = "What's LEANN?"
|
|
||||||
DEFAULT_INITIAL_FILES = [REPO_ROOT / "data" / "2501.14312v1 (1).pdf"]
|
|
||||||
DEFAULT_UPDATE_FILES = [REPO_ROOT / "data" / "2506.08276v1.pdf"]
|
|
||||||
|
|
||||||
|
|
||||||
def load_chunks_from_files(paths: list[Path]) -> list[str]:
|
|
||||||
from llama_index.core import SimpleDirectoryReader
|
|
||||||
|
|
||||||
documents = []
|
|
||||||
for path in paths:
|
|
||||||
p = path.expanduser().resolve()
|
|
||||||
if not p.exists():
|
|
||||||
raise FileNotFoundError(f"Input path not found: {p}")
|
|
||||||
if p.is_dir():
|
|
||||||
reader = SimpleDirectoryReader(str(p), recursive=False)
|
|
||||||
documents.extend(reader.load_data(show_progress=True))
|
|
||||||
else:
|
|
||||||
reader = SimpleDirectoryReader(input_files=[str(p)])
|
|
||||||
documents.extend(reader.load_data(show_progress=True))
|
|
||||||
|
|
||||||
if not documents:
|
|
||||||
return []
|
|
||||||
|
|
||||||
chunks = create_text_chunks(
|
|
||||||
documents,
|
|
||||||
chunk_size=512,
|
|
||||||
chunk_overlap=128,
|
|
||||||
use_ast_chunking=False,
|
|
||||||
)
|
|
||||||
return [c for c in chunks if isinstance(c, str) and c.strip()]
|
|
||||||
|
|
||||||
|
|
||||||
def run_search(index_path: Path, query: str, top_k: int, *, recompute_embeddings: bool) -> list:
|
|
||||||
searcher = LeannSearcher(str(index_path))
|
|
||||||
try:
|
|
||||||
return searcher.search(
|
|
||||||
query=query,
|
|
||||||
top_k=top_k,
|
|
||||||
recompute_embeddings=recompute_embeddings,
|
|
||||||
batch_size=16,
|
|
||||||
)
|
|
||||||
finally:
|
|
||||||
searcher.cleanup()
|
|
||||||
|
|
||||||
|
|
||||||
def print_results(title: str, results: Iterable) -> None:
|
|
||||||
print(f"\n=== {title} ===")
|
|
||||||
res_list = list(results)
|
|
||||||
print(f"results count: {len(res_list)}")
|
|
||||||
print("passages:")
|
|
||||||
if not res_list:
|
|
||||||
print(" (no passages returned)")
|
|
||||||
for res in res_list:
|
|
||||||
snippet = res.text.replace("\n", " ")[:120]
|
|
||||||
print(f" - {res.id}: {snippet}... (score={res.score:.4f})")
|
|
||||||
|
|
||||||
|
|
||||||
def build_initial_index(
|
|
||||||
index_path: Path,
|
|
||||||
paragraphs: list[str],
|
|
||||||
model_name: str,
|
|
||||||
embedding_mode: str,
|
|
||||||
is_recompute: bool,
|
|
||||||
) -> None:
|
|
||||||
builder = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model=model_name,
|
|
||||||
embedding_mode=embedding_mode,
|
|
||||||
is_compact=False,
|
|
||||||
is_recompute=is_recompute,
|
|
||||||
)
|
|
||||||
for idx, passage in enumerate(paragraphs):
|
|
||||||
builder.add_text(passage, metadata={"id": str(idx)})
|
|
||||||
builder.build_index(str(index_path))
|
|
||||||
|
|
||||||
|
|
||||||
def update_index(
|
|
||||||
index_path: Path,
|
|
||||||
start_id: int,
|
|
||||||
paragraphs: list[str],
|
|
||||||
model_name: str,
|
|
||||||
embedding_mode: str,
|
|
||||||
is_recompute: bool,
|
|
||||||
) -> None:
|
|
||||||
updater = LeannBuilder(
|
|
||||||
backend_name="hnsw",
|
|
||||||
embedding_model=model_name,
|
|
||||||
embedding_mode=embedding_mode,
|
|
||||||
is_compact=False,
|
|
||||||
is_recompute=is_recompute,
|
|
||||||
)
|
|
||||||
for offset, passage in enumerate(paragraphs, start=start_id):
|
|
||||||
updater.add_text(passage, metadata={"id": str(offset)})
|
|
||||||
updater.update_index(str(index_path))
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_index_dir(index_path: Path) -> None:
|
|
||||||
index_path.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup_index_files(index_path: Path) -> None:
|
|
||||||
"""Remove leftover index artifacts for a clean rebuild."""
|
|
||||||
|
|
||||||
parent = index_path.parent
|
|
||||||
if not parent.exists():
|
|
||||||
return
|
|
||||||
stem = index_path.stem
|
|
||||||
for file in parent.glob(f"{stem}*"):
|
|
||||||
if file.is_file():
|
|
||||||
file.unlink()
|
|
||||||
|
|
||||||
|
|
||||||
def index_file_size(index_path: Path) -> int:
|
|
||||||
"""Return the size of the primary .index file for the given index path."""
|
|
||||||
|
|
||||||
index_file = index_path.parent / f"{index_path.stem}.index"
|
|
||||||
return index_file.stat().st_size if index_file.exists() else 0
|
|
||||||
|
|
||||||
|
|
||||||
def load_metadata_snapshot(index_path: Path) -> dict[str, Any] | None:
|
|
||||||
meta_path = index_path.parent / f"{index_path.name}.meta.json"
|
|
||||||
if not meta_path.exists():
|
|
||||||
return None
|
|
||||||
try:
|
|
||||||
return json.loads(meta_path.read_text())
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def run_workflow(
|
|
||||||
*,
|
|
||||||
label: str,
|
|
||||||
index_path: Path,
|
|
||||||
initial_paragraphs: list[str],
|
|
||||||
update_paragraphs: list[str],
|
|
||||||
model_name: str,
|
|
||||||
embedding_mode: str,
|
|
||||||
is_recompute: bool,
|
|
||||||
query: str,
|
|
||||||
top_k: int,
|
|
||||||
) -> dict[str, Any]:
|
|
||||||
prefix = f"[{label}] " if label else ""
|
|
||||||
|
|
||||||
ensure_index_dir(index_path)
|
|
||||||
cleanup_index_files(index_path)
|
|
||||||
|
|
||||||
print(f"{prefix}Building initial index...")
|
|
||||||
build_initial_index(
|
|
||||||
index_path,
|
|
||||||
initial_paragraphs,
|
|
||||||
model_name,
|
|
||||||
embedding_mode,
|
|
||||||
is_recompute=is_recompute,
|
|
||||||
)
|
|
||||||
|
|
||||||
initial_size = index_file_size(index_path)
|
|
||||||
before_results = run_search(
|
|
||||||
index_path,
|
|
||||||
query,
|
|
||||||
top_k,
|
|
||||||
recompute_embeddings=is_recompute,
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"\n{prefix}Updating index with additional passages...")
|
|
||||||
update_index(
|
|
||||||
index_path,
|
|
||||||
start_id=len(initial_paragraphs),
|
|
||||||
paragraphs=update_paragraphs,
|
|
||||||
model_name=model_name,
|
|
||||||
embedding_mode=embedding_mode,
|
|
||||||
is_recompute=is_recompute,
|
|
||||||
)
|
|
||||||
|
|
||||||
after_results = run_search(
|
|
||||||
index_path,
|
|
||||||
query,
|
|
||||||
top_k,
|
|
||||||
recompute_embeddings=is_recompute,
|
|
||||||
)
|
|
||||||
updated_size = index_file_size(index_path)
|
|
||||||
|
|
||||||
return {
|
|
||||||
"initial_size": initial_size,
|
|
||||||
"updated_size": updated_size,
|
|
||||||
"delta": updated_size - initial_size,
|
|
||||||
"before_results": before_results,
|
|
||||||
"after_results": after_results,
|
|
||||||
"metadata": load_metadata_snapshot(index_path),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> None:
|
|
||||||
parser = argparse.ArgumentParser(description=__doc__)
|
|
||||||
parser.add_argument(
|
|
||||||
"--initial-files",
|
|
||||||
type=Path,
|
|
||||||
nargs="+",
|
|
||||||
default=DEFAULT_INITIAL_FILES,
|
|
||||||
help="Initial document files (PDF/TXT) used to build the base index",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--index-path",
|
|
||||||
type=Path,
|
|
||||||
default=Path(".leann/examples/leann-demo.leann"),
|
|
||||||
help="Destination index path (default: .leann/examples/leann-demo.leann)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--initial-count",
|
|
||||||
type=int,
|
|
||||||
default=8,
|
|
||||||
help="Number of chunks to use from the initial documents (default: 8)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--update-files",
|
|
||||||
type=Path,
|
|
||||||
nargs="*",
|
|
||||||
default=DEFAULT_UPDATE_FILES,
|
|
||||||
help="Additional documents to add during update (PDF/TXT)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--update-count",
|
|
||||||
type=int,
|
|
||||||
default=4,
|
|
||||||
help="Number of chunks to append from update documents (default: 4)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--update-text",
|
|
||||||
type=str,
|
|
||||||
default=(
|
|
||||||
"LEANN (Lightweight Embedding ANN) is an indexing toolkit focused on "
|
|
||||||
"recompute-aware HNSW graphs, allowing embeddings to be regenerated "
|
|
||||||
"on demand to keep disk usage minimal."
|
|
||||||
),
|
|
||||||
help="Fallback text to append if --update-files is omitted",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--top-k",
|
|
||||||
type=int,
|
|
||||||
default=4,
|
|
||||||
help="Number of results to show for each search (default: 4)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--query",
|
|
||||||
type=str,
|
|
||||||
default=DEFAULT_QUERY,
|
|
||||||
help="Query to run before/after the update",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--embedding-model",
|
|
||||||
type=str,
|
|
||||||
default="sentence-transformers/all-MiniLM-L6-v2",
|
|
||||||
help="Embedding model name",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--embedding-mode",
|
|
||||||
type=str,
|
|
||||||
default="sentence-transformers",
|
|
||||||
choices=["sentence-transformers", "openai", "mlx", "ollama"],
|
|
||||||
help="Embedding backend mode",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--compare-no-recompute",
|
|
||||||
dest="compare_no_recompute",
|
|
||||||
action="store_true",
|
|
||||||
help="Also run a baseline with is_recompute=False and report its index growth.",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--skip-compare-no-recompute",
|
|
||||||
dest="compare_no_recompute",
|
|
||||||
action="store_false",
|
|
||||||
help="Skip building the no-recompute baseline.",
|
|
||||||
)
|
|
||||||
parser.set_defaults(compare_no_recompute=True)
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
ensure_index_dir(args.index_path)
|
|
||||||
register_project_directory(REPO_ROOT)
|
|
||||||
|
|
||||||
initial_chunks = load_chunks_from_files(list(args.initial_files))
|
|
||||||
if not initial_chunks:
|
|
||||||
raise ValueError("No text chunks extracted from the initial files.")
|
|
||||||
|
|
||||||
initial = initial_chunks[: args.initial_count]
|
|
||||||
if not initial:
|
|
||||||
raise ValueError("Initial chunk set is empty after applying --initial-count.")
|
|
||||||
|
|
||||||
if args.update_files:
|
|
||||||
update_chunks = load_chunks_from_files(list(args.update_files))
|
|
||||||
if not update_chunks:
|
|
||||||
raise ValueError("No text chunks extracted from the update files.")
|
|
||||||
to_add = update_chunks[: args.update_count]
|
|
||||||
else:
|
|
||||||
if not args.update_text:
|
|
||||||
raise ValueError("Provide --update-files or --update-text for the update step.")
|
|
||||||
to_add = [args.update_text]
|
|
||||||
if not to_add:
|
|
||||||
raise ValueError("Update chunk set is empty after applying --update-count.")
|
|
||||||
|
|
||||||
recompute_stats = run_workflow(
|
|
||||||
label="recompute",
|
|
||||||
index_path=args.index_path,
|
|
||||||
initial_paragraphs=initial,
|
|
||||||
update_paragraphs=to_add,
|
|
||||||
model_name=args.embedding_model,
|
|
||||||
embedding_mode=args.embedding_mode,
|
|
||||||
is_recompute=True,
|
|
||||||
query=args.query,
|
|
||||||
top_k=args.top_k,
|
|
||||||
)
|
|
||||||
|
|
||||||
print_results("initial search", recompute_stats["before_results"])
|
|
||||||
print_results("after update", recompute_stats["after_results"])
|
|
||||||
print(
|
|
||||||
f"\n[recompute] Index file size change: {recompute_stats['initial_size']} -> {recompute_stats['updated_size']} bytes"
|
|
||||||
f" (Δ {recompute_stats['delta']})"
|
|
||||||
)
|
|
||||||
|
|
||||||
if recompute_stats["metadata"]:
|
|
||||||
meta_view = {k: recompute_stats["metadata"].get(k) for k in ("is_compact", "is_pruned")}
|
|
||||||
print("[recompute] metadata snapshot:")
|
|
||||||
print(json.dumps(meta_view, indent=2))
|
|
||||||
|
|
||||||
if args.compare_no_recompute:
|
|
||||||
baseline_path = (
|
|
||||||
args.index_path.parent / f"{args.index_path.stem}-norecompute{args.index_path.suffix}"
|
|
||||||
)
|
|
||||||
baseline_stats = run_workflow(
|
|
||||||
label="no-recompute",
|
|
||||||
index_path=baseline_path,
|
|
||||||
initial_paragraphs=initial,
|
|
||||||
update_paragraphs=to_add,
|
|
||||||
model_name=args.embedding_model,
|
|
||||||
embedding_mode=args.embedding_mode,
|
|
||||||
is_recompute=False,
|
|
||||||
query=args.query,
|
|
||||||
top_k=args.top_k,
|
|
||||||
)
|
|
||||||
|
|
||||||
print(
|
|
||||||
f"\n[no-recompute] Index file size change: {baseline_stats['initial_size']} -> {baseline_stats['updated_size']} bytes"
|
|
||||||
f" (Δ {baseline_stats['delta']})"
|
|
||||||
)
|
|
||||||
|
|
||||||
after_texts = [res.text for res in recompute_stats["after_results"]]
|
|
||||||
baseline_after_texts = [res.text for res in baseline_stats["after_results"]]
|
|
||||||
if after_texts == baseline_after_texts:
|
|
||||||
print(
|
|
||||||
"[no-recompute] Search results match recompute baseline; see above for the shared output."
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
print("[no-recompute] WARNING: search results differ from recompute baseline.")
|
|
||||||
|
|
||||||
if baseline_stats["metadata"]:
|
|
||||||
meta_view = {k: baseline_stats["metadata"].get(k) for k in ("is_compact", "is_pruned")}
|
|
||||||
print("[no-recompute] metadata snapshot:")
|
|
||||||
print(json.dumps(meta_view, indent=2))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
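For readers who just want the shape of the flow this deleted demo exercised, here is a minimal sketch of build → update → search, using only API calls that appear in the script above (the passage lists are placeholders; exact signatures follow the demo, not a guaranteed public contract):

```python
from leann.api import LeannBuilder, LeannSearcher

initial_passages = ["first passage...", "second passage..."]  # placeholder data
new_passages = ["later passage about LEANN..."]               # placeholder data

# Build a non-compact index whose stored embeddings can be recomputed on demand
builder = LeannBuilder(
    backend_name="hnsw",
    embedding_model="sentence-transformers/all-MiniLM-L6-v2",
    is_compact=False,
    is_recompute=True,
)
for i, passage in enumerate(initial_passages):
    builder.add_text(passage, metadata={"id": str(i)})
builder.build_index("demo.leann")

# Append more passages to the existing index in place
updater = LeannBuilder(
    backend_name="hnsw",
    embedding_model="sentence-transformers/all-MiniLM-L6-v2",
    is_compact=False,
    is_recompute=True,
)
for passage in new_passages:
    updater.add_text(passage)
updater.update_index("demo.leann")

# Newly appended passages are immediately searchable
searcher = LeannSearcher("demo.leann")
results = searcher.search("What's LEANN?", top_k=4, recompute_embeddings=True)
searcher.cleanup()
```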
@@ -1,35 +0,0 @@
"""
Grep Search Example

Shows how to use grep-based text search instead of semantic search.
Useful when you need exact text matches rather than meaning-based results.
"""

from leann import LeannSearcher

# Load your index
searcher = LeannSearcher("my-documents.leann")

# Regular semantic search
print("=== Semantic Search ===")
results = searcher.search("machine learning algorithms", top_k=3)
for result in results:
    print(f"Score: {result.score:.3f}")
    print(f"Text: {result.text[:80]}...")
    print()

# Grep-based search for exact text matches
print("=== Grep Search ===")
results = searcher.search("def train_model", top_k=3, use_grep=True)
for result in results:
    print(f"Score: {result.score}")
    print(f"Text: {result.text[:80]}...")
    print()

# Find specific error messages
error_results = searcher.search("FileNotFoundError", use_grep=True)
print(f"Found {len(error_results)} files mentioning FileNotFoundError")

# Search for class definitions
func_results = searcher.search("class SearchResult", use_grep=True, top_k=5)
print(f"Found {len(func_results)} class definitions")
28 llms.txt
@@ -1,28 +0,0 @@
# llms.txt — LEANN MCP and Agent Integration
product: LEANN
homepage: https://github.com/yichuan-w/LEANN
contact: https://github.com/yichuan-w/LEANN/issues

# Installation
install: uv tool install leann-core --with leann

# MCP Server Entry Point
mcp.server: leann_mcp
mcp.protocol_version: 2024-11-05

# Tools
mcp.tools: leann_list, leann_search

mcp.tool.leann_list.description: List available LEANN indexes
mcp.tool.leann_list.input: {}

mcp.tool.leann_search.description: Semantic search across a named LEANN index
mcp.tool.leann_search.input.index_name: string, required
mcp.tool.leann_search.input.query: string, required
mcp.tool.leann_search.input.top_k: integer, optional, default=5, min=1, max=20
mcp.tool.leann_search.input.complexity: integer, optional, default=32, min=16, max=128

# Notes
note: Build indexes with `leann build <name> --docs <files...>` before searching.
example.add: claude mcp add --scope user leann-server -- leann_mcp
example.verify: claude mcp list | cat
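To make the leann_search schema above concrete, the arguments an MCP client would pass to that tool could look like the following (a hypothetical payload; only the field names, defaults, and bounds come from the schema itself):

```python
# Hypothetical leann_search tool arguments, matching the schema above
leann_search_args = {
    "index_name": "my-docs",              # required
    "query": "How does recompute work?",  # required
    "top_k": 5,                           # optional, 1-20 (default 5)
    "complexity": 32,                     # optional, 16-128 (default 32)
}
```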
Submodule packages/astchunk-leann deleted from ad9afa07b9
@@ -1,11 +1,11 @@
 [build-system]
-requires = ["scikit-build-core>=0.10", "pybind11>=2.12.0", "numpy", "cmake>=3.30"]
+requires = ["scikit-build-core>=0.10", "pybind11>=2.12.0", "numpy"]
 build-backend = "scikit_build_core.build"

 [project]
 name = "leann-backend-diskann"
-version = "0.3.4"
-dependencies = ["leann-core==0.3.4", "numpy", "protobuf>=3.19.0"]
+version = "0.3.2"
+dependencies = ["leann-core==0.3.2", "numpy", "protobuf>=3.19.0"]

 [tool.scikit-build]
 # Key: simplified CMake path
Submodule packages/leann-backend-diskann/third_party/DiskANN updated: 19f9603c72...c593831474
@@ -49,28 +49,9 @@ set(BUILD_TESTING OFF CACHE BOOL "" FORCE)
 set(FAISS_ENABLE_C_API OFF CACHE BOOL "" FORCE)
 set(FAISS_OPT_LEVEL "generic" CACHE STRING "" FORCE)

-# Disable x86-specific SIMD optimizations (important for ARM64 compatibility)
+# Disable additional SIMD versions to speed up compilation
 set(FAISS_ENABLE_AVX2 OFF CACHE BOOL "" FORCE)
 set(FAISS_ENABLE_AVX512 OFF CACHE BOOL "" FORCE)
-set(FAISS_ENABLE_SSE4_1 OFF CACHE BOOL "" FORCE)
-
-# ARM64-specific configuration
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64")
-    message(STATUS "Configuring Faiss for ARM64 architecture")
-
-    if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
-        # Use SVE optimization level for ARM64 Linux (as seen in Faiss conda build)
-        set(FAISS_OPT_LEVEL "sve" CACHE STRING "" FORCE)
-        message(STATUS "Setting FAISS_OPT_LEVEL to 'sve' for ARM64 Linux")
-    else()
-        # Use generic optimization for other ARM64 platforms (like macOS)
-        set(FAISS_OPT_LEVEL "generic" CACHE STRING "" FORCE)
-        message(STATUS "Setting FAISS_OPT_LEVEL to 'generic' for ARM64 ${CMAKE_SYSTEM_NAME}")
-    endif()
-
-    # ARM64 compatibility: Faiss submodule has been modified to fix x86 header inclusion
-    message(STATUS "Using ARM64-compatible Faiss submodule")
-endif()
-
 # Additional optimization options from INSTALL.md
 set(CMAKE_BUILD_TYPE "Release" CACHE STRING "" FORCE)
@@ -5,8 +5,6 @@ import os
 import struct
 import sys
 import time
-from dataclasses import dataclass
-from typing import Any, Optional

 import numpy as np

@@ -239,288 +237,6 @@ def write_compact_format(
|
|||||||
f_out.write(storage_data)
|
f_out.write(storage_data)
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class HNSWComponents:
|
|
||||||
original_hnsw_data: dict[str, Any]
|
|
||||||
assign_probas_np: np.ndarray
|
|
||||||
cum_nneighbor_per_level_np: np.ndarray
|
|
||||||
levels_np: np.ndarray
|
|
||||||
is_compact: bool
|
|
||||||
compact_level_ptr: Optional[np.ndarray] = None
|
|
||||||
compact_node_offsets_np: Optional[np.ndarray] = None
|
|
||||||
compact_neighbors_data: Optional[list[int]] = None
|
|
||||||
offsets_np: Optional[np.ndarray] = None
|
|
||||||
neighbors_np: Optional[np.ndarray] = None
|
|
||||||
storage_fourcc: int = NULL_INDEX_FOURCC
|
|
||||||
storage_data: bytes = b""
|
|
||||||
|
|
||||||
|
|
||||||
def _read_hnsw_structure(f) -> HNSWComponents:
|
|
||||||
original_hnsw_data: dict[str, Any] = {}
|
|
||||||
|
|
||||||
hnsw_index_fourcc = read_struct(f, "<I")
|
|
||||||
if hnsw_index_fourcc not in EXPECTED_HNSW_FOURCCS:
|
|
||||||
raise ValueError(
|
|
||||||
f"Unexpected HNSW FourCC: {hnsw_index_fourcc:08x}. Expected one of {EXPECTED_HNSW_FOURCCS}."
|
|
||||||
)
|
|
||||||
|
|
||||||
original_hnsw_data["index_fourcc"] = hnsw_index_fourcc
|
|
||||||
original_hnsw_data["d"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["ntotal"] = read_struct(f, "<q")
|
|
||||||
original_hnsw_data["dummy1"] = read_struct(f, "<q")
|
|
||||||
original_hnsw_data["dummy2"] = read_struct(f, "<q")
|
|
||||||
original_hnsw_data["is_trained"] = read_struct(f, "?")
|
|
||||||
original_hnsw_data["metric_type"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["metric_arg"] = 0.0
|
|
||||||
if original_hnsw_data["metric_type"] > 1:
|
|
||||||
original_hnsw_data["metric_arg"] = read_struct(f, "<f")
|
|
||||||
|
|
||||||
assign_probas_np = read_numpy_vector(f, np.float64, "d")
|
|
||||||
cum_nneighbor_per_level_np = read_numpy_vector(f, np.int32, "i")
|
|
||||||
levels_np = read_numpy_vector(f, np.int32, "i")
|
|
||||||
|
|
||||||
ntotal = len(levels_np)
|
|
||||||
if ntotal != original_hnsw_data["ntotal"]:
|
|
||||||
original_hnsw_data["ntotal"] = ntotal
|
|
||||||
|
|
||||||
pos_before_compact = f.tell()
|
|
||||||
is_compact_flag = None
|
|
||||||
try:
|
|
||||||
is_compact_flag = read_struct(f, "<?")
|
|
||||||
except EOFError:
|
|
||||||
is_compact_flag = None
|
|
||||||
|
|
||||||
if is_compact_flag:
|
|
||||||
compact_level_ptr = read_numpy_vector(f, np.uint64, "Q")
|
|
||||||
compact_node_offsets_np = read_numpy_vector(f, np.uint64, "Q")
|
|
||||||
|
|
||||||
original_hnsw_data["entry_point"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["max_level"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["efConstruction"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["efSearch"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["dummy_upper_beam"] = read_struct(f, "<i")
|
|
||||||
|
|
||||||
storage_fourcc = read_struct(f, "<I")
|
|
||||||
compact_neighbors_data_np = read_numpy_vector(f, np.int32, "i")
|
|
||||||
compact_neighbors_data = compact_neighbors_data_np.tolist()
|
|
||||||
storage_data = f.read()
|
|
||||||
|
|
||||||
return HNSWComponents(
|
|
||||||
original_hnsw_data=original_hnsw_data,
|
|
||||||
assign_probas_np=assign_probas_np,
|
|
||||||
cum_nneighbor_per_level_np=cum_nneighbor_per_level_np,
|
|
||||||
levels_np=levels_np,
|
|
||||||
is_compact=True,
|
|
||||||
compact_level_ptr=compact_level_ptr,
|
|
||||||
compact_node_offsets_np=compact_node_offsets_np,
|
|
||||||
compact_neighbors_data=compact_neighbors_data,
|
|
||||||
storage_fourcc=storage_fourcc,
|
|
||||||
storage_data=storage_data,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Non-compact case
|
|
||||||
f.seek(pos_before_compact)
|
|
||||||
|
|
||||||
pos_before_probe = f.tell()
|
|
||||||
try:
|
|
||||||
suspected_flag = read_struct(f, "<B")
|
|
||||||
if suspected_flag != 0x00:
|
|
||||||
f.seek(pos_before_probe)
|
|
||||||
except EOFError:
|
|
||||||
f.seek(pos_before_probe)
|
|
||||||
|
|
||||||
offsets_np = read_numpy_vector(f, np.uint64, "Q")
|
|
||||||
neighbors_np = read_numpy_vector(f, np.int32, "i")
|
|
||||||
|
|
||||||
original_hnsw_data["entry_point"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["max_level"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["efConstruction"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["efSearch"] = read_struct(f, "<i")
|
|
||||||
original_hnsw_data["dummy_upper_beam"] = read_struct(f, "<i")
|
|
||||||
|
|
||||||
storage_fourcc = NULL_INDEX_FOURCC
|
|
||||||
storage_data = b""
|
|
||||||
try:
|
|
||||||
storage_fourcc = read_struct(f, "<I")
|
|
||||||
storage_data = f.read()
|
|
||||||
except EOFError:
|
|
||||||
storage_fourcc = NULL_INDEX_FOURCC
|
|
||||||
|
|
||||||
return HNSWComponents(
|
|
||||||
original_hnsw_data=original_hnsw_data,
|
|
||||||
assign_probas_np=assign_probas_np,
|
|
||||||
cum_nneighbor_per_level_np=cum_nneighbor_per_level_np,
|
|
||||||
levels_np=levels_np,
|
|
||||||
is_compact=False,
|
|
||||||
offsets_np=offsets_np,
|
|
||||||
neighbors_np=neighbors_np,
|
|
||||||
storage_fourcc=storage_fourcc,
|
|
||||||
storage_data=storage_data,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _read_hnsw_structure_from_file(path: str) -> HNSWComponents:
|
|
||||||
with open(path, "rb") as f:
|
|
||||||
return _read_hnsw_structure(f)
|
|
||||||
|
|
||||||
|
|
||||||
def write_original_format(
|
|
||||||
f_out,
|
|
||||||
original_hnsw_data,
|
|
||||||
assign_probas_np,
|
|
||||||
cum_nneighbor_per_level_np,
|
|
||||||
levels_np,
|
|
||||||
offsets_np,
|
|
||||||
neighbors_np,
|
|
||||||
storage_fourcc,
|
|
||||||
storage_data,
|
|
||||||
):
|
|
||||||
"""Write non-compact HNSW data in original FAISS order."""
|
|
||||||
|
|
||||||
f_out.write(struct.pack("<I", original_hnsw_data["index_fourcc"]))
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["d"]))
|
|
||||||
f_out.write(struct.pack("<q", original_hnsw_data["ntotal"]))
|
|
||||||
f_out.write(struct.pack("<q", original_hnsw_data["dummy1"]))
|
|
||||||
f_out.write(struct.pack("<q", original_hnsw_data["dummy2"]))
|
|
||||||
f_out.write(struct.pack("<?", original_hnsw_data["is_trained"]))
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["metric_type"]))
|
|
||||||
if original_hnsw_data["metric_type"] > 1:
|
|
||||||
f_out.write(struct.pack("<f", original_hnsw_data["metric_arg"]))
|
|
||||||
|
|
||||||
write_numpy_vector(f_out, assign_probas_np, "d")
|
|
||||||
write_numpy_vector(f_out, cum_nneighbor_per_level_np, "i")
|
|
||||||
write_numpy_vector(f_out, levels_np, "i")
|
|
||||||
|
|
||||||
write_numpy_vector(f_out, offsets_np, "Q")
|
|
||||||
write_numpy_vector(f_out, neighbors_np, "i")
|
|
||||||
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["entry_point"]))
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["max_level"]))
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["efConstruction"]))
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["efSearch"]))
|
|
||||||
f_out.write(struct.pack("<i", original_hnsw_data["dummy_upper_beam"]))
|
|
||||||
|
|
||||||
f_out.write(struct.pack("<I", storage_fourcc))
|
|
||||||
if storage_fourcc != NULL_INDEX_FOURCC and storage_data:
|
|
||||||
f_out.write(storage_data)
|
|
||||||
|
|
||||||
|
|
||||||
def prune_hnsw_embeddings(input_filename: str, output_filename: str) -> bool:
|
|
||||||
"""Rewrite an HNSW index while dropping the embedded storage section."""
|
|
||||||
|
|
||||||
start_time = time.time()
|
|
||||||
try:
|
|
||||||
with open(input_filename, "rb") as f_in, open(output_filename, "wb") as f_out:
|
|
||||||
original_hnsw_data: dict[str, Any] = {}
|
|
||||||
|
|
||||||
hnsw_index_fourcc = read_struct(f_in, "<I")
|
|
||||||
if hnsw_index_fourcc not in EXPECTED_HNSW_FOURCCS:
|
|
||||||
print(
|
|
||||||
f"Error: Expected HNSW Index FourCC ({list(EXPECTED_HNSW_FOURCCS)}), got {hnsw_index_fourcc:08x}.",
|
|
||||||
file=sys.stderr,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
original_hnsw_data["index_fourcc"] = hnsw_index_fourcc
|
|
||||||
original_hnsw_data["d"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["ntotal"] = read_struct(f_in, "<q")
|
|
||||||
original_hnsw_data["dummy1"] = read_struct(f_in, "<q")
|
|
||||||
original_hnsw_data["dummy2"] = read_struct(f_in, "<q")
|
|
||||||
original_hnsw_data["is_trained"] = read_struct(f_in, "?")
|
|
||||||
original_hnsw_data["metric_type"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["metric_arg"] = 0.0
|
|
||||||
if original_hnsw_data["metric_type"] > 1:
|
|
||||||
original_hnsw_data["metric_arg"] = read_struct(f_in, "<f")
|
|
||||||
|
|
||||||
assign_probas_np = read_numpy_vector(f_in, np.float64, "d")
|
|
||||||
cum_nneighbor_per_level_np = read_numpy_vector(f_in, np.int32, "i")
|
|
||||||
levels_np = read_numpy_vector(f_in, np.int32, "i")
|
|
||||||
|
|
||||||
ntotal = len(levels_np)
|
|
||||||
if ntotal != original_hnsw_data["ntotal"]:
|
|
||||||
original_hnsw_data["ntotal"] = ntotal
|
|
||||||
|
|
||||||
pos_before_compact = f_in.tell()
|
|
||||||
is_compact_flag = None
|
|
||||||
try:
|
|
||||||
is_compact_flag = read_struct(f_in, "<?")
|
|
||||||
except EOFError:
|
|
||||||
is_compact_flag = None
|
|
||||||
|
|
||||||
if is_compact_flag:
|
|
||||||
compact_level_ptr = read_numpy_vector(f_in, np.uint64, "Q")
|
|
||||||
compact_node_offsets_np = read_numpy_vector(f_in, np.uint64, "Q")
|
|
||||||
|
|
||||||
original_hnsw_data["entry_point"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["max_level"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["efConstruction"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["efSearch"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["dummy_upper_beam"] = read_struct(f_in, "<i")
|
|
||||||
|
|
||||||
_storage_fourcc = read_struct(f_in, "<I")
|
|
||||||
compact_neighbors_data_np = read_numpy_vector(f_in, np.int32, "i")
|
|
||||||
compact_neighbors_data = compact_neighbors_data_np.tolist()
|
|
||||||
_storage_data = f_in.read()
|
|
||||||
|
|
||||||
write_compact_format(
|
|
||||||
f_out,
|
|
||||||
original_hnsw_data,
|
|
||||||
assign_probas_np,
|
|
||||||
cum_nneighbor_per_level_np,
|
|
||||||
levels_np,
|
|
||||||
compact_level_ptr,
|
|
||||||
compact_node_offsets_np,
|
|
||||||
compact_neighbors_data,
|
|
||||||
NULL_INDEX_FOURCC,
|
|
||||||
b"",
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
f_in.seek(pos_before_compact)
|
|
||||||
|
|
||||||
pos_before_probe = f_in.tell()
|
|
||||||
try:
|
|
||||||
suspected_flag = read_struct(f_in, "<B")
|
|
||||||
if suspected_flag != 0x00:
|
|
||||||
f_in.seek(pos_before_probe)
|
|
||||||
except EOFError:
|
|
||||||
f_in.seek(pos_before_probe)
|
|
||||||
|
|
||||||
offsets_np = read_numpy_vector(f_in, np.uint64, "Q")
|
|
||||||
neighbors_np = read_numpy_vector(f_in, np.int32, "i")
|
|
||||||
|
|
||||||
original_hnsw_data["entry_point"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["max_level"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["efConstruction"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["efSearch"] = read_struct(f_in, "<i")
|
|
||||||
original_hnsw_data["dummy_upper_beam"] = read_struct(f_in, "<i")
|
|
||||||
|
|
||||||
_storage_fourcc = None
|
|
||||||
_storage_data = b""
|
|
||||||
try:
|
|
||||||
_storage_fourcc = read_struct(f_in, "<I")
|
|
||||||
_storage_data = f_in.read()
|
|
||||||
except EOFError:
|
|
||||||
_storage_fourcc = NULL_INDEX_FOURCC
|
|
||||||
|
|
||||||
write_original_format(
|
|
||||||
f_out,
|
|
||||||
original_hnsw_data,
|
|
||||||
assign_probas_np,
|
|
||||||
cum_nneighbor_per_level_np,
|
|
||||||
levels_np,
|
|
||||||
offsets_np,
|
|
||||||
neighbors_np,
|
|
||||||
NULL_INDEX_FOURCC,
|
|
||||||
b"",
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"[{time.time() - start_time:.2f}s] Pruned embeddings from {input_filename}")
|
|
||||||
return True
|
|
||||||
except Exception as exc:
|
|
||||||
print(f"Failed to prune embeddings: {exc}", file=sys.stderr)
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
# --- Main Conversion Logic ---
|
# --- Main Conversion Logic ---
|
||||||
|
|
||||||
|
|
||||||
@@ -984,29 +700,6 @@ def convert_hnsw_graph_to_csr(input_filename, output_filename, prune_embeddings=
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def prune_hnsw_embeddings_inplace(index_filename: str) -> bool:
|
|
||||||
"""Convenience wrapper to prune embeddings in-place."""
|
|
||||||
|
|
||||||
temp_path = f"{index_filename}.prune.tmp"
|
|
||||||
success = prune_hnsw_embeddings(index_filename, temp_path)
|
|
||||||
if success:
|
|
||||||
try:
|
|
||||||
os.replace(temp_path, index_filename)
|
|
||||||
except Exception as exc: # pragma: no cover - defensive
|
|
||||||
logger.error(f"Failed to replace original index with pruned version: {exc}")
|
|
||||||
try:
|
|
||||||
os.remove(temp_path)
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
os.remove(temp_path)
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
return success
|
|
||||||
|
|
||||||
|
|
||||||
# --- Script Execution ---
|
# --- Script Execution ---
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ from leann.interface import (
 from leann.registry import register_backend
 from leann.searcher_base import BaseSearcher

-from .convert_to_csr import convert_hnsw_graph_to_csr, prune_hnsw_embeddings_inplace
+from .convert_to_csr import convert_hnsw_graph_to_csr

 logger = logging.getLogger(__name__)

@@ -92,8 +92,6 @@ class HNSWBuilder(LeannBackendBuilderInterface):

         if self.is_compact:
             self._convert_to_csr(index_file)
-        elif self.is_recompute:
-            prune_hnsw_embeddings_inplace(str(index_file))

     def _convert_to_csr(self, index_file: Path):
         """Convert built index to CSR format"""
||||||
@@ -135,10 +133,10 @@ class HNSWSearcher(BaseSearcher):
         if metric_enum is None:
             raise ValueError(f"Unsupported distance_metric '{self.distance_metric}'.")

-        backend_meta_kwargs = self.meta.get("backend_kwargs", {})
-        self.is_compact = self.meta.get("is_compact", backend_meta_kwargs.get("is_compact", True))
-        default_pruned = backend_meta_kwargs.get("is_recompute", self.is_compact)
-        self.is_pruned = bool(self.meta.get("is_pruned", default_pruned))
+        self.is_compact, self.is_pruned = (
+            self.meta.get("is_compact", True),
+            self.meta.get("is_pruned", True),
+        )

         index_file = self.index_dir / f"{self.index_path.stem}.index"
         if not index_file.exists():
|
|||||||
@@ -24,26 +24,13 @@ logger = logging.getLogger(__name__)
|
|||||||
log_level = getattr(logging, LOG_LEVEL, logging.WARNING)
|
log_level = getattr(logging, LOG_LEVEL, logging.WARNING)
|
||||||
logger.setLevel(log_level)
|
logger.setLevel(log_level)
|
||||||
|
|
||||||
# Ensure we have handlers if none exist
|
# Ensure we have a handler if none exists
|
||||||
if not logger.handlers:
|
if not logger.handlers:
|
||||||
stream_handler = logging.StreamHandler()
|
handler = logging.StreamHandler()
|
||||||
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
|
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
|
||||||
stream_handler.setFormatter(formatter)
|
handler.setFormatter(formatter)
|
||||||
logger.addHandler(stream_handler)
|
logger.addHandler(handler)
|
||||||
|
logger.propagate = False
|
||||||
log_path = os.getenv("LEANN_HNSW_LOG_PATH")
|
|
||||||
if log_path:
|
|
||||||
try:
|
|
||||||
file_handler = logging.FileHandler(log_path, mode="a", encoding="utf-8")
|
|
||||||
file_formatter = logging.Formatter(
|
|
||||||
"%(asctime)s - %(levelname)s - [pid=%(process)d] %(message)s"
|
|
||||||
)
|
|
||||||
file_handler.setFormatter(file_formatter)
|
|
||||||
logger.addHandler(file_handler)
|
|
||||||
except Exception as exc: # pragma: no cover - best effort logging
|
|
||||||
logger.warning(f"Failed to attach file handler for log path {log_path}: {exc}")
|
|
||||||
|
|
||||||
logger.propagate = False
|
|
||||||
|
|
||||||
|
|
||||||
def create_hnsw_embedding_server(
|
def create_hnsw_embedding_server(
|
||||||
|
|||||||
@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "leann-backend-hnsw"
|
name = "leann-backend-hnsw"
|
||||||
version = "0.3.4"
|
version = "0.3.2"
|
||||||
description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
|
description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"leann-core==0.3.4",
|
"leann-core==0.3.2",
|
||||||
"numpy",
|
"numpy",
|
||||||
"pyzmq>=23.0.0",
|
"pyzmq>=23.0.0",
|
||||||
"msgpack>=1.0.0",
|
"msgpack>=1.0.0",
|
||||||
|
|||||||
Submodule packages/leann-backend-hnsw/third_party/faiss updated: 1d51f0c074...a0361858fc
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "leann-core"
|
name = "leann-core"
|
||||||
version = "0.3.4"
|
version = "0.3.2"
|
||||||
description = "Core API and plugin system for LEANN"
|
description = "Core API and plugin system for LEANN"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
requires-python = ">=3.9"
|
requires-python = ">=3.9"
|
||||||
|
|||||||
@@ -6,8 +6,6 @@ with the correct, original embedding logic from the user's reference code.
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import pickle
|
import pickle
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import time
|
import time
|
||||||
import warnings
|
import warnings
|
||||||
from dataclasses import dataclass, field
|
from dataclasses import dataclass, field
|
||||||
@@ -15,7 +13,6 @@ from pathlib import Path
|
|||||||
from typing import Any, Literal, Optional, Union
|
from typing import Any, Literal, Optional, Union
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from leann_backend_hnsw.convert_to_csr import prune_hnsw_embeddings_inplace
|
|
||||||
|
|
||||||
from leann.interface import LeannBackendSearcherInterface
|
from leann.interface import LeannBackendSearcherInterface
|
||||||
|
|
||||||
@@ -477,7 +474,9 @@ class LeannBuilder:
         is_compact = self.backend_kwargs.get("is_compact", True)
         is_recompute = self.backend_kwargs.get("is_recompute", True)
         meta_data["is_compact"] = is_compact
-        meta_data["is_pruned"] = bool(is_recompute)
+        meta_data["is_pruned"] = (
+            is_compact and is_recompute
+        )  # Pruned only if compact and recompute
         with open(leann_meta_path, "w", encoding="utf-8") as f:
             json.dump(meta_data, f, indent=2)

|
||||||
@@ -597,157 +596,13 @@ class LeannBuilder:
|
|||||||
is_compact = self.backend_kwargs.get("is_compact", True)
|
is_compact = self.backend_kwargs.get("is_compact", True)
|
||||||
is_recompute = self.backend_kwargs.get("is_recompute", True)
|
is_recompute = self.backend_kwargs.get("is_recompute", True)
|
||||||
meta_data["is_compact"] = is_compact
|
meta_data["is_compact"] = is_compact
|
||||||
meta_data["is_pruned"] = bool(is_recompute)
|
meta_data["is_pruned"] = is_compact and is_recompute
|
||||||
|
|
||||||
with open(leann_meta_path, "w", encoding="utf-8") as f:
|
with open(leann_meta_path, "w", encoding="utf-8") as f:
|
||||||
json.dump(meta_data, f, indent=2)
|
json.dump(meta_data, f, indent=2)
|
||||||
|
|
||||||
logger.info(f"Index built successfully from precomputed embeddings: {index_path}")
|
logger.info(f"Index built successfully from precomputed embeddings: {index_path}")
|
||||||
|
|
||||||
def update_index(self, index_path: str):
|
|
||||||
"""Append new passages and vectors to an existing HNSW index."""
|
|
||||||
if not self.chunks:
|
|
||||||
raise ValueError("No new chunks provided for update.")
|
|
||||||
|
|
||||||
path = Path(index_path)
|
|
||||||
index_dir = path.parent
|
|
||||||
index_name = path.name
|
|
||||||
index_prefix = path.stem
|
|
||||||
|
|
||||||
meta_path = index_dir / f"{index_name}.meta.json"
|
|
||||||
passages_file = index_dir / f"{index_name}.passages.jsonl"
|
|
||||||
offset_file = index_dir / f"{index_name}.passages.idx"
|
|
||||||
index_file = index_dir / f"{index_prefix}.index"
|
|
||||||
|
|
||||||
if not meta_path.exists() or not passages_file.exists() or not offset_file.exists():
|
|
||||||
raise FileNotFoundError("Index metadata or passage files are missing; cannot update.")
|
|
||||||
if not index_file.exists():
|
|
||||||
raise FileNotFoundError(f"HNSW index file not found: {index_file}")
|
|
||||||
|
|
||||||
with open(meta_path, encoding="utf-8") as f:
|
|
||||||
meta = json.load(f)
|
|
||||||
backend_name = meta.get("backend_name")
|
|
||||||
if backend_name != self.backend_name:
|
|
||||||
raise ValueError(
|
|
||||||
f"Index was built with backend '{backend_name}', cannot update with '{self.backend_name}'."
|
|
||||||
)
|
|
||||||
|
|
||||||
meta_backend_kwargs = meta.get("backend_kwargs", {})
|
|
||||||
index_is_compact = meta.get("is_compact", meta_backend_kwargs.get("is_compact", True))
|
|
||||||
if index_is_compact:
|
|
||||||
raise ValueError(
|
|
||||||
"Compact HNSW indices do not support in-place updates. Rebuild required."
|
|
||||||
)
|
|
||||||
|
|
||||||
distance_metric = meta_backend_kwargs.get(
|
|
||||||
"distance_metric", self.backend_kwargs.get("distance_metric", "mips")
|
|
||||||
).lower()
|
|
||||||
needs_recompute = bool(
|
|
||||||
meta.get("is_pruned")
|
|
||||||
or meta_backend_kwargs.get("is_recompute")
|
|
||||||
or self.backend_kwargs.get("is_recompute")
|
|
||||||
)
|
|
||||||
|
|
||||||
with open(offset_file, "rb") as f:
|
|
||||||
offset_map: dict[str, int] = pickle.load(f)
|
|
||||||
existing_ids = set(offset_map.keys())
|
|
||||||
|
|
||||||
valid_chunks: list[dict[str, Any]] = []
|
|
||||||
for chunk in self.chunks:
|
|
||||||
text = chunk.get("text", "")
|
|
||||||
if not isinstance(text, str) or not text.strip():
|
|
||||||
continue
|
|
||||||
metadata = chunk.setdefault("metadata", {})
|
|
||||||
passage_id = chunk.get("id") or metadata.get("id")
|
|
||||||
if passage_id and passage_id in existing_ids:
|
|
||||||
raise ValueError(f"Passage ID '{passage_id}' already exists in the index.")
|
|
||||||
valid_chunks.append(chunk)
|
|
||||||
|
|
||||||
if not valid_chunks:
|
|
||||||
raise ValueError("No valid chunks to append.")
|
|
||||||
|
|
||||||
texts_to_embed = [chunk["text"] for chunk in valid_chunks]
|
|
||||||
embeddings = compute_embeddings(
|
|
||||||
texts_to_embed,
|
|
||||||
self.embedding_model,
|
|
||||||
self.embedding_mode,
|
|
||||||
use_server=False,
|
|
||||||
is_build=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
embedding_dim = embeddings.shape[1]
|
|
||||||
expected_dim = meta.get("dimensions")
|
|
||||||
if expected_dim is not None and expected_dim != embedding_dim:
|
|
||||||
raise ValueError(
|
|
||||||
f"Dimension mismatch during update: existing index uses {expected_dim}, got {embedding_dim}."
|
|
||||||
)
|
|
||||||
|
|
||||||
from leann_backend_hnsw import faiss # type: ignore
|
|
||||||
|
|
||||||
embeddings = np.ascontiguousarray(embeddings, dtype=np.float32)
|
|
||||||
if distance_metric == "cosine":
|
|
||||||
norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
|
|
||||||
norms[norms == 0] = 1
|
|
||||||
embeddings = embeddings / norms
|
|
||||||
|
|
||||||
index = faiss.read_index(str(index_file))
|
|
||||||
if hasattr(index, "is_recompute"):
|
|
||||||
index.is_recompute = needs_recompute
|
|
||||||
if getattr(index, "storage", None) is None:
|
|
||||||
if index.metric_type == faiss.METRIC_INNER_PRODUCT:
|
|
||||||
storage_index = faiss.IndexFlatIP(index.d)
|
|
||||||
else:
|
|
||||||
storage_index = faiss.IndexFlatL2(index.d)
|
|
||||||
index.storage = storage_index
|
|
||||||
index.own_fields = True
|
|
||||||
if index.d != embedding_dim:
|
|
||||||
raise ValueError(
|
|
||||||
f"Existing index dimension ({index.d}) does not match new embeddings ({embedding_dim})."
|
|
||||||
)
|
|
||||||
|
|
||||||
base_id = index.ntotal
|
|
||||||
for offset, chunk in enumerate(valid_chunks):
|
|
||||||
new_id = str(base_id + offset)
|
|
||||||
chunk.setdefault("metadata", {})["id"] = new_id
|
|
||||||
chunk["id"] = new_id
|
|
||||||
|
|
||||||
index.add(embeddings.shape[0], faiss.swig_ptr(embeddings))
|
|
||||||
faiss.write_index(index, str(index_file))
|
|
||||||
|
|
||||||
with open(passages_file, "a", encoding="utf-8") as f:
|
|
||||||
for chunk in valid_chunks:
|
|
||||||
offset = f.tell()
|
|
||||||
json.dump(
|
|
||||||
{
|
|
||||||
"id": chunk["id"],
|
|
||||||
"text": chunk["text"],
|
|
||||||
"metadata": chunk.get("metadata", {}),
|
|
||||||
},
|
|
||||||
f,
|
|
||||||
ensure_ascii=False,
|
|
||||||
)
|
|
||||||
f.write("\n")
|
|
||||||
offset_map[chunk["id"]] = offset
|
|
||||||
|
|
||||||
with open(offset_file, "wb") as f:
|
|
||||||
pickle.dump(offset_map, f)
|
|
||||||
|
|
||||||
meta["total_passages"] = len(offset_map)
|
|
||||||
with open(meta_path, "w", encoding="utf-8") as f:
|
|
||||||
json.dump(meta, f, indent=2)
|
|
||||||
|
|
||||||
logger.info(
|
|
||||||
"Appended %d passages to index '%s'. New total: %d",
|
|
||||||
len(valid_chunks),
|
|
||||||
index_path,
|
|
||||||
len(offset_map),
|
|
||||||
)
|
|
||||||
|
|
||||||
self.chunks.clear()
|
|
||||||
|
|
||||||
if needs_recompute:
|
|
||||||
prune_hnsw_embeddings_inplace(str(index_file))
|
|
||||||
|
|
||||||
|
|
||||||
class LeannSearcher:
|
class LeannSearcher:
|
||||||
def __init__(self, index_path: str, enable_warmup: bool = False, **backend_kwargs):
|
def __init__(self, index_path: str, enable_warmup: bool = False, **backend_kwargs):
|
||||||
@@ -798,7 +653,6 @@ class LeannSearcher:
|
|||||||
expected_zmq_port: int = 5557,
|
expected_zmq_port: int = 5557,
|
||||||
metadata_filters: Optional[dict[str, dict[str, Union[str, int, float, bool, list]]]] = None,
|
metadata_filters: Optional[dict[str, dict[str, Union[str, int, float, bool, list]]]] = None,
|
||||||
batch_size: int = 0,
|
batch_size: int = 0,
|
||||||
use_grep: bool = False,
|
|
||||||
**kwargs,
|
**kwargs,
|
||||||
) -> list[SearchResult]:
|
) -> list[SearchResult]:
|
||||||
"""
|
"""
|
||||||
@@ -825,10 +679,6 @@ class LeannSearcher:
|
|||||||
Returns:
|
Returns:
|
||||||
List of SearchResult objects with text, metadata, and similarity scores
|
List of SearchResult objects with text, metadata, and similarity scores
|
||||||
"""
|
"""
|
||||||
# Handle grep search
|
|
||||||
if use_grep:
|
|
||||||
return self._grep_search(query, top_k)
|
|
||||||
|
|
||||||
logger.info("🔍 LeannSearcher.search() called:")
|
logger.info("🔍 LeannSearcher.search() called:")
|
||||||
logger.info(f" Query: '{query}'")
|
logger.info(f" Query: '{query}'")
|
||||||
logger.info(f" Top_k: {top_k}")
|
logger.info(f" Top_k: {top_k}")
|
||||||
@@ -945,96 +795,9 @@ class LeannSearcher:
|
|||||||
logger.info(f" {GREEN}✓ Final enriched results: {len(enriched_results)} passages{RESET}")
|
logger.info(f" {GREEN}✓ Final enriched results: {len(enriched_results)} passages{RESET}")
|
||||||
return enriched_results
|
return enriched_results
|
||||||
|
|
||||||
def _find_jsonl_file(self) -> Optional[str]:
|
|
||||||
"""Find the .jsonl file containing raw passages for grep search"""
|
|
||||||
index_path = Path(self.meta_path_str).parent
|
|
||||||
potential_files = [
|
|
||||||
index_path / "documents.leann.passages.jsonl",
|
|
||||||
index_path.parent / "documents.leann.passages.jsonl",
|
|
||||||
]
|
|
||||||
|
|
||||||
for file_path in potential_files:
|
|
||||||
if file_path.exists():
|
|
||||||
return str(file_path)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _grep_search(self, query: str, top_k: int = 5) -> list[SearchResult]:
|
|
||||||
"""Perform grep-based search on raw passages"""
|
|
||||||
jsonl_file = self._find_jsonl_file()
|
|
||||||
if not jsonl_file:
|
|
||||||
raise FileNotFoundError("No .jsonl passages file found for grep search")
|
|
||||||
|
|
||||||
try:
|
|
||||||
cmd = ["grep", "-i", "-n", query, jsonl_file]
|
|
||||||
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
|
|
||||||
|
|
||||||
if result.returncode == 1:
|
|
||||||
return []
|
|
||||||
elif result.returncode != 0:
|
|
||||||
raise RuntimeError(f"Grep failed: {result.stderr}")
|
|
||||||
|
|
||||||
matches = []
|
|
||||||
for line in result.stdout.strip().split("\n"):
|
|
||||||
if not line:
|
|
||||||
continue
|
|
||||||
parts = line.split(":", 1)
|
|
||||||
if len(parts) != 2:
|
|
||||||
continue
|
|
||||||
|
|
||||||
try:
|
|
||||||
data = json.loads(parts[1])
|
|
||||||
text = data.get("text", "")
|
|
||||||
score = text.lower().count(query.lower())
|
|
||||||
|
|
||||||
matches.append(
|
|
||||||
SearchResult(
|
|
||||||
id=data.get("id", parts[0]),
|
|
||||||
text=text,
|
|
||||||
metadata=data.get("metadata", {}),
|
|
||||||
score=float(score),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
matches.sort(key=lambda x: x.score, reverse=True)
|
|
||||||
return matches[:top_k]
|
|
||||||
|
|
||||||
except FileNotFoundError:
|
|
||||||
raise RuntimeError(
|
|
||||||
"grep command not found. Please install grep or use semantic search."
|
|
||||||
)
|
|
||||||
|
|
||||||
def _python_regex_search(self, query: str, top_k: int = 5) -> list[SearchResult]:
|
|
||||||
"""Fallback regex search"""
|
|
||||||
jsonl_file = self._find_jsonl_file()
|
|
||||||
if not jsonl_file:
|
|
||||||
raise FileNotFoundError("No .jsonl file found")
|
|
||||||
|
|
||||||
pattern = re.compile(re.escape(query), re.IGNORECASE)
|
|
||||||
matches = []
|
|
||||||
|
|
||||||
with open(jsonl_file, encoding="utf-8") as f:
|
|
||||||
for line_num, line in enumerate(f, 1):
|
|
||||||
if pattern.search(line):
|
|
||||||
try:
|
|
||||||
data = json.loads(line.strip())
|
|
||||||
matches.append(
|
|
||||||
SearchResult(
|
|
||||||
id=data.get("id", str(line_num)),
|
|
||||||
text=data.get("text", ""),
|
|
||||||
metadata=data.get("metadata", {}),
|
|
||||||
score=float(len(pattern.findall(data.get("text", "")))),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
matches.sort(key=lambda x: x.score, reverse=True)
|
|
||||||
return matches[:top_k]
|
|
||||||
|
|
||||||
def cleanup(self):
|
def cleanup(self):
|
||||||
"""Explicitly cleanup embedding server resources.
|
"""Explicitly cleanup embedding server resources.
|
||||||
|
|
||||||
This method should be called after you're done using the searcher,
|
This method should be called after you're done using the searcher,
|
||||||
especially in test environments or batch processing scenarios.
|
especially in test environments or batch processing scenarios.
|
||||||
"""
|
"""
|
||||||
@@ -1090,7 +853,6 @@ class LeannChat:
|
|||||||
expected_zmq_port: int = 5557,
|
expected_zmq_port: int = 5557,
|
||||||
metadata_filters: Optional[dict[str, dict[str, Union[str, int, float, bool, list]]]] = None,
|
metadata_filters: Optional[dict[str, dict[str, Union[str, int, float, bool, list]]]] = None,
|
||||||
batch_size: int = 0,
|
batch_size: int = 0,
|
||||||
use_grep: bool = False,
|
|
||||||
**search_kwargs,
|
**search_kwargs,
|
||||||
):
|
):
|
||||||
if llm_kwargs is None:
|
if llm_kwargs is None:
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
 import argparse
 import asyncio
+import sys
 from pathlib import Path
 from typing import Any, Optional

|
||||||
@@ -1215,8 +1216,13 @@ Examples:
     if use_ast:
         print("🧠 Using AST-aware chunking for code files")
         try:
-            # Import enhanced chunking utilities from packaged module
-            from .chunking_utils import create_text_chunks
+            # Import enhanced chunking utilities
+            # Add apps directory to path to import chunking utilities
+            apps_dir = Path(__file__).parent.parent.parent.parent.parent / "apps"
+            if apps_dir.exists():
+                sys.path.insert(0, str(apps_dir))
+
+            from chunking import create_text_chunks

             # Use enhanced chunking with AST support
             all_texts = create_text_chunks(
@@ -1231,9 +1237,7 @@ Examples:
             )

         except ImportError as e:
-            print(
-                f"⚠️ AST chunking utilities not available in package ({e}), falling back to traditional chunking"
-            )
+            print(f"⚠️ AST chunking not available ({e}), falling back to traditional chunking")
             use_ast = False

     if not use_ast:
|
|||||||
@@ -2,8 +2,6 @@
|
|||||||
|
|
||||||
Transform your development workflow with intelligent code assistance using LEANN's semantic search directly in Claude Code.
|
Transform your development workflow with intelligent code assistance using LEANN's semantic search directly in Claude Code.
|
||||||
|
|
||||||
For agent-facing discovery details, see `llms.txt` in the repository root.
|
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
Install LEANN globally for MCP integration (with default backend):
|
Install LEANN globally for MCP integration (with default backend):
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "leann"
|
name = "leann"
|
||||||
version = "0.3.4"
|
version = "0.3.2"
|
||||||
description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
|
description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
requires-python = ">=3.9"
|
requires-python = ">=3.9"
|
||||||
|
|||||||
@@ -99,7 +99,6 @@ wechat-exporter = "wechat_exporter.main:main"
|
|||||||
leann-core = { path = "packages/leann-core", editable = true }
|
leann-core = { path = "packages/leann-core", editable = true }
|
||||||
leann-backend-diskann = { path = "packages/leann-backend-diskann", editable = true }
|
leann-backend-diskann = { path = "packages/leann-backend-diskann", editable = true }
|
||||||
leann-backend-hnsw = { path = "packages/leann-backend-hnsw", editable = true }
|
leann-backend-hnsw = { path = "packages/leann-backend-hnsw", editable = true }
|
||||||
astchunk = { path = "packages/astchunk-leann", editable = true }
|
|
||||||
|
|
||||||
[tool.ruff]
|
[tool.ruff]
|
||||||
target-version = "py39"
|
target-version = "py39"
|
||||||
|
|||||||