From b0239b6e4d358130f9f12583fcc8aec050f45add Mon Sep 17 00:00:00 2001
From: Andy Lee
Date: Sun, 3 Aug 2025 22:37:45 -0700
Subject: [PATCH 1/4] refactor: reorganize all examples/ and test/
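
Split the old examples/ and test/ trees into clearer top-level
directories: the user-facing RAG apps move to apps/, benchmarking and
evaluation scripts move to benchmarks/, the shared sample data moves to
data/, and examples/ keeps only the small demos (basic_demo.py and
mlx_demo.py). Superseded demos and one-off mail-reader/test scripts are
deleted, and the README, docs, and .gitignore are updated to match the
new paths.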
---
.gitignore | 14 +-
README.md | 34 +-
{examples => apps}/base_rag_example.py | 0
{examples => apps}/browser_rag.py | 3 +-
{examples => apps}/document_rag.py | 4 +-
.../email_data/LEANN_email_reader.py | 0
{examples => apps}/email_data/email.py | 0
{examples => apps}/email_rag.py | 3 +-
{examples => apps}/history_data/__init__.py | 0
{examples => apps}/history_data/history.py | 0
.../history_data/wechat_history.py | 0
{examples => apps}/wechat_rag.py | 3 +-
{test/sanity_checks => benchmarks}/README.md | 0
.../benchmark_embeddings.py | 0
.../compare_faiss_vs_leann.py | 4 +-
{examples => benchmarks}/faiss_only.py | 2 +-
{test => benchmarks}/micro_tpt.py | 0
{examples => benchmarks}/run_evaluation.py | 12 +-
{test => benchmarks}/simple_mac_tpt_test.py | 0
{examples/data => data}/2501.14312v1 (1).pdf | Bin
{examples/data => data}/2506.08276v1.pdf | 0
{examples/data => data}/PrideandPrejudice.txt | 0
data/README.md | 110 ++++--
docs/features.md | 2 +-
docs/normalized_embeddings.md | 2 +-
examples/{simple_demo.py => basic_demo.py} | 4 +-
examples/data/README.md | 82 ----
examples/document_search.py | 158 --------
examples/mail_reader_llamaindex.py | 135 -------
.../mlx_demo.py | 0
examples/multi_vector_aggregator.py | 360 ------------------
examples/openai_hnsw_example.py | 113 ------
examples/resue_index.py | 23 --
test/mail_reader_llamaindex.py | 161 --------
test/mail_reader_save_load.py | 219 -----------
test/mail_reader_small_chunks.py | 219 -----------
test/mail_reader_test.py | 154 --------
test/query_saved_index.py | 105 -----
test/sanity_checks/debug_zmq_issue.py | 117 ------
tests/test_ci_minimal.py | 2 +-
tests/test_document_rag.py | 8 +-
41 files changed, 127 insertions(+), 1926 deletions(-)
rename {examples => apps}/base_rag_example.py (100%)
rename {examples => apps}/browser_rag.py (99%)
rename {examples => apps}/document_rag.py (98%)
rename {examples => apps}/email_data/LEANN_email_reader.py (100%)
rename {examples => apps}/email_data/email.py (100%)
rename {examples => apps}/email_rag.py (98%)
rename {examples => apps}/history_data/__init__.py (100%)
rename {examples => apps}/history_data/history.py (100%)
rename {examples => apps}/history_data/wechat_history.py (100%)
rename {examples => apps}/wechat_rag.py (99%)
rename {test/sanity_checks => benchmarks}/README.md (100%)
rename {test/sanity_checks => benchmarks}/benchmark_embeddings.py (100%)
rename {examples => benchmarks}/compare_faiss_vs_leann.py (99%)
rename {examples => benchmarks}/faiss_only.py (99%)
rename {test => benchmarks}/micro_tpt.py (100%)
rename {examples => benchmarks}/run_evaluation.py (97%)
rename {test => benchmarks}/simple_mac_tpt_test.py (100%)
rename {examples/data => data}/2501.14312v1 (1).pdf (100%)
rename {examples/data => data}/2506.08276v1.pdf (100%)
rename {examples/data => data}/PrideandPrejudice.txt (100%)
rename examples/{simple_demo.py => basic_demo.py} (96%)
delete mode 100644 examples/data/README.md
delete mode 100644 examples/document_search.py
delete mode 100644 examples/mail_reader_llamaindex.py
rename test/build_mlx_index.py => examples/mlx_demo.py (100%)
delete mode 100644 examples/multi_vector_aggregator.py
delete mode 100644 examples/openai_hnsw_example.py
delete mode 100644 examples/resue_index.py
delete mode 100644 test/mail_reader_llamaindex.py
delete mode 100644 test/mail_reader_save_load.py
delete mode 100644 test/mail_reader_small_chunks.py
delete mode 100644 test/mail_reader_test.py
delete mode 100644 test/query_saved_index.py
delete mode 100644 test/sanity_checks/debug_zmq_issue.py
diff --git a/.gitignore b/.gitignore
index 3d49d4d..4b51945 100755
--- a/.gitignore
+++ b/.gitignore
@@ -34,11 +34,15 @@ build/
nprobe_logs/
micro/results
micro/contriever-INT8
-examples/data/*
-!examples/data/2501.14312v1 (1).pdf
-!examples/data/2506.08276v1.pdf
-!examples/data/PrideandPrejudice.txt
-!examples/data/README.md
+data/*
+!data/2501.14312v1 (1).pdf
+!data/2506.08276v1.pdf
+!data/PrideandPrejudice.txt
+!data/README.md
+!data/ground_truth/
+!data/indices/
+!data/queries/
+!data/.gitattributes
*.qdstrm
benchmark_results/
results/
diff --git a/README.md b/README.md
index 2150613..ab250d3 100755
--- a/README.md
+++ b/README.md
@@ -216,11 +216,11 @@ Ask questions directly about your personal PDFs, documents, and any directory co
-The example below asks a question about summarizing our paper (uses default data in `examples/data`, which is a directory with diverse data sources: two papers, Pride and Prejudice, and a README in Chinese) and this is the **easiest example** to run here:
+The example below asks a question that summarizes our paper (it uses the default data in `data/`, a directory with diverse data sources: two papers, Pride and Prejudice, and a README in Chinese) and is the **easiest example** to run:
```bash
source .venv/bin/activate # Don't forget to activate the virtual environment
-python ./examples/document_rag.py --query "What are the main techniques LEANN explores?"
+python ./apps/document_rag.py --query "What are the main techniques LEANN explores?"
```
@@ -228,17 +228,17 @@ python ./examples/document_rag.py --query "What are the main techniques LEANN ex
#### Parameters
```bash
---data-dir DIR # Directory containing documents to process (default: examples/data)
+--data-dir DIR # Directory containing documents to process (default: data)
--file-types .ext .ext # Filter by specific file types (optional - all LlamaIndex supported types if omitted)
```
#### Example Commands
```bash
# Process all documents with larger chunks for academic papers
-python examples/document_rag.py --data-dir "~/Documents/Papers" --chunk-size 1024
+python apps/document_rag.py --data-dir "~/Documents/Papers" --chunk-size 1024
# Filter only markdown and Python files with smaller chunks
-python examples/document_rag.py --data-dir "./docs" --chunk-size 256 --file-types .md .py
+python apps/document_rag.py --data-dir "./docs" --chunk-size 256 --file-types .md .py
```
@@ -255,7 +255,7 @@ python examples/document_rag.py --data-dir "./docs" --chunk-size 256 --file-type
Before running the example below, you need to grant full disk access to your terminal/VS Code in System Preferences → Privacy & Security → Full Disk Access.
```bash
-python examples/email_rag.py --query "What's the food I ordered by DoorDash or Uber Eats mostly?"
+python apps/email_rag.py --query "What's the food I ordered by DoorDash or Uber Eats mostly?"
```
**780K email chunks → 78MB storage.** Finally, search your email like you search Google.
@@ -271,10 +271,10 @@ python examples/email_rag.py --query "What's the food I ordered by DoorDash or U
#### Example Commands
```bash
# Search work emails from a specific account
-python examples/email_rag.py --mail-path "~/Library/Mail/V10/WORK_ACCOUNT"
+python apps/email_rag.py --mail-path "~/Library/Mail/V10/WORK_ACCOUNT"
# Find all receipts and order confirmations (includes HTML)
-python examples/email_rag.py --query "receipt order confirmation invoice" --include-html
+python apps/email_rag.py --query "receipt order confirmation invoice" --include-html
```
@@ -295,7 +295,7 @@ Once the index is built, you can ask questions like:
```bash
-python examples/browser_rag.py --query "Tell me my browser history about machine learning?"
+python apps/browser_rag.py --query "Tell me my browser history about machine learning?"
```
**38K browser entries → 6MB storage.** Your browser history becomes your personal search engine.
@@ -310,10 +310,10 @@ python examples/browser_rag.py --query "Tell me my browser history about machine
#### Example Commands
```bash
# Search academic research from your browsing history
-python examples/browser_rag.py --query "arxiv papers machine learning transformer architecture"
+python apps/browser_rag.py --query "arxiv papers machine learning transformer architecture"
# Track competitor analysis across work profile
-python examples/browser_rag.py --chrome-profile "~/Library/Application Support/Google/Chrome/Work Profile" --max-items 5000
+python apps/browser_rag.py --chrome-profile "~/Library/Application Support/Google/Chrome/Work Profile" --max-items 5000
```
@@ -353,7 +353,7 @@ Once the index is built, you can ask questions like:
```bash
-python examples/wechat_rag.py --query "Show me all group chats about weekend plans"
+python apps/wechat_rag.py --query "Show me all group chats about weekend plans"
```
**400K messages → 64MB storage** Search years of chat history in any language.
@@ -394,10 +394,10 @@ sudo packages/wechat-exporter/wechattweak-cli install
#### Example Commands
```bash
# Search for travel plans discussed in group chats
-python examples/wechat_rag.py --query "travel plans" --max-items 10000
+python apps/wechat_rag.py --query "travel plans" --max-items 10000
# Re-export and search recent chats (useful after new messages)
-python examples/wechat_rag.py --force-export --query "work schedule"
+python apps/wechat_rag.py --force-export --query "work schedule"
```
@@ -519,7 +519,7 @@ Options:
## Benchmarks
-**[Simple Example: Compare LEANN vs FAISS →](examples/compare_faiss_vs_leann.py)**
+**[Simple Example: Compare LEANN vs FAISS →](benchmarks/compare_faiss_vs_leann.py)**
### 📊 Storage Comparison
| System | DPR (2.1M) | Wiki (60M) | Chat (400K) | Email (780K) | Browser (38K) |
@@ -534,8 +534,8 @@ Options:
```bash
uv pip install -e ".[dev]" # Install dev dependencies
-python examples/run_evaluation.py data/indices/dpr/dpr_diskann # DPR dataset
-python examples/run_evaluation.py data/indices/rpj_wiki/rpj_wiki.index # Wikipedia
+python benchmarks/run_evaluation.py data/indices/dpr/dpr_diskann # DPR dataset
+python benchmarks/run_evaluation.py data/indices/rpj_wiki/rpj_wiki.index # Wikipedia
```
The evaluation script downloads data automatically on first run. The last three results were tested with partial personal data, and you can reproduce them with your own data!
diff --git a/examples/base_rag_example.py b/apps/base_rag_example.py
similarity index 100%
rename from examples/base_rag_example.py
rename to apps/base_rag_example.py
diff --git a/examples/browser_rag.py b/apps/browser_rag.py
similarity index 99%
rename from examples/browser_rag.py
rename to apps/browser_rag.py
index 8e9f9bb..d115510 100644
--- a/examples/browser_rag.py
+++ b/apps/browser_rag.py
@@ -11,7 +11,8 @@ from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from base_rag_example import BaseRAGExample, create_text_chunks
-from history_data.history import ChromeHistoryReader
+
+from history_data.history import ChromeHistoryReader
class BrowserRAG(BaseRAGExample):
diff --git a/examples/document_rag.py b/apps/document_rag.py
similarity index 98%
rename from examples/document_rag.py
rename to apps/document_rag.py
index a8ec15a..02c954a 100644
--- a/examples/document_rag.py
+++ b/apps/document_rag.py
@@ -29,8 +29,8 @@ class DocumentRAG(BaseRAGExample):
doc_group.add_argument(
"--data-dir",
type=str,
- default="examples/data",
- help="Directory containing documents to index (default: examples/data)",
+ default="data",
+ help="Directory containing documents to index (default: data)",
)
doc_group.add_argument(
"--file-types",
diff --git a/examples/email_data/LEANN_email_reader.py b/apps/email_data/LEANN_email_reader.py
similarity index 100%
rename from examples/email_data/LEANN_email_reader.py
rename to apps/email_data/LEANN_email_reader.py
diff --git a/examples/email_data/email.py b/apps/email_data/email.py
similarity index 100%
rename from examples/email_data/email.py
rename to apps/email_data/email.py
diff --git a/examples/email_rag.py b/apps/email_rag.py
similarity index 98%
rename from examples/email_rag.py
rename to apps/email_rag.py
index 5d040b0..d1fa1c9 100644
--- a/examples/email_rag.py
+++ b/apps/email_rag.py
@@ -10,7 +10,8 @@ from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from base_rag_example import BaseRAGExample, create_text_chunks
-from email_data.LEANN_email_reader import EmlxReader
+
+from email_data.LEANN_email_reader import EmlxReader
class EmailRAG(BaseRAGExample):
diff --git a/examples/history_data/__init__.py b/apps/history_data/__init__.py
similarity index 100%
rename from examples/history_data/__init__.py
rename to apps/history_data/__init__.py
diff --git a/examples/history_data/history.py b/apps/history_data/history.py
similarity index 100%
rename from examples/history_data/history.py
rename to apps/history_data/history.py
diff --git a/examples/history_data/wechat_history.py b/apps/history_data/wechat_history.py
similarity index 100%
rename from examples/history_data/wechat_history.py
rename to apps/history_data/wechat_history.py
diff --git a/examples/wechat_rag.py b/apps/wechat_rag.py
similarity index 99%
rename from examples/wechat_rag.py
rename to apps/wechat_rag.py
index f127f3f..a8d92b4 100644
--- a/examples/wechat_rag.py
+++ b/apps/wechat_rag.py
@@ -11,7 +11,8 @@ from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from base_rag_example import BaseRAGExample
-from history_data.wechat_history import WeChatHistoryReader
+
+from history_data.wechat_history import WeChatHistoryReader
class WeChatRAG(BaseRAGExample):
diff --git a/test/sanity_checks/README.md b/benchmarks/README.md
similarity index 100%
rename from test/sanity_checks/README.md
rename to benchmarks/README.md
diff --git a/test/sanity_checks/benchmark_embeddings.py b/benchmarks/benchmark_embeddings.py
similarity index 100%
rename from test/sanity_checks/benchmark_embeddings.py
rename to benchmarks/benchmark_embeddings.py
diff --git a/examples/compare_faiss_vs_leann.py b/benchmarks/compare_faiss_vs_leann.py
similarity index 99%
rename from examples/compare_faiss_vs_leann.py
rename to benchmarks/compare_faiss_vs_leann.py
index ea714f1..03cf508 100644
--- a/examples/compare_faiss_vs_leann.py
+++ b/benchmarks/compare_faiss_vs_leann.py
@@ -62,7 +62,7 @@ def test_faiss_hnsw():
try:
result = subprocess.run(
- [sys.executable, "examples/faiss_only.py"],
+ [sys.executable, "benchmarks/faiss_only.py"],
capture_output=True,
text=True,
timeout=300,
@@ -115,7 +115,7 @@ def test_leann_hnsw():
# Load and parse documents
documents = SimpleDirectoryReader(
- "examples/data",
+ "data",
recursive=True,
encoding="utf-8",
required_exts=[".pdf", ".txt", ".md"],
diff --git a/examples/faiss_only.py b/benchmarks/faiss_only.py
similarity index 99%
rename from examples/faiss_only.py
rename to benchmarks/faiss_only.py
index 227e14a..c501c10 100644
--- a/examples/faiss_only.py
+++ b/benchmarks/faiss_only.py
@@ -65,7 +65,7 @@ def main():
tracker.checkpoint("After Faiss index creation")
documents = SimpleDirectoryReader(
- "examples/data",
+ "data",
recursive=True,
encoding="utf-8",
required_exts=[".pdf", ".txt", ".md"],
diff --git a/test/micro_tpt.py b/benchmarks/micro_tpt.py
similarity index 100%
rename from test/micro_tpt.py
rename to benchmarks/micro_tpt.py
diff --git a/examples/run_evaluation.py b/benchmarks/run_evaluation.py
similarity index 97%
rename from examples/run_evaluation.py
rename to benchmarks/run_evaluation.py
index 0e55178..2fae210 100644
--- a/examples/run_evaluation.py
+++ b/benchmarks/run_evaluation.py
@@ -200,10 +200,10 @@ def main():
args = parser.parse_args()
# --- Path Configuration ---
- # Assumes a project structure where the script is in 'examples/'
- # and data is in 'data/' at the project root.
- project_root = Path(__file__).resolve().parent.parent
- data_root = project_root / "data"
+ # Assumes a project structure where the script is in 'benchmarks/'
+ # and evaluation data is in 'benchmarks/data/'.
+ script_dir = Path(__file__).resolve().parent
+ data_root = script_dir / "data"
# Download data based on mode
if args.mode == "build":
@@ -279,7 +279,9 @@ def main():
if not args.index_path:
print("No indices found. The data download should have included pre-built indices.")
- print("Please check the data/indices/ directory or provide --index-path manually.")
+ print(
+ "Please check the benchmarks/data/indices/ directory or provide --index-path manually."
+ )
sys.exit(1)
# Detect dataset type from index path to select the correct ground truth
diff --git a/test/simple_mac_tpt_test.py b/benchmarks/simple_mac_tpt_test.py
similarity index 100%
rename from test/simple_mac_tpt_test.py
rename to benchmarks/simple_mac_tpt_test.py
diff --git a/examples/data/2501.14312v1 (1).pdf b/data/2501.14312v1 (1).pdf
similarity index 100%
rename from examples/data/2501.14312v1 (1).pdf
rename to data/2501.14312v1 (1).pdf
diff --git a/examples/data/2506.08276v1.pdf b/data/2506.08276v1.pdf
similarity index 100%
rename from examples/data/2506.08276v1.pdf
rename to data/2506.08276v1.pdf
diff --git a/examples/data/PrideandPrejudice.txt b/data/PrideandPrejudice.txt
similarity index 100%
rename from examples/data/PrideandPrejudice.txt
rename to data/PrideandPrejudice.txt
diff --git a/data/README.md b/data/README.md
index bf4af69..0dd35da 100644
--- a/data/README.md
+++ b/data/README.md
@@ -1,44 +1,82 @@
----
-license: mit
+# 盘古之殇:华为诺亚盘古大模型研发历程的心酸与黑暗
+
+各位好,
+
+我是一名盘古大模型团队,华为诺亚方舟实验室的员工。
+
+首先为自证身份,列举一些细节:
+
+1. 现诺亚主任,前算法应用部部长,后改名为小模型实验室的主任王云鹤。前诺亚主任:姚骏(大家称姚老师)。几个实验室主任:唐睿明(明哥,明队,已离职),尚利峰,张维(维哥),郝建业(郝老师),刘武龙(称呼为武龙所)等。其他骨干成员和专家陆续有很多人离职。
+2. 我们隶属于“四野”这个组织。四野下属有许多纵队,基础语言大模型是四纵。王云鹤的小模型是十六纵队。我们参加过苏州的集结,有各种月份的时间节点。在苏州攻关会颁发任务令,需要在节点前达成目标。苏州集结会把各地的人员都集中在苏州研究所,平常住宾馆,比如在甪直的酒店,与家人孩子天各一方。
+3. 在苏州集结的时候周六默认上班,非常辛苦,不过周六有下午茶,有一次还有小龙虾。在苏州研究所的工位搬迁过一次,从一栋楼换到了另一栋。苏州研究所楼栋都是欧式装修,门口有大坡,里面景色很不错。去苏州集结一般至少要去一周,甚至更久,多的人甚至一两个月都回不了家。
+4. 诺亚曾经传说是研究型的,但是来了之后因为在四野做大模型项目,项目成员完全变成了交付型的,且充满了例会,评审,汇报。很多时候做实验都要申请。团队需要对接终端小艺,华为云,ICT等诸多业务线,交付压力不小。
+5. 诺亚研发的盘古模型早期内部代号叫做“盘古智子”,一开始只有内部需要申请试用的网页版,到后续迫于压力在welink上接入和公测开放。
+
+这些天发生关于质疑盘古大模型抄袭千问的事情闹的沸沸扬扬。作为一个盘古团队的成员,我最近夜夜辗转反侧,难以入眠。盘古的品牌受到如此大的影响,一方面,我自私的为我的职业发展担忧,也为自己过去的努力工作感到不值。另一方面,由于有人开始揭露这些事情我内心又感到大快人心。在多少个日日夜夜,我们对内部某些人一次次靠着造假而又获得了无数利益的行为咬牙切齿而又无能为力。这种压抑和羞辱也逐渐消磨了我对华为的感情,让我在这里的时日逐渐浑浑噩噩,迷茫无措,时常怀疑自己的人生和自我价值。
+
+我承认我是一个懦弱的人,作为一个小小的打工人,我不仅不敢和王云鹤等内部手眼通天的人做对,更不敢和华为这样的庞然大物做对。我很怕失去我的工作,毕竟我也有家人和孩子,所以我打心眼里很佩服揭露者。但是,看到内部还在试图洗地掩盖事实,蒙蔽公众的时候,我实在不能容忍了。我也希望勇敢一次,顺从自己本心。就算自损八百,我也希望能伤敌一千。我决定把我在这里的所见所闻(部分来自于同事口述)公布出来,关于盘古大模型的“传奇故事”:
+
+华为确实主要在昇腾卡上训练大模型(小模型实验室有不少英伟达的卡,他们之前也会用来训练,后面转移到昇腾)。曾经我被华为“打造世界第二选择”的决心而折服,我本身也曾经对华为有深厚的感情。我们陪着昇腾一步步摸爬滚打,从充满bug到现在能训出模型,付出了巨大的心血和代价。
+
+最初我们的算力非常有限,在910A上训练模型。那会只支持fp16,训练的稳定性远不如bf16。盘古的moe开始很早,23年就主要是训练38Bmoe模型和后续的71B dense模型。71B的dense模型通过扩增变成了第一代的135Bdense模型,后面主力模型也逐渐在910B上训练。
+
+71B和135B模型都有一个巨大的硬伤就是tokenizer。当时使用的tokenizer编码效率极低,每个单个的符号,数字,空格,乃至汉字都会占用一个token。可想而知这会非常浪费算力,且使得模型的效果很差。这时候小模型实验室正好有个自己训的词表。姚老师当时怀疑是不是模型的tokenizer不好(虽然事后来看,他的怀疑是无疑正确的),于是就决定,让71B和135B换tokenizer,因为小模型实验室曾经尝试过。团队缝合了两个tokenizer,开始了tokenizer的更换。71B模型的更换失败了,而135B因为采用了更精细的embedding初始化策略,续训了至少1T的数据后词表总算更换成功,但可想而知,效果并不会变好。
+
+于此同期,阿里和智谱等国内其他公司在GPU上训练,且已经摸索出了正确的方法,盘古和竞品的差距越来越大。内部一个230B从头训练的dense模型又因为各种原因训练失败,导致项目的状况几乎陷入绝境。面临几个节点的压力以及内部对盘古的强烈质疑时,团队的士气低迷到了极点。团队在算力极其有限的时候,做出了很多努力和挣扎。比如,团队偶然发现当时的38B moe并没有预期moe的效果。于是去掉了moe参数,还原为了13B的dense模型。由于38B的moe源自很早的pangu alpha 13B,架构相对落后,团队进行了一系列的操作,比如切换绝对位置编码到rope,去掉bias,切换为rmsnorm。同时鉴于tokenizer的一些失败和换词表的经验,这个模型的词表也更换为了王云鹤的小模型实验室7B模型所使用的词表。后面这个13B模型进行了扩增续训,变成了第二代38B dense模型(在几个月内这个模型都是主要的盘古中档位模型),曾经具有一定的竞争力。但是,由于更大的135B模型架构落后,且更换词表模型损伤巨大(后续分析发现当时更换的缝合词表有更严重的bug),续训后也与千问等当时国内领先模型存在很大差距。这时由于内部的质疑声和领导的压力也越来越大。团队的状态几乎陷入了绝境。
+
+在这种情况下,王云鹤和他的小模型实验室出手了。他们声称是从旧的135B参数继承改造而来,通过训练短短的几百B数据,各项指标平均提升了十个点左右。实际上,这就是他们套壳应用到大模型的第一次杰作。华为的外行领导内行,使得领导完全对于这种扯淡的事情没有概念,他们只会觉得肯定是有什么算法创新。经过内部的分析,他们实际上是使用Qwen 1.5 110B续训而来,通过加层,扩增ffn维度,添加盘古pi论文的一些机制得来,凑够了大概135B的参数。实际上,旧的135B有107层,而这个模型只有82层,各种配置也都不一样。新的来路不明的135B训练完很多参数的分布也和Qwen 110B几乎一模一样。连模型代码的类名当时都是Qwen,甚至懒得改名。后续这个模型就是所谓的135B V2。而这个模型当时也提供给了很多下游,甚至包括外部客户。
+
+这件事对于我们这些认真诚实做事的同事们带来了巨大的冲击,内部很多人其实都知道这件事,甚至包括终端和华为云。我们都戏称以后别叫盘古模型了,叫千古吧。当时团队成员就想向bcg举报了,毕竟这已经是重大的业务造假了。但是后面据说被领导拦了下来,因为更高级别的领导(比如姚老师,以及可能熊总和查老)其实后面也知道了,但是并不管,因为通过套壳拿出好的结果,对他们也是有利的。这件事使得当时团队几位最强的同事开始心灰意冷,离职跑路也逐渐成为挂在嘴边的事。
+
+此时,盘古似乎迎来了转机。由于前面所述的这些盘古模型基本都是续训和改造而来,当时诺亚完全没有掌握从头训练的技术,何况还是在昇腾的NPU上进行训练。在当时团队的核心成员的极力争取下,盘古开始了第三代模型的训练,付出了巨大的努力后,在数据架构和训练算法方面都与业界逐渐接轨,而这其中的艰辛和小模型实验室的人一点关系都没有。
+
+一开始团队成员毫无信心,只从一个13B的模型开始训练,但是后面发现效果还不错,于是这个模型后续再次进行了一次参数扩增,变成了第三代的38B,代号38B V3。想必很多产品线的兄弟都对这个模型很熟悉。当时这个模型的tokenizer是基于llama的词表进行扩展的(也是业界常见的做法)。而当时王云鹤的实验室做出来了另一个词表(也就是后续pangu系列的词表)。当时两个词表还被迫进行了一次赛马,最终没有明显的好坏结论。于是,领导当即决定,应该统一词表,使用王云鹤他们的。于是,在后续从头训练的135B V3(也就是对外的Pangu Ultra),便是采用了这个tokenizer。这也解释了很多使用我们模型的兄弟的疑惑,为什么当时同为V3代的两个不同档位的模型,会使用不同的tokenizer。
+
+
+我们打心眼里觉得,135B V3是我们四纵团队当时的骄傲。这是第一个真正意义上的,华为全栈自研,正经从头训练的千亿级别的模型,且效果与24年同期竞品可比的。写到这里我已经热泪盈眶,太不容易了。当时为了稳定训练,团队做了大量实验对比,并且多次在模型梯度出现异常的时候进行及时回退重启。这个模型真正做到了后面技术报告所说的训练全程没有一个loss spike。我们克服了不知道多少困难,我们做到了,我们愿用生命和荣誉保证这个模型训练的真实性。多少个凌晨,我们为了它的训练而不眠。在被内部心声骂的一文不值的时候,我们有多么不甘,有多少的委屈,我们挺住了。
+
+我们这帮人是真的在为打磨国产算力底座燃烧自己的青春啊……客居他乡,我们放弃了家庭,放弃了假期,放弃了健康,放弃了娱乐,抛头颅洒热血,其中的艰辛与困苦,寥寥数笔不足以概括其万一。在各种动员大会上,当时口号中喊出的盘古必胜,华为必胜,我们心里是真的深深被感动。
+
+然而,我们的所有辛苦的成果,经常被小模型实验室轻飘飘的拿走了。数据,直接要走。代码,直接要走,还要求我们配合适配到能一键运行。我们当时戏称小模型实验室为点鼠标实验室。我们付出辛苦,他们取得荣耀。果然应了那句话,你在负重前行是因为有人替你岁月静好。在这种情况下,越来越多的战友再也坚持不下去了,选择了离开。看到身边那些优秀的同事一个个离职,我的内心又感叹又难过。在这种作战一样的环境下,我们比起同事来说更像是战友。他们在技术上也有无数值得我学习的地方,堪称良师。看到他们去了诸如字节Seed,Deepseek,月之暗面,腾讯和快手等等很多出色的团队,我打心眼里为他们高兴和祝福,脱离了这个辛苦却肮脏的地方。我至今还对一位离职同事的话记忆犹新,ta说:“来这里是我技术生涯中的耻辱,在这里再呆每一天都是浪费生命”。话虽难听却让我无言以对。我担心我自己技术方面的积累不足,以及没法适应互联网公司高淘汰的环境,让我多次想离职的心始终没有迈出这一步。
+
+盘古除了dense模型,后续也启动了moe的探索。一开始训练的是一个224B的moe模型。而与之平行的,小模型实验室也开启了第二次主要的套壳行动(次要的插曲可能还包括一些别的模型,比如math模型),即这次流传甚广的pangu pro moe 72B。这个模型内部自称是从小模型实验室的7B扩增上来的(就算如此,这也与技术报告不符,何况是套壳qwen 2.5的14b续训)。还记得他们训了没几天,内部的评测就立刻追上了当时的38B V3。AI系统实验室很多兄弟因为需要适配模型,都知道他们的套壳行动,只是迫于各种原因,无法伸张正义。实际上,对于后续训了很久很久的这个模型,Honestagi能够分析出这个量级的相似性我已经很诧异了,因为这个模型为了续训洗参数,所付出的算力甚至早就足够从头训一个同档位的模型了。听同事说他们为了洗掉千问的水印,采取了不少办法,甚至包括故意训了脏数据。这也为学术界研究模型血缘提供了一个前所未有的特殊模范吧。以后新的血缘方法提出可以拿出来溜溜。
+
+24年底和25年初,在Deepseek v3和r1发布之后,由于其惊艳的技术水平,团队受到了巨大的冲击,也受到了更大的质疑。于是为了紧跟潮流,盘古模仿Deepseek的模型尺寸,开启了718B moe的训练。这个时候,小模型实验室再次出手了。他们选择了套壳Deepseekv3续训。他们通过冻住Deepseek加载的参数,进行训练。连任务加载ckpt的目录都是deepseekv3,改都不改,何其嚣张?与之相反,一些有真正技术信仰的同事,在从头训练另一个718B的moe。但其中出现了各种各样的问题。但是很显然,这个模型怎么可能比直接套壳的好呢?如果不是团队leader坚持,早就被叫停了。
+
+华为的流程管理之繁重,严重拖累了大模型的研发节奏,例如版本管理,模型血缘,各种流程化,各种可追溯。讽刺的是,小模型实验室的模型似乎从来不受这些流程的约束,想套壳就套壳,想续训就续训,算力源源不断的伸手拿走。这种强烈到近乎魔幻的对比,说明了当前流程管理的情况:只许州官放火,不许百姓点灯。何其可笑?何其可悲?何其可恶?何其可耻!
+
+HonestAGI的事情出来后,内部让大家不停的研讨分析,如何公关和“回应”。诚然,这个原文的分析也许不够有力,给了王云鹤与小模型实验室他们狡辩和颠倒黑白的机会。为此,这两天我内心感到作呕,时时怀疑自己的人生意义以及苍天无眼。我不奉陪了,我要离职了,同时我也在申请从盘古部分技术报告的作者名单中移除。曾经在这些技术报告上署名是我一生都无法抹除的污点。当时我没想到,他们竟然猖狂到敢开源。我没想到,他们敢如此愚弄世人,大肆宣发。当时,我也许是存了侥幸心理,没有拒绝署名。我相信很多扎实做事的战友,也只是被迫上了贼船,或者不知情。但这件事已经无法挽回,我希望我的余生能够坚持扎实做真正有意义的事,为我当时的软弱和不坚定赎罪。
+
+深夜写到这里,我已经泪流满面,泣不成声。还记得一些出色的同事离职时,我苦笑问他们要不要发个长长的心声惯例帖,揭露一下现状。对方说:不了,浪费时间,而且我也怕揭露出来你们过的更糟。我当时一下黯然神伤,因为曾经共同为了理想奋斗过的战友已经彻底对华为彻底灰心了。当时大家调侃,我们用着当年共产党的小米加步枪,组织却有着堪比当年国民党的作风。
+
+曾几何时,我为我们用着小米加步枪打败洋枪洋炮而自豪。
+
+现在,我累了,我想投降。
+
+其实时至今日,我还是真心希望华为能认真吸取教训,能做好盘古,把盘古做到世界一流,把昇腾变成英伟达的水平。内部的劣币驱逐良币,使得诺亚乃至华为在短时间内急剧流失了大量出色的大模型人才。相信他们也正在如Deepseek等各个团队闪耀着,施展着他们的抱负才华,为中美在AI的激烈竞赛中奉献力量。我时常感叹,华为不是没有人才,而是根本不知道怎么留住人才。如果给这些人合适的环境,合适的资源,更少的枷锁,更少的政治斗争,盘古何愁不成?
+
+最后:我以生命,人格和荣誉发誓,我写的以上所有内容均为真实(至少在我有限的认知范围内)。我没有那么高的技术水平以及机会去做详尽扎实的分析,也不敢直接用内部记录举证,怕因为信息安全抓到。但是我相信我很多曾经的战友,会为我作证。在华为内部的兄弟,包括我们曾经服务过的产品线兄弟们,相信本文的无数细节能和你们的印象对照,印证我的说法。你们可能也曾经被蒙骗,但这些残酷的真相不会被尘封。我们奋战过的痕迹,也不应该被扭曲和埋葬。
+
+写了这么多,某些人肯定想把我找出来,抹杀掉。公司搞不好也想让我噤声乃至追责。如果真的这样,我,乃至我的家人的人身乃至生命安全可能都会受到威胁。为了自我保护,我近期每天会跟大家报平安。
+
+如果我消失了,就当是我为了真理和理想,为了华为乃至中国能够更好地发展算力和AI而牺牲了吧,我愿埋葬于那片曾经奋斗过的地方。
+
+诺亚,再见
+
+2025年7月6日凌晨 写于深圳
+
---
-# LEANN-RAG Evaluation Data
+各位好,
-This repository contains the necessary data to run the recall evaluation scripts for the [LEANN-RAG](https://huggingface.co/LEANN-RAG) project.
+感谢大家的关心与祝福。我目前暂时安全,但公司应该在进行排查与某些名单收集,后续情况未知。
-## Dataset Components
+我补充一些细节,以免某些人继续颠倒黑白。
-This dataset is structured into three main parts:
+关于135B V2,小模型实验室在迅速地完成套壳并拿完所有套壳带来的好处后(比如任务令表彰和及时激励),因为不想继续支撑下游应用和模型迭代,又把这个烫手山芋甩给了四纵。确实技高一筹,直接把四纵的兄弟们拉下水。同事提供过去一个老旧的模型,最终拿回了一个当时一个魔改的先进的千问。做大模型的人,自己做的模型就像自己孩子一样熟悉,不要把别人都当傻子。就像自家儿子出门一趟,回来个别人家孩子。
-1. **Pre-built LEANN Indices**:
- * `dpr/`: A pre-built index for the DPR dataset.
- * `rpj_wiki/`: A pre-built index for the RPJ-Wiki dataset.
- These indices were created using the `leann-core` library and are required by the `LeannSearcher`.
+盘古report的署名是不符合学术规范的。例如,135B V3有不少有技术贡献的人,因为作者名额数量限制,劳动成果没有得到应有的回报,团队内曾经有不小的意见。这个模型当时是大家智慧和汗水的结晶,甚至是团队当时的精神支柱,支撑着不少兄弟们继续留在诺亚。所谓的名额限制,以及挂名了一些毫无技术贡献的人(如一些小模型实验室的人),让兄弟们何其心寒。
-2. **Ground Truth Data**:
- * `ground_truth/`: Contains the ground truth files (`flat_results_nq_k3.json`) for both the DPR and RPJ-Wiki datasets. These files map queries to the original passage IDs from the Natural Questions benchmark, evaluated using the Contriever model.
+---
-3. **Queries**:
- * `queries/`: Contains the `nq_open.jsonl` file with the Natural Questions queries used for the evaluation.
-
-## Usage
-
-To use this data, you can download it locally using the `huggingface-hub` library. First, install the library:
-
-```bash
-pip install huggingface-hub
-```
-
-Then, you can download the entire dataset to a local directory (e.g., `data/`) with the following Python script:
-
-```python
-from huggingface_hub import snapshot_download
-
-snapshot_download(
- repo_id="LEANN-RAG/leann-rag-evaluation-data",
- repo_type="dataset",
- local_dir="data"
-)
-```
-
-This will download all the necessary files into a local `data` folder, preserving the repository structure. The evaluation scripts in the main [LEANN-RAG Space](https://huggingface.co/LEANN-RAG) are configured to work with this data structure.
+暂时平安。另外,支持我勇于说出真相的战友们 https://github.com/HW-whistleblower/True-Story-of-Pangu/issues/317
diff --git a/docs/features.md b/docs/features.md
index 875f9cf..51c0c4f 100644
--- a/docs/features.md
+++ b/docs/features.md
@@ -13,7 +13,7 @@
- **🚀 High-throughput Embedding Pipeline** - Optimized batched processing for maximum efficiency
- **🎯 Two-level Search** - Novel coarse-to-fine search overlap for accelerated query processing (optional)
- **💾 Memory-mapped Indices** - Fast startup with raw text mapping to reduce memory overhead
-- **🚀 MLX Support** - Ultra-fast recompute/build with quantized embedding models, accelerating building and search ([minimal example](test/build_mlx_index.py))
+- **🚀 MLX Support** - Ultra-fast recompute/build with quantized embedding models, accelerating building and search ([minimal example](../examples/mlx_demo.py))
## 🎨 Developer Experience
diff --git a/docs/normalized_embeddings.md b/docs/normalized_embeddings.md
index 777b218..e873489 100644
--- a/docs/normalized_embeddings.md
+++ b/docs/normalized_embeddings.md
@@ -72,4 +72,4 @@ Using the wrong distance metric with normalized embeddings can lead to:
- **Incorrect ranking** of search results
- **Suboptimal performance** compared to using the correct metric
-For more details on why this happens, see our analysis of [OpenAI embeddings with MIPS](../examples/document_rag.py).
+For more details on why this happens, see the [embedding detection code](../packages/leann-core/src/leann/api.py), which automatically handles normalized embeddings and MIPS distance metric issues.
diff --git a/examples/simple_demo.py b/examples/basic_demo.py
similarity index 96%
rename from examples/simple_demo.py
rename to examples/basic_demo.py
index 0d16776..05b2651 100644
--- a/examples/simple_demo.py
+++ b/examples/basic_demo.py
@@ -1,6 +1,6 @@
"""
Simple demo showing basic leann usage
-Run: uv run python examples/simple_demo.py
+Run: uv run python examples/basic_demo.py
"""
import argparse
@@ -81,7 +81,7 @@ def main():
print()
print("Demo completed! Try running:")
- print(" uv run python examples/document_search.py")
+ print(" uv run python apps/document_rag.py")
if __name__ == "__main__":
diff --git a/examples/data/README.md b/examples/data/README.md
deleted file mode 100644
index 0dd35da..0000000
--- a/examples/data/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# 盘古之殇:华为诺亚盘古大模型研发历程的心酸与黑暗
-
-各位好,
-
-我是一名盘古大模型团队,华为诺亚方舟实验室的员工。
-
-首先为自证身份,列举一些细节:
-
-1. 现诺亚主任,前算法应用部部长,后改名为小模型实验室的主任王云鹤。前诺亚主任:姚骏(大家称姚老师)。几个实验室主任:唐睿明(明哥,明队,已离职),尚利峰,张维(维哥),郝建业(郝老师),刘武龙(称呼为武龙所)等。其他骨干成员和专家陆续有很多人离职。
-2. 我们隶属于“四野”这个组织。四野下属有许多纵队,基础语言大模型是四纵。王云鹤的小模型是十六纵队。我们参加过苏州的集结,有各种月份的时间节点。在苏州攻关会颁发任务令,需要在节点前达成目标。苏州集结会把各地的人员都集中在苏州研究所,平常住宾馆,比如在甪直的酒店,与家人孩子天各一方。
-3. 在苏州集结的时候周六默认上班,非常辛苦,不过周六有下午茶,有一次还有小龙虾。在苏州研究所的工位搬迁过一次,从一栋楼换到了另一栋。苏州研究所楼栋都是欧式装修,门口有大坡,里面景色很不错。去苏州集结一般至少要去一周,甚至更久,多的人甚至一两个月都回不了家。
-4. 诺亚曾经传说是研究型的,但是来了之后因为在四野做大模型项目,项目成员完全变成了交付型的,且充满了例会,评审,汇报。很多时候做实验都要申请。团队需要对接终端小艺,华为云,ICT等诸多业务线,交付压力不小。
-5. 诺亚研发的盘古模型早期内部代号叫做“盘古智子”,一开始只有内部需要申请试用的网页版,到后续迫于压力在welink上接入和公测开放。
-
-这些天发生关于质疑盘古大模型抄袭千问的事情闹的沸沸扬扬。作为一个盘古团队的成员,我最近夜夜辗转反侧,难以入眠。盘古的品牌受到如此大的影响,一方面,我自私的为我的职业发展担忧,也为自己过去的努力工作感到不值。另一方面,由于有人开始揭露这些事情我内心又感到大快人心。在多少个日日夜夜,我们对内部某些人一次次靠着造假而又获得了无数利益的行为咬牙切齿而又无能为力。这种压抑和羞辱也逐渐消磨了我对华为的感情,让我在这里的时日逐渐浑浑噩噩,迷茫无措,时常怀疑自己的人生和自我价值。
-
-我承认我是一个懦弱的人,作为一个小小的打工人,我不仅不敢和王云鹤等内部手眼通天的人做对,更不敢和华为这样的庞然大物做对。我很怕失去我的工作,毕竟我也有家人和孩子,所以我打心眼里很佩服揭露者。但是,看到内部还在试图洗地掩盖事实,蒙蔽公众的时候,我实在不能容忍了。我也希望勇敢一次,顺从自己本心。就算自损八百,我也希望能伤敌一千。我决定把我在这里的所见所闻(部分来自于同事口述)公布出来,关于盘古大模型的“传奇故事”:
-
-华为确实主要在昇腾卡上训练大模型(小模型实验室有不少英伟达的卡,他们之前也会用来训练,后面转移到昇腾)。曾经我被华为“打造世界第二选择”的决心而折服,我本身也曾经对华为有深厚的感情。我们陪着昇腾一步步摸爬滚打,从充满bug到现在能训出模型,付出了巨大的心血和代价。
-
-最初我们的算力非常有限,在910A上训练模型。那会只支持fp16,训练的稳定性远不如bf16。盘古的moe开始很早,23年就主要是训练38Bmoe模型和后续的71B dense模型。71B的dense模型通过扩增变成了第一代的135Bdense模型,后面主力模型也逐渐在910B上训练。
-
-71B和135B模型都有一个巨大的硬伤就是tokenizer。当时使用的tokenizer编码效率极低,每个单个的符号,数字,空格,乃至汉字都会占用一个token。可想而知这会非常浪费算力,且使得模型的效果很差。这时候小模型实验室正好有个自己训的词表。姚老师当时怀疑是不是模型的tokenizer不好(虽然事后来看,他的怀疑是无疑正确的),于是就决定,让71B和135B换tokenizer,因为小模型实验室曾经尝试过。团队缝合了两个tokenizer,开始了tokenizer的更换。71B模型的更换失败了,而135B因为采用了更精细的embedding初始化策略,续训了至少1T的数据后词表总算更换成功,但可想而知,效果并不会变好。
-
-于此同期,阿里和智谱等国内其他公司在GPU上训练,且已经摸索出了正确的方法,盘古和竞品的差距越来越大。内部一个230B从头训练的dense模型又因为各种原因训练失败,导致项目的状况几乎陷入绝境。面临几个节点的压力以及内部对盘古的强烈质疑时,团队的士气低迷到了极点。团队在算力极其有限的时候,做出了很多努力和挣扎。比如,团队偶然发现当时的38B moe并没有预期moe的效果。于是去掉了moe参数,还原为了13B的dense模型。由于38B的moe源自很早的pangu alpha 13B,架构相对落后,团队进行了一系列的操作,比如切换绝对位置编码到rope,去掉bias,切换为rmsnorm。同时鉴于tokenizer的一些失败和换词表的经验,这个模型的词表也更换为了王云鹤的小模型实验室7B模型所使用的词表。后面这个13B模型进行了扩增续训,变成了第二代38B dense模型(在几个月内这个模型都是主要的盘古中档位模型),曾经具有一定的竞争力。但是,由于更大的135B模型架构落后,且更换词表模型损伤巨大(后续分析发现当时更换的缝合词表有更严重的bug),续训后也与千问等当时国内领先模型存在很大差距。这时由于内部的质疑声和领导的压力也越来越大。团队的状态几乎陷入了绝境。
-
-在这种情况下,王云鹤和他的小模型实验室出手了。他们声称是从旧的135B参数继承改造而来,通过训练短短的几百B数据,各项指标平均提升了十个点左右。实际上,这就是他们套壳应用到大模型的第一次杰作。华为的外行领导内行,使得领导完全对于这种扯淡的事情没有概念,他们只会觉得肯定是有什么算法创新。经过内部的分析,他们实际上是使用Qwen 1.5 110B续训而来,通过加层,扩增ffn维度,添加盘古pi论文的一些机制得来,凑够了大概135B的参数。实际上,旧的135B有107层,而这个模型只有82层,各种配置也都不一样。新的来路不明的135B训练完很多参数的分布也和Qwen 110B几乎一模一样。连模型代码的类名当时都是Qwen,甚至懒得改名。后续这个模型就是所谓的135B V2。而这个模型当时也提供给了很多下游,甚至包括外部客户。
-
-这件事对于我们这些认真诚实做事的同事们带来了巨大的冲击,内部很多人其实都知道这件事,甚至包括终端和华为云。我们都戏称以后别叫盘古模型了,叫千古吧。当时团队成员就想向bcg举报了,毕竟这已经是重大的业务造假了。但是后面据说被领导拦了下来,因为更高级别的领导(比如姚老师,以及可能熊总和查老)其实后面也知道了,但是并不管,因为通过套壳拿出好的结果,对他们也是有利的。这件事使得当时团队几位最强的同事开始心灰意冷,离职跑路也逐渐成为挂在嘴边的事。
-
-此时,盘古似乎迎来了转机。由于前面所述的这些盘古模型基本都是续训和改造而来,当时诺亚完全没有掌握从头训练的技术,何况还是在昇腾的NPU上进行训练。在当时团队的核心成员的极力争取下,盘古开始了第三代模型的训练,付出了巨大的努力后,在数据架构和训练算法方面都与业界逐渐接轨,而这其中的艰辛和小模型实验室的人一点关系都没有。
-
-一开始团队成员毫无信心,只从一个13B的模型开始训练,但是后面发现效果还不错,于是这个模型后续再次进行了一次参数扩增,变成了第三代的38B,代号38B V3。想必很多产品线的兄弟都对这个模型很熟悉。当时这个模型的tokenizer是基于llama的词表进行扩展的(也是业界常见的做法)。而当时王云鹤的实验室做出来了另一个词表(也就是后续pangu系列的词表)。当时两个词表还被迫进行了一次赛马,最终没有明显的好坏结论。于是,领导当即决定,应该统一词表,使用王云鹤他们的。于是,在后续从头训练的135B V3(也就是对外的Pangu Ultra),便是采用了这个tokenizer。这也解释了很多使用我们模型的兄弟的疑惑,为什么当时同为V3代的两个不同档位的模型,会使用不同的tokenizer。
-
-
-我们打心眼里觉得,135B V3是我们四纵团队当时的骄傲。这是第一个真正意义上的,华为全栈自研,正经从头训练的千亿级别的模型,且效果与24年同期竞品可比的。写到这里我已经热泪盈眶,太不容易了。当时为了稳定训练,团队做了大量实验对比,并且多次在模型梯度出现异常的时候进行及时回退重启。这个模型真正做到了后面技术报告所说的训练全程没有一个loss spike。我们克服了不知道多少困难,我们做到了,我们愿用生命和荣誉保证这个模型训练的真实性。多少个凌晨,我们为了它的训练而不眠。在被内部心声骂的一文不值的时候,我们有多么不甘,有多少的委屈,我们挺住了。
-
-我们这帮人是真的在为打磨国产算力底座燃烧自己的青春啊……客居他乡,我们放弃了家庭,放弃了假期,放弃了健康,放弃了娱乐,抛头颅洒热血,其中的艰辛与困苦,寥寥数笔不足以概括其万一。在各种动员大会上,当时口号中喊出的盘古必胜,华为必胜,我们心里是真的深深被感动。
-
-然而,我们的所有辛苦的成果,经常被小模型实验室轻飘飘的拿走了。数据,直接要走。代码,直接要走,还要求我们配合适配到能一键运行。我们当时戏称小模型实验室为点鼠标实验室。我们付出辛苦,他们取得荣耀。果然应了那句话,你在负重前行是因为有人替你岁月静好。在这种情况下,越来越多的战友再也坚持不下去了,选择了离开。看到身边那些优秀的同事一个个离职,我的内心又感叹又难过。在这种作战一样的环境下,我们比起同事来说更像是战友。他们在技术上也有无数值得我学习的地方,堪称良师。看到他们去了诸如字节Seed,Deepseek,月之暗面,腾讯和快手等等很多出色的团队,我打心眼里为他们高兴和祝福,脱离了这个辛苦却肮脏的地方。我至今还对一位离职同事的话记忆犹新,ta说:“来这里是我技术生涯中的耻辱,在这里再呆每一天都是浪费生命”。话虽难听却让我无言以对。我担心我自己技术方面的积累不足,以及没法适应互联网公司高淘汰的环境,让我多次想离职的心始终没有迈出这一步。
-
-盘古除了dense模型,后续也启动了moe的探索。一开始训练的是一个224B的moe模型。而与之平行的,小模型实验室也开启了第二次主要的套壳行动(次要的插曲可能还包括一些别的模型,比如math模型),即这次流传甚广的pangu pro moe 72B。这个模型内部自称是从小模型实验室的7B扩增上来的(就算如此,这也与技术报告不符,何况是套壳qwen 2.5的14b续训)。还记得他们训了没几天,内部的评测就立刻追上了当时的38B V3。AI系统实验室很多兄弟因为需要适配模型,都知道他们的套壳行动,只是迫于各种原因,无法伸张正义。实际上,对于后续训了很久很久的这个模型,Honestagi能够分析出这个量级的相似性我已经很诧异了,因为这个模型为了续训洗参数,所付出的算力甚至早就足够从头训一个同档位的模型了。听同事说他们为了洗掉千问的水印,采取了不少办法,甚至包括故意训了脏数据。这也为学术界研究模型血缘提供了一个前所未有的特殊模范吧。以后新的血缘方法提出可以拿出来溜溜。
-
-24年底和25年初,在Deepseek v3和r1发布之后,由于其惊艳的技术水平,团队受到了巨大的冲击,也受到了更大的质疑。于是为了紧跟潮流,盘古模仿Deepseek的模型尺寸,开启了718B moe的训练。这个时候,小模型实验室再次出手了。他们选择了套壳Deepseekv3续训。他们通过冻住Deepseek加载的参数,进行训练。连任务加载ckpt的目录都是deepseekv3,改都不改,何其嚣张?与之相反,一些有真正技术信仰的同事,在从头训练另一个718B的moe。但其中出现了各种各样的问题。但是很显然,这个模型怎么可能比直接套壳的好呢?如果不是团队leader坚持,早就被叫停了。
-
-华为的流程管理之繁重,严重拖累了大模型的研发节奏,例如版本管理,模型血缘,各种流程化,各种可追溯。讽刺的是,小模型实验室的模型似乎从来不受这些流程的约束,想套壳就套壳,想续训就续训,算力源源不断的伸手拿走。这种强烈到近乎魔幻的对比,说明了当前流程管理的情况:只许州官放火,不许百姓点灯。何其可笑?何其可悲?何其可恶?何其可耻!
-
-HonestAGI的事情出来后,内部让大家不停的研讨分析,如何公关和“回应”。诚然,这个原文的分析也许不够有力,给了王云鹤与小模型实验室他们狡辩和颠倒黑白的机会。为此,这两天我内心感到作呕,时时怀疑自己的人生意义以及苍天无眼。我不奉陪了,我要离职了,同时我也在申请从盘古部分技术报告的作者名单中移除。曾经在这些技术报告上署名是我一生都无法抹除的污点。当时我没想到,他们竟然猖狂到敢开源。我没想到,他们敢如此愚弄世人,大肆宣发。当时,我也许是存了侥幸心理,没有拒绝署名。我相信很多扎实做事的战友,也只是被迫上了贼船,或者不知情。但这件事已经无法挽回,我希望我的余生能够坚持扎实做真正有意义的事,为我当时的软弱和不坚定赎罪。
-
-深夜写到这里,我已经泪流满面,泣不成声。还记得一些出色的同事离职时,我苦笑问他们要不要发个长长的心声惯例帖,揭露一下现状。对方说:不了,浪费时间,而且我也怕揭露出来你们过的更糟。我当时一下黯然神伤,因为曾经共同为了理想奋斗过的战友已经彻底对华为彻底灰心了。当时大家调侃,我们用着当年共产党的小米加步枪,组织却有着堪比当年国民党的作风。
-
-曾几何时,我为我们用着小米加步枪打败洋枪洋炮而自豪。
-
-现在,我累了,我想投降。
-
-其实时至今日,我还是真心希望华为能认真吸取教训,能做好盘古,把盘古做到世界一流,把昇腾变成英伟达的水平。内部的劣币驱逐良币,使得诺亚乃至华为在短时间内急剧流失了大量出色的大模型人才。相信他们也正在如Deepseek等各个团队闪耀着,施展着他们的抱负才华,为中美在AI的激烈竞赛中奉献力量。我时常感叹,华为不是没有人才,而是根本不知道怎么留住人才。如果给这些人合适的环境,合适的资源,更少的枷锁,更少的政治斗争,盘古何愁不成?
-
-最后:我以生命,人格和荣誉发誓,我写的以上所有内容均为真实(至少在我有限的认知范围内)。我没有那么高的技术水平以及机会去做详尽扎实的分析,也不敢直接用内部记录举证,怕因为信息安全抓到。但是我相信我很多曾经的战友,会为我作证。在华为内部的兄弟,包括我们曾经服务过的产品线兄弟们,相信本文的无数细节能和你们的印象对照,印证我的说法。你们可能也曾经被蒙骗,但这些残酷的真相不会被尘封。我们奋战过的痕迹,也不应该被扭曲和埋葬。
-
-写了这么多,某些人肯定想把我找出来,抹杀掉。公司搞不好也想让我噤声乃至追责。如果真的这样,我,乃至我的家人的人身乃至生命安全可能都会受到威胁。为了自我保护,我近期每天会跟大家报平安。
-
-如果我消失了,就当是我为了真理和理想,为了华为乃至中国能够更好地发展算力和AI而牺牲了吧,我愿埋葬于那片曾经奋斗过的地方。
-
-诺亚,再见
-
-2025年7月6日凌晨 写于深圳
-
----
-
-各位好,
-
-感谢大家的关心与祝福。我目前暂时安全,但公司应该在进行排查与某些名单收集,后续情况未知。
-
-我补充一些细节,以免某些人继续颠倒黑白。
-
-关于135B V2,小模型实验室在迅速地完成套壳并拿完所有套壳带来的好处后(比如任务令表彰和及时激励),因为不想继续支撑下游应用和模型迭代,又把这个烫手山芋甩给了四纵。确实技高一筹,直接把四纵的兄弟们拉下水。同事提供过去一个老旧的模型,最终拿回了一个当时一个魔改的先进的千问。做大模型的人,自己做的模型就像自己孩子一样熟悉,不要把别人都当傻子。就像自家儿子出门一趟,回来个别人家孩子。
-
-盘古report的署名是不符合学术规范的。例如,135B V3有不少有技术贡献的人,因为作者名额数量限制,劳动成果没有得到应有的回报,团队内曾经有不小的意见。这个模型当时是大家智慧和汗水的结晶,甚至是团队当时的精神支柱,支撑着不少兄弟们继续留在诺亚。所谓的名额限制,以及挂名了一些毫无技术贡献的人(如一些小模型实验室的人),让兄弟们何其心寒。
-
----
-
-暂时平安。另外,支持我勇于说出真相的战友们 https://github.com/HW-whistleblower/True-Story-of-Pangu/issues/317
diff --git a/examples/document_search.py b/examples/document_search.py
deleted file mode 100644
index fdb9167..0000000
--- a/examples/document_search.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python3
-"""
-Document search demo with recompute mode
-"""
-
-import shutil
-import time
-from pathlib import Path
-
-# Import backend packages to trigger plugin registration
-try:
- import leann_backend_diskann # noqa: F401
- import leann_backend_hnsw # noqa: F401
-
- print("INFO: Backend packages imported successfully.")
-except ImportError as e:
- print(f"WARNING: Could not import backend packages. Error: {e}")
-
-# Import upper-level API from leann-core
-from leann.api import LeannBuilder, LeannChat, LeannSearcher
-
-
-def load_sample_documents():
- """Create sample documents for demonstration"""
- docs = [
- {
- "title": "Intro to Python",
- "content": "Python is a high-level, interpreted language known for simplicity.",
- },
- {
- "title": "ML Basics",
- "content": "Machine learning builds systems that learn from data.",
- },
- {
- "title": "Data Structures",
- "content": "Data structures like arrays, lists, and graphs organize data.",
- },
- ]
- return docs
-
-
-def main():
- print("==========================================================")
- print("=== Leann Document Search Demo (DiskANN + Recompute) ===")
- print("==========================================================")
-
- INDEX_DIR = Path("./test_indices")
- INDEX_PATH = str(INDEX_DIR / "documents.diskann")
- BACKEND_TO_TEST = "diskann"
-
- if INDEX_DIR.exists():
- print(f"--- Cleaning up old index directory: {INDEX_DIR} ---")
- shutil.rmtree(INDEX_DIR)
-
- # --- 1. Build index ---
- print(f"\n[PHASE 1] Building index using '{BACKEND_TO_TEST}' backend...")
-
- builder = LeannBuilder(backend_name=BACKEND_TO_TEST, graph_degree=32, complexity=64)
-
- documents = load_sample_documents()
- print(f"Loaded {len(documents)} sample documents.")
- for doc in documents:
- builder.add_text(doc["content"], metadata={"title": doc["title"]})
-
- builder.build_index(INDEX_PATH)
- print("\nIndex built!")
-
- # --- 2. Basic search demo ---
- print(f"\n[PHASE 2] Basic search using '{BACKEND_TO_TEST}' backend...")
- searcher = LeannSearcher(index_path=INDEX_PATH)
-
- query = "What is machine learning?"
- print(f"\nQuery: '{query}'")
-
- print("\n--- Basic search mode (PQ computation) ---")
- start_time = time.time()
- results = searcher.search(query, top_k=2)
- basic_time = time.time() - start_time
-
- print(f"⏱️ Basic search time: {basic_time:.3f} seconds")
- print(">>> Basic search results <<<")
- for i, res in enumerate(results, 1):
- print(
- f" {i}. ID: {res.id}, Score: {res.score:.4f}, Text: '{res.text}', Metadata: {res.metadata}"
- )
-
- # --- 3. Recompute search demo ---
- print("\n[PHASE 3] Recompute search using embedding server...")
-
- print("\n--- Recompute search mode (get real embeddings via network) ---")
-
- # Configure recompute parameters
- recompute_params = {
- "recompute_beighbor_embeddings": True, # Enable network recomputation
- "USE_DEFERRED_FETCH": False, # Don't use deferred fetch
- "skip_search_reorder": True, # Skip search reordering
- "dedup_node_dis": True, # Enable node distance deduplication
- "prune_ratio": 0.1, # Pruning ratio 10%
- "batch_recompute": False, # Don't use batch recomputation
- "global_pruning": False, # Don't use global pruning
- "zmq_port": 5555, # ZMQ port
- "embedding_model": "sentence-transformers/all-mpnet-base-v2",
- }
-
- print("Recompute parameter configuration:")
- for key, value in recompute_params.items():
- print(f" {key}: {value}")
-
- print("\n🔄 Executing Recompute search...")
- try:
- start_time = time.time()
- recompute_results = searcher.search(query, top_k=2, **recompute_params)
- recompute_time = time.time() - start_time
-
- print(f"⏱️ Recompute search time: {recompute_time:.3f} seconds")
- print(">>> Recompute search results <<<")
- for i, res in enumerate(recompute_results, 1):
- print(
- f" {i}. ID: {res.id}, Score: {res.score:.4f}, Text: '{res.text}', Metadata: {res.metadata}"
- )
-
- # Compare results
- print("\n--- Result comparison ---")
- print(f"Basic search time: {basic_time:.3f} seconds")
- print(f"Recompute time: {recompute_time:.3f} seconds")
-
- print("\nBasic search vs Recompute results:")
- for i in range(min(len(results), len(recompute_results))):
- basic_score = results[i].score
- recompute_score = recompute_results[i].score
- score_diff = abs(basic_score - recompute_score)
- print(
- f" Position {i + 1}: PQ={basic_score:.4f}, Recompute={recompute_score:.4f}, Difference={score_diff:.4f}"
- )
-
- if recompute_time > basic_time:
- print("✅ Recompute mode working correctly (more accurate but slower)")
- else:
- print("i️ Recompute time is unusually fast, network recomputation may not be enabled")
-
- except Exception as e:
- print(f"❌ Recompute search failed: {e}")
- print("This usually indicates an embedding server connection issue")
-
- # --- 4. Chat demo ---
- print("\n[PHASE 4] Starting chat session...")
- chat = LeannChat(index_path=INDEX_PATH)
- chat_response = chat.ask(query)
- print(f"You: {query}")
- print(f"Leann: {chat_response}")
-
- print("\n==========================================================")
- print("✅ Demo finished successfully!")
- print("==========================================================")
-
-
-if __name__ == "__main__":
- main()
diff --git a/examples/mail_reader_llamaindex.py b/examples/mail_reader_llamaindex.py
deleted file mode 100644
index cfb6b82..0000000
--- a/examples/mail_reader_llamaindex.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import argparse
-import os
-import sys
-from pathlib import Path
-
-# Add the project root to Python path so we can import from examples
-project_root = Path(__file__).parent.parent
-sys.path.insert(0, str(project_root))
-
-import torch
-from llama_index.core import StorageContext, VectorStoreIndex
-from llama_index.core.node_parser import SentenceSplitter
-
-# --- EMBEDDING MODEL ---
-from llama_index.embeddings.huggingface import HuggingFaceEmbedding
-
-# --- END EMBEDDING MODEL ---
-# Import EmlxReader from the new module
-from examples.email_data.LEANN_email_reader import EmlxReader
-
-
-def create_and_save_index(
- mail_path: str,
- save_dir: str = "mail_index_embedded",
- max_count: int = 1000,
- include_html: bool = False,
-):
- print("Creating index from mail data with embedded metadata...")
- documents = EmlxReader(include_html=include_html).load_data(mail_path, max_count=max_count)
- if not documents:
- print("No documents loaded. Exiting.")
- return None
- text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=25)
- # Use facebook/contriever as the embedder
- embed_model = HuggingFaceEmbedding(model_name="facebook/contriever")
- # set on device
-
- if torch.cuda.is_available():
- embed_model._model.to("cuda")
- # set mps
- elif torch.backends.mps.is_available():
- embed_model._model.to("mps")
- else:
- embed_model._model.to("cpu")
- index = VectorStoreIndex.from_documents(
- documents, transformations=[text_splitter], embed_model=embed_model
- )
- os.makedirs(save_dir, exist_ok=True)
- index.storage_context.persist(persist_dir=save_dir)
- print(f"Index saved to {save_dir}")
- return index
-
-
-def load_index(save_dir: str = "mail_index_embedded"):
- try:
- storage_context = StorageContext.from_defaults(persist_dir=save_dir)
- index = VectorStoreIndex.from_vector_store(
- storage_context.vector_store, storage_context=storage_context
- )
- print(f"Index loaded from {save_dir}")
- return index
- except Exception as e:
- print(f"Error loading index: {e}")
- return None
-
-
-def query_index(index, query: str):
- if index is None:
- print("No index available for querying.")
- return
- query_engine = index.as_query_engine()
- response = query_engine.query(query)
- print(f"Query: {query}")
- print(f"Response: {response}")
-
-
-def main():
- # Parse command line arguments
- parser = argparse.ArgumentParser(
- description="LlamaIndex Mail Reader - Create and query email index"
- )
- parser.add_argument(
- "--mail-path",
- type=str,
- default="/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data/9/Messages",
- help="Path to mail data directory",
- )
- parser.add_argument(
- "--save-dir",
- type=str,
- default="mail_index_embedded",
- help="Directory to store the index (default: mail_index_embedded)",
- )
- parser.add_argument(
- "--max-emails",
- type=int,
- default=10000,
- help="Maximum number of emails to process",
- )
- parser.add_argument(
- "--include-html",
- action="store_true",
- default=False,
- help="Include HTML content in email processing (default: False)",
- )
-
- args = parser.parse_args()
-
- mail_path = args.mail_path
- save_dir = args.save_dir
-
- if os.path.exists(save_dir) and os.path.exists(os.path.join(save_dir, "vector_store.json")):
- print("Loading existing index...")
- index = load_index(save_dir)
- else:
- print("Creating new index...")
- index = create_and_save_index(
- mail_path,
- save_dir,
- max_count=args.max_emails,
- include_html=args.include_html,
- )
- if index:
- queries = [
- "Hows Berkeley Graduate Student Instructor",
- "how's the icloud related advertisement saying",
- "Whats the number of class recommend to take per semester for incoming EECS students",
- ]
- for query in queries:
- print("\n" + "=" * 50)
- query_index(index, query)
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/build_mlx_index.py b/examples/mlx_demo.py
similarity index 100%
rename from test/build_mlx_index.py
rename to examples/mlx_demo.py
diff --git a/examples/multi_vector_aggregator.py b/examples/multi_vector_aggregator.py
deleted file mode 100644
index 3ec376a..0000000
--- a/examples/multi_vector_aggregator.py
+++ /dev/null
@@ -1,360 +0,0 @@
-#!/usr/bin/env python3
-"""
-Multi-Vector Aggregator for Fat Embeddings
-==========================================
-
-This module implements aggregation strategies for multi-vector embeddings,
-similar to ColPali's approach where multiple patch vectors represent a single document.
-
-Key features:
-- MaxSim aggregation (take maximum similarity across patches)
-- Voting-based aggregation (count patch matches)
-- Weighted aggregation (attention-score weighted)
-- Spatial clustering of matching patches
-- Document-level result consolidation
-"""
-
-from collections import defaultdict
-from dataclasses import dataclass
-from typing import Any
-
-import numpy as np
-
-
-@dataclass
-class PatchResult:
- """Represents a single patch search result."""
-
- patch_id: int
- image_name: str
- image_path: str
- coordinates: tuple[int, int, int, int] # (x1, y1, x2, y2)
- score: float
- attention_score: float
- scale: float
- metadata: dict[str, Any]
-
-
-@dataclass
-class AggregatedResult:
- """Represents an aggregated document-level result."""
-
- image_name: str
- image_path: str
- doc_score: float
- patch_count: int
- best_patch: PatchResult
- all_patches: list[PatchResult]
- aggregation_method: str
- spatial_clusters: list[list[PatchResult]] | None = None
-
-
-class MultiVectorAggregator:
- """
- Aggregates multiple patch-level results into document-level results.
- """
-
- def __init__(
- self,
- aggregation_method: str = "maxsim",
- spatial_clustering: bool = True,
- cluster_distance_threshold: float = 100.0,
- ):
- """
- Initialize the aggregator.
-
- Args:
- aggregation_method: "maxsim", "voting", "weighted", or "mean"
- spatial_clustering: Whether to cluster spatially close patches
- cluster_distance_threshold: Distance threshold for spatial clustering
- """
- self.aggregation_method = aggregation_method
- self.spatial_clustering = spatial_clustering
- self.cluster_distance_threshold = cluster_distance_threshold
-
- def aggregate_results(
- self, search_results: list[dict[str, Any]], top_k: int = 10
- ) -> list[AggregatedResult]:
- """
- Aggregate patch-level search results into document-level results.
-
- Args:
- search_results: List of search results from LeannSearcher
- top_k: Number of top documents to return
-
- Returns:
- List of aggregated document results
- """
- # Group results by image
- image_groups = defaultdict(list)
-
- for result in search_results:
- metadata = result.metadata
- if "image_name" in metadata and "patch_id" in metadata:
- patch_result = PatchResult(
- patch_id=metadata["patch_id"],
- image_name=metadata["image_name"],
- image_path=metadata["image_path"],
- coordinates=tuple(metadata["coordinates"]),
- score=result.score,
- attention_score=metadata.get("attention_score", 0.0),
- scale=metadata.get("scale", 1.0),
- metadata=metadata,
- )
- image_groups[metadata["image_name"]].append(patch_result)
-
- # Aggregate each image group
- aggregated_results = []
- for image_name, patches in image_groups.items():
- if len(patches) == 0:
- continue
-
- agg_result = self._aggregate_image_patches(image_name, patches)
- aggregated_results.append(agg_result)
-
- # Sort by aggregated score and return top-k
- aggregated_results.sort(key=lambda x: x.doc_score, reverse=True)
- return aggregated_results[:top_k]
-
- def _aggregate_image_patches(
- self, image_name: str, patches: list[PatchResult]
- ) -> AggregatedResult:
- """Aggregate patches for a single image."""
-
- if self.aggregation_method == "maxsim":
- doc_score = max(patch.score for patch in patches)
- best_patch = max(patches, key=lambda p: p.score)
-
- elif self.aggregation_method == "voting":
- # Count patches above threshold
- threshold = np.percentile([p.score for p in patches], 75)
- doc_score = sum(1 for patch in patches if patch.score >= threshold)
- best_patch = max(patches, key=lambda p: p.score)
-
- elif self.aggregation_method == "weighted":
- # Weight by attention scores
- total_weighted_score = sum(p.score * p.attention_score for p in patches)
- total_weights = sum(p.attention_score for p in patches)
- doc_score = total_weighted_score / max(total_weights, 1e-8)
- best_patch = max(patches, key=lambda p: p.score * p.attention_score)
-
- elif self.aggregation_method == "mean":
- doc_score = np.mean([patch.score for patch in patches])
- best_patch = max(patches, key=lambda p: p.score)
-
- else:
- raise ValueError(f"Unknown aggregation method: {self.aggregation_method}")
-
- # Spatial clustering if enabled
- spatial_clusters = None
- if self.spatial_clustering:
- spatial_clusters = self._cluster_patches_spatially(patches)
-
- return AggregatedResult(
- image_name=image_name,
- image_path=patches[0].image_path,
- doc_score=float(doc_score),
- patch_count=len(patches),
- best_patch=best_patch,
- all_patches=sorted(patches, key=lambda p: p.score, reverse=True),
- aggregation_method=self.aggregation_method,
- spatial_clusters=spatial_clusters,
- )
-
- def _cluster_patches_spatially(self, patches: list[PatchResult]) -> list[list[PatchResult]]:
- """Cluster patches that are spatially close to each other."""
- if len(patches) <= 1:
- return [patches]
-
- clusters = []
- remaining_patches = patches.copy()
-
- while remaining_patches:
- # Start new cluster with highest scoring remaining patch
- seed_patch = max(remaining_patches, key=lambda p: p.score)
- current_cluster = [seed_patch]
- remaining_patches.remove(seed_patch)
-
- # Add nearby patches to cluster
- added_to_cluster = True
- while added_to_cluster:
- added_to_cluster = False
- for patch in remaining_patches.copy():
- if self._is_patch_nearby(patch, current_cluster):
- current_cluster.append(patch)
- remaining_patches.remove(patch)
- added_to_cluster = True
-
- clusters.append(current_cluster)
-
- return sorted(clusters, key=lambda cluster: max(p.score for p in cluster), reverse=True)
-
- def _is_patch_nearby(self, patch: PatchResult, cluster: list[PatchResult]) -> bool:
- """Check if a patch is spatially close to any patch in the cluster."""
- patch_center = self._get_patch_center(patch.coordinates)
-
- for cluster_patch in cluster:
- cluster_center = self._get_patch_center(cluster_patch.coordinates)
- distance = np.sqrt(
- (patch_center[0] - cluster_center[0]) ** 2
- + (patch_center[1] - cluster_center[1]) ** 2
- )
-
- if distance <= self.cluster_distance_threshold:
- return True
-
- return False
-
- def _get_patch_center(self, coordinates: tuple[int, int, int, int]) -> tuple[float, float]:
- """Get center point of a patch."""
- x1, y1, x2, y2 = coordinates
- return ((x1 + x2) / 2, (y1 + y2) / 2)
-
- def print_aggregated_results(
- self, results: list[AggregatedResult], max_patches_per_doc: int = 3
- ):
- """Pretty print aggregated results."""
- print(f"\n🔍 Aggregated Results (method: {self.aggregation_method})")
- print("=" * 80)
-
- for i, result in enumerate(results):
- print(f"\n{i + 1}. {result.image_name}")
- print(f" Doc Score: {result.doc_score:.4f} | Patches: {result.patch_count}")
- print(f" Path: {result.image_path}")
-
- # Show best patch
- best = result.best_patch
- print(
- f" 🌟 Best Patch: #{best.patch_id} at {best.coordinates} (score: {best.score:.4f})"
- )
-
- # Show top patches
- print(" 📍 Top Patches:")
- for j, patch in enumerate(result.all_patches[:max_patches_per_doc]):
- print(
- f" {j + 1}. Patch #{patch.patch_id}: {patch.score:.4f} at {patch.coordinates}"
- )
-
- # Show spatial clusters if available
- if result.spatial_clusters and len(result.spatial_clusters) > 1:
- print(f" 🗂️ Spatial Clusters: {len(result.spatial_clusters)}")
- for j, cluster in enumerate(result.spatial_clusters[:2]): # Show top 2 clusters
- cluster_score = max(p.score for p in cluster)
- print(
- f" Cluster {j + 1}: {len(cluster)} patches (best: {cluster_score:.4f})"
- )
-
-
-def demo_aggregation():
- """Demonstrate the multi-vector aggregation functionality."""
- print("=== Multi-Vector Aggregation Demo ===")
-
- # Simulate some patch-level search results
- # In real usage, these would come from LeannSearcher.search()
-
- class MockResult:
- def __init__(self, score, metadata):
- self.score = score
- self.metadata = metadata
-
- # Simulate results for 2 images with multiple patches each
- mock_results = [
- # Image 1: cats_and_kitchen.jpg - 4 patches
- MockResult(
- 0.85,
- {
- "image_name": "cats_and_kitchen.jpg",
- "image_path": "/path/to/cats_and_kitchen.jpg",
- "patch_id": 3,
- "coordinates": [100, 50, 224, 174], # Kitchen area
- "attention_score": 0.92,
- "scale": 1.0,
- },
- ),
- MockResult(
- 0.78,
- {
- "image_name": "cats_and_kitchen.jpg",
- "image_path": "/path/to/cats_and_kitchen.jpg",
- "patch_id": 7,
- "coordinates": [200, 300, 324, 424], # Cat area
- "attention_score": 0.88,
- "scale": 1.0,
- },
- ),
- MockResult(
- 0.72,
- {
- "image_name": "cats_and_kitchen.jpg",
- "image_path": "/path/to/cats_and_kitchen.jpg",
- "patch_id": 12,
- "coordinates": [150, 100, 274, 224], # Appliances
- "attention_score": 0.75,
- "scale": 1.0,
- },
- ),
- MockResult(
- 0.65,
- {
- "image_name": "cats_and_kitchen.jpg",
- "image_path": "/path/to/cats_and_kitchen.jpg",
- "patch_id": 15,
- "coordinates": [50, 250, 174, 374], # Furniture
- "attention_score": 0.70,
- "scale": 1.0,
- },
- ),
- # Image 2: city_street.jpg - 3 patches
- MockResult(
- 0.68,
- {
- "image_name": "city_street.jpg",
- "image_path": "/path/to/city_street.jpg",
- "patch_id": 2,
- "coordinates": [300, 100, 424, 224], # Buildings
- "attention_score": 0.80,
- "scale": 1.0,
- },
- ),
- MockResult(
- 0.62,
- {
- "image_name": "city_street.jpg",
- "image_path": "/path/to/city_street.jpg",
- "patch_id": 8,
- "coordinates": [100, 350, 224, 474], # Street level
- "attention_score": 0.75,
- "scale": 1.0,
- },
- ),
- MockResult(
- 0.55,
- {
- "image_name": "city_street.jpg",
- "image_path": "/path/to/city_street.jpg",
- "patch_id": 11,
- "coordinates": [400, 200, 524, 324], # Sky area
- "attention_score": 0.60,
- "scale": 1.0,
- },
- ),
- ]
-
- # Test different aggregation methods
- methods = ["maxsim", "voting", "weighted", "mean"]
-
- for method in methods:
- print(f"\n{'=' * 20} {method.upper()} AGGREGATION {'=' * 20}")
-
- aggregator = MultiVectorAggregator(
- aggregation_method=method,
- spatial_clustering=True,
- cluster_distance_threshold=100.0,
- )
-
- aggregated = aggregator.aggregate_results(mock_results, top_k=5)
- aggregator.print_aggregated_results(aggregated)
-
-
-if __name__ == "__main__":
- demo_aggregation()
diff --git a/examples/openai_hnsw_example.py b/examples/openai_hnsw_example.py
deleted file mode 100644
index 9dcbbf8..0000000
--- a/examples/openai_hnsw_example.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python3
-"""
-OpenAI Embedding Example
-
-Complete example showing how to build and search with OpenAI embeddings using HNSW backend.
-"""
-
-import os
-from pathlib import Path
-
-import dotenv
-from leann.api import LeannBuilder, LeannSearcher
-
-# Load environment variables
-dotenv.load_dotenv()
-
-
-def main():
- # Check if OpenAI API key is available
- api_key = os.getenv("OPENAI_API_KEY")
- if not api_key:
- print("ERROR: OPENAI_API_KEY environment variable not set")
- return False
-
- print(f"✅ OpenAI API key found: {api_key[:10]}...")
-
- # Sample texts
- sample_texts = [
- "Machine learning is a powerful technology that enables computers to learn from data.",
- "Natural language processing helps computers understand and generate human language.",
- "Deep learning uses neural networks with multiple layers to solve complex problems.",
- "Computer vision allows machines to interpret and understand visual information.",
- "Reinforcement learning trains agents to make decisions through trial and error.",
- "Data science combines statistics, math, and programming to extract insights from data.",
- "Artificial intelligence aims to create machines that can perform human-like tasks.",
- "Python is a popular programming language used extensively in data science and AI.",
- "Neural networks are inspired by the structure and function of the human brain.",
- "Big data refers to extremely large datasets that require special tools to process.",
- ]
-
- INDEX_DIR = Path("./simple_openai_test_index")
- INDEX_PATH = str(INDEX_DIR / "simple_test.leann")
-
- print("\n=== Building Index with OpenAI Embeddings ===")
- print(f"Index path: {INDEX_PATH}")
-
- try:
- # Use proper configuration for OpenAI embeddings
- builder = LeannBuilder(
- backend_name="hnsw",
- embedding_model="text-embedding-3-small",
- embedding_mode="openai",
- # HNSW settings for OpenAI embeddings
- M=16, # Smaller graph degree
- efConstruction=64, # Smaller construction complexity
- is_compact=True, # Enable compact storage for recompute
- is_recompute=True, # MUST enable for OpenAI embeddings
- num_threads=1,
- )
-
- print(f"Adding {len(sample_texts)} texts to the index...")
- for i, text in enumerate(sample_texts):
- metadata = {"id": f"doc_{i}", "topic": "AI"}
- builder.add_text(text, metadata)
-
- print("Building index...")
- builder.build_index(INDEX_PATH)
- print("✅ Index built successfully!")
-
- except Exception as e:
- print(f"❌ Error building index: {e}")
- import traceback
-
- traceback.print_exc()
- return False
-
- print("\n=== Testing Search ===")
-
- try:
- searcher = LeannSearcher(INDEX_PATH)
-
- test_queries = [
- "What is machine learning?",
- "How do neural networks work?",
- "Programming languages for data science",
- ]
-
- for query in test_queries:
- print(f"\n🔍 Query: '{query}'")
- results = searcher.search(query, top_k=3)
-
- print(f" Found {len(results)} results:")
- for i, result in enumerate(results):
- print(f" {i + 1}. Score: {result.score:.4f}")
- print(f" Text: {result.text[:80]}...")
-
- print("\n✅ Search test completed successfully!")
- return True
-
- except Exception as e:
- print(f"❌ Error during search: {e}")
- import traceback
-
- traceback.print_exc()
- return False
-
-
-if __name__ == "__main__":
- success = main()
- if success:
- print("\n🎉 Simple OpenAI index test completed successfully!")
- else:
- print("\n💥 Simple OpenAI index test failed!")
diff --git a/examples/resue_index.py b/examples/resue_index.py
deleted file mode 100644
index bec55ab..0000000
--- a/examples/resue_index.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import asyncio
-from pathlib import Path
-
-from leann.api import LeannChat
-
-INDEX_DIR = Path("./test_pdf_index_huawei")
-INDEX_PATH = str(INDEX_DIR / "pdf_documents.leann")
-
-
-async def main():
- print("\n[PHASE 2] Starting Leann chat session...")
- chat = LeannChat(index_path=INDEX_PATH)
- query = "What is the main idea of RL and give me 5 exapmle of classic RL algorithms?"
- query = "Based on the paper, what are the main techniques LEANN explores to reduce the storage overhead and DLPM explore to achieve Fairness and Efiiciency trade-off?"
-    # query = "What is the Pangu large model, what dark sides were encountered during Pangu's development, and in which city are task orders usually issued?"
- response = chat.ask(
- query, top_k=20, recompute_beighbor_embeddings=True, complexity=32, beam_width=1
- )
- print(f"\n[PHASE 2] Response: {response}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/test/mail_reader_llamaindex.py b/test/mail_reader_llamaindex.py
deleted file mode 100644
index 33fa1e9..0000000
--- a/test/mail_reader_llamaindex.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import email
-import os
-from typing import Any
-
-from llama_index.core import Document, VectorStoreIndex
-from llama_index.core.readers.base import BaseReader
-
-
-class EmlxReader(BaseReader):
- """
- Apple Mail .emlx file reader.
-
- Reads individual .emlx files from Apple Mail's storage format.
- """
-
- def __init__(self) -> None:
- """Initialize."""
- pass
-
- def load_data(self, input_dir: str, **load_kwargs: Any) -> list[Document]:
- """
- Load data from the input directory containing .emlx files.
-
- Args:
- input_dir: Directory containing .emlx files
- **load_kwargs:
- max_count (int): Maximum amount of messages to read.
- """
- docs: list[Document] = []
- max_count = load_kwargs.get("max_count", 1000)
- count = 0
-
- # Walk through the directory recursively
- for dirpath, dirnames, filenames in os.walk(input_dir):
- # Skip hidden directories
- dirnames[:] = [d for d in dirnames if not d.startswith(".")]
-
- for filename in filenames:
- if count >= max_count:
- break
-
- if filename.endswith(".emlx"):
- filepath = os.path.join(dirpath, filename)
- try:
- # Read the .emlx file
- with open(filepath, encoding="utf-8", errors="ignore") as f:
- content = f.read()
-
- # .emlx files have a length prefix followed by the email content
- # The first line contains the length, followed by the email
- lines = content.split("\n", 1)
- if len(lines) >= 2:
- email_content = lines[1]
-
- # Parse the email using Python's email module
- try:
- msg = email.message_from_string(email_content)
-
- # Extract email metadata
- subject = msg.get("Subject", "No Subject")
- from_addr = msg.get("From", "Unknown")
- to_addr = msg.get("To", "Unknown")
- date = msg.get("Date", "Unknown")
-
- # Extract email body
- body = ""
- if msg.is_multipart():
- for part in msg.walk():
- if (
- part.get_content_type() == "text/plain"
- or part.get_content_type() == "text/html"
- ):
- body += part.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
- # break
- else:
- body = msg.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
-
- # Create document content
- doc_content = f"""
-From: {from_addr}
-To: {to_addr}
-Subject: {subject}
-Date: {date}
-
-{body}
-"""
-
- # Create metadata
- metadata = {
- "file_path": filepath,
- "subject": subject,
- "from": from_addr,
- "to": to_addr,
- "date": date,
- "filename": filename,
- }
- if count == 0:
- print("--------------------------------")
- print("dir path", dirpath)
- print(metadata)
- print(doc_content)
- print("--------------------------------")
- body = []
- if msg.is_multipart():
- for part in msg.walk():
- print(
- "-------------------------------- get content type -------------------------------"
- )
- print(part.get_content_type())
- print(part)
- # body.append(part.get_payload(decode=True).decode('utf-8', errors='ignore'))
- print(
- "-------------------------------- get content type -------------------------------"
- )
- else:
- body = msg.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
- print(body)
-
- print(body)
- print("--------------------------------")
- doc = Document(text=doc_content, metadata=metadata)
- docs.append(doc)
- count += 1
-
- except Exception as e:
- print(f"!!!!!!! Error parsing email from {filepath}: {e} !!!!!!!!")
- continue
-
- except Exception as e:
- print(f"!!!!!!! Error reading file !!!!!!!! {filepath}: {e}")
- continue
-
- print(f"Loaded {len(docs)} email documents")
- return docs
-
-
-# Use the custom EmlxReader instead of MboxReader
-documents = EmlxReader().load_data(
- "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data/9/Messages",
- max_count=1000,
-) # Returns list of documents
-
-# Configure the index with larger chunk size to handle long metadata
-from llama_index.core.node_parser import SentenceSplitter
-
-# Create a custom text splitter with larger chunk size
-text_splitter = SentenceSplitter(chunk_size=2048, chunk_overlap=200)
-
-index = VectorStoreIndex.from_documents(
- documents, transformations=[text_splitter]
-) # Initialize index with documents
-
-query_engine = index.as_query_engine()
-res = query_engine.query("Hows Berkeley Graduate Student Instructor")
-print(res)
diff --git a/test/mail_reader_save_load.py b/test/mail_reader_save_load.py
deleted file mode 100644
index e7fb39f..0000000
--- a/test/mail_reader_save_load.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import email
-import os
-from typing import Any
-
-from llama_index.core import Document, StorageContext, VectorStoreIndex
-from llama_index.core.node_parser import SentenceSplitter
-from llama_index.core.readers.base import BaseReader
-
-
-class EmlxReader(BaseReader):
- """
- Apple Mail .emlx file reader.
-
- Reads individual .emlx files from Apple Mail's storage format.
- """
-
- def __init__(self) -> None:
- """Initialize."""
- pass
-
- def load_data(self, input_dir: str, **load_kwargs: Any) -> list[Document]:
- """
- Load data from the input directory containing .emlx files.
-
- Args:
- input_dir: Directory containing .emlx files
- **load_kwargs:
- max_count (int): Maximum amount of messages to read.
- """
- docs: list[Document] = []
- max_count = load_kwargs.get("max_count", 1000)
- count = 0
-
- # Walk through the directory recursively
- for dirpath, dirnames, filenames in os.walk(input_dir):
- # Skip hidden directories
- dirnames[:] = [d for d in dirnames if not d.startswith(".")]
-
- for filename in filenames:
- if count >= max_count:
- break
-
- if filename.endswith(".emlx"):
- filepath = os.path.join(dirpath, filename)
- try:
- # Read the .emlx file
- with open(filepath, encoding="utf-8", errors="ignore") as f:
- content = f.read()
-
- # .emlx files have a length prefix followed by the email content
- # The first line contains the length, followed by the email
- lines = content.split("\n", 1)
- if len(lines) >= 2:
- email_content = lines[1]
-
- # Parse the email using Python's email module
- try:
- msg = email.message_from_string(email_content)
-
- # Extract email metadata
- subject = msg.get("Subject", "No Subject")
- from_addr = msg.get("From", "Unknown")
- to_addr = msg.get("To", "Unknown")
- date = msg.get("Date", "Unknown")
-
- # Extract email body
- body = ""
- if msg.is_multipart():
- for part in msg.walk():
- if part.get_content_type() == "text/plain":
- body = part.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
- break
- else:
- body = msg.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
-
- # Create document content
- doc_content = f"""
-From: {from_addr}
-To: {to_addr}
-Subject: {subject}
-Date: {date}
-
-{body}
-"""
-
- # Create metadata
- metadata = {
- "file_path": filepath,
- "subject": subject,
- "from": from_addr,
- "to": to_addr,
- "date": date,
- "filename": filename,
- }
-
- doc = Document(text=doc_content, metadata=metadata)
- docs.append(doc)
- count += 1
-
- except Exception as e:
- print(f"Error parsing email from {filepath}: {e}")
- continue
-
- except Exception as e:
- print(f"Error reading file {filepath}: {e}")
- continue
-
- print(f"Loaded {len(docs)} email documents")
- return docs
-
-
-def create_and_save_index(mail_path: str, save_dir: str = "mail_index", max_count: int = 1000):
- """
- Create the index from mail data and save it to disk.
-
- Args:
- mail_path: Path to the mail directory
- save_dir: Directory to save the index
- max_count: Maximum number of emails to process
- """
- print("Creating index from mail data...")
-
- # Load documents
- documents = EmlxReader().load_data(mail_path, max_count=max_count)
-
- if not documents:
- print("No documents loaded. Exiting.")
- return None
-
- # Create text splitter
- text_splitter = SentenceSplitter(chunk_size=256, chunk_overlap=0)
-
- # Create index
- index = VectorStoreIndex.from_documents(documents, transformations=[text_splitter])
-
- # Save the index
- os.makedirs(save_dir, exist_ok=True)
- index.storage_context.persist(persist_dir=save_dir)
- print(f"Index saved to {save_dir}")
-
- return index
-
-
-def load_index(save_dir: str = "mail_index"):
- """
- Load the saved index from disk.
-
- Args:
- save_dir: Directory where the index is saved
-
- Returns:
- Loaded index or None if loading fails
- """
- try:
- # Load storage context
- storage_context = StorageContext.from_defaults(persist_dir=save_dir)
-
- # Load index
- index = VectorStoreIndex.from_vector_store(
- storage_context.vector_store, storage_context=storage_context
- )
-
- print(f"Index loaded from {save_dir}")
- return index
-
- except Exception as e:
- print(f"Error loading index: {e}")
- return None
-
-
-def query_index(index, query: str):
- """
- Query the loaded index.
-
- Args:
- index: The loaded index
- query: The query string
- """
- if index is None:
- print("No index available for querying.")
- return
-
- query_engine = index.as_query_engine()
- response = query_engine.query(query)
- print(f"Query: {query}")
- print(f"Response: {response}")
-
-
-def main():
- mail_path = "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data/9/Messages"
- save_dir = "mail_index"
-
- # Check if index already exists
- if os.path.exists(save_dir) and os.path.exists(os.path.join(save_dir, "vector_store.json")):
- print("Loading existing index...")
- index = load_index(save_dir)
- else:
- print("Creating new index...")
- index = create_and_save_index(mail_path, save_dir, max_count=1000)
-
- if index:
- # Example queries
- queries = [
- "Hows Berkeley Graduate Student Instructor",
- "What emails mention GSR appointments?",
- "Find emails about deadlines",
- ]
-
- for query in queries:
- print("\n" + "=" * 50)
- query_index(index, query)
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/mail_reader_small_chunks.py b/test/mail_reader_small_chunks.py
deleted file mode 100644
index 50bd452..0000000
--- a/test/mail_reader_small_chunks.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import email
-import os
-from typing import Any
-
-from llama_index.core import Document, StorageContext, VectorStoreIndex
-from llama_index.core.node_parser import SentenceSplitter
-from llama_index.core.readers.base import BaseReader
-
-
-class EmlxReader(BaseReader):
- """
- Apple Mail .emlx file reader with reduced metadata.
-
- Reads individual .emlx files from Apple Mail's storage format.
- """
-
- def __init__(self) -> None:
- """Initialize."""
- pass
-
- def load_data(self, input_dir: str, **load_kwargs: Any) -> list[Document]:
- """
- Load data from the input directory containing .emlx files.
-
- Args:
- input_dir: Directory containing .emlx files
- **load_kwargs:
- max_count (int): Maximum amount of messages to read.
- """
- docs: list[Document] = []
- max_count = load_kwargs.get("max_count", 1000)
- count = 0
-
- # Walk through the directory recursively
- for dirpath, dirnames, filenames in os.walk(input_dir):
- # Skip hidden directories
- dirnames[:] = [d for d in dirnames if not d.startswith(".")]
-
- for filename in filenames:
- if count >= max_count:
- break
-
- if filename.endswith(".emlx"):
- filepath = os.path.join(dirpath, filename)
- try:
- # Read the .emlx file
- with open(filepath, encoding="utf-8", errors="ignore") as f:
- content = f.read()
-
- # .emlx files have a length prefix followed by the email content
- # The first line contains the length, followed by the email
- lines = content.split("\n", 1)
- if len(lines) >= 2:
- email_content = lines[1]
-
- # Parse the email using Python's email module
- try:
- msg = email.message_from_string(email_content)
-
- # Extract email metadata
- subject = msg.get("Subject", "No Subject")
- from_addr = msg.get("From", "Unknown")
- to_addr = msg.get("To", "Unknown")
- date = msg.get("Date", "Unknown")
-
- # Extract email body
- body = ""
- if msg.is_multipart():
- for part in msg.walk():
- if part.get_content_type() == "text/plain":
- body = part.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
- break
- else:
- body = msg.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
-
- # Create document content with metadata embedded in text
- doc_content = f"""
-From: {from_addr}
-To: {to_addr}
-Subject: {subject}
-Date: {date}
-
-{body}
-"""
-
- # Create minimal metadata (only essential info)
- metadata = {
- "subject": subject[:50], # Truncate subject
- "from": from_addr[:30], # Truncate from
- "date": date[:20], # Truncate date
- "filename": filename, # Keep filename
- }
-
- doc = Document(text=doc_content, metadata=metadata)
- docs.append(doc)
- count += 1
-
- except Exception as e:
- print(f"Error parsing email from {filepath}: {e}")
- continue
-
- except Exception as e:
- print(f"Error reading file {filepath}: {e}")
- continue
-
- print(f"Loaded {len(docs)} email documents")
- return docs
-
-
-def create_and_save_index(
- mail_path: str, save_dir: str = "mail_index_small", max_count: int = 1000
-):
- """
- Create the index from mail data and save it to disk.
-
- Args:
- mail_path: Path to the mail directory
- save_dir: Directory to save the index
- max_count: Maximum number of emails to process
- """
- print("Creating index from mail data with small chunks...")
-
- # Load documents
- documents = EmlxReader().load_data(mail_path, max_count=max_count)
-
- if not documents:
- print("No documents loaded. Exiting.")
- return None
-
- # Create text splitter with small chunk size
- text_splitter = SentenceSplitter(chunk_size=512, chunk_overlap=50)
-
- # Create index
- index = VectorStoreIndex.from_documents(documents, transformations=[text_splitter])
-
- # Save the index
- os.makedirs(save_dir, exist_ok=True)
- index.storage_context.persist(persist_dir=save_dir)
- print(f"Index saved to {save_dir}")
-
- return index
-
-
-def load_index(save_dir: str = "mail_index_small"):
- """
- Load the saved index from disk.
-
- Args:
- save_dir: Directory where the index is saved
-
- Returns:
- Loaded index or None if loading fails
- """
- try:
- # Load storage context
- storage_context = StorageContext.from_defaults(persist_dir=save_dir)
-
- # Load index
- index = VectorStoreIndex.from_vector_store(
- storage_context.vector_store, storage_context=storage_context
- )
-
- print(f"Index loaded from {save_dir}")
- return index
-
- except Exception as e:
- print(f"Error loading index: {e}")
- return None
-
-
-def query_index(index, query: str):
- """
- Query the loaded index.
-
- Args:
- index: The loaded index
- query: The query string
- """
- if index is None:
- print("No index available for querying.")
- return
-
- query_engine = index.as_query_engine()
- response = query_engine.query(query)
- print(f"Query: {query}")
- print(f"Response: {response}")
-
-
-def main():
- mail_path = "/Users/yichuan/Library/Mail/V10/0FCA0879-FD8C-4B7E-83BF-FDDA930791C5/[Gmail].mbox/All Mail.mbox/78BA5BE1-8819-4F9A-9613-EB63772F1DD0/Data/9/Messages"
- save_dir = "mail_index_small"
-
- # Check if index already exists
- if os.path.exists(save_dir) and os.path.exists(os.path.join(save_dir, "vector_store.json")):
- print("Loading existing index...")
- index = load_index(save_dir)
- else:
- print("Creating new index...")
- index = create_and_save_index(mail_path, save_dir, max_count=1000)
-
- if index:
- # Example queries
- queries = [
- "Hows Berkeley Graduate Student Instructor",
- "What emails mention GSR appointments?",
- "Find emails about deadlines",
- ]
-
- for query in queries:
- print("\n" + "=" * 50)
- query_index(index, query)
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/mail_reader_test.py b/test/mail_reader_test.py
deleted file mode 100644
index f94070a..0000000
--- a/test/mail_reader_test.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import email
-import os
-from typing import Any
-
-from llama_index.core import Document, VectorStoreIndex
-from llama_index.core.readers.base import BaseReader
-
-
-class EmlxReader(BaseReader):
- """
- Apple Mail .emlx file reader.
-
- Reads individual .emlx files from Apple Mail's storage format.
- """
-
- def __init__(self) -> None:
- """Initialize."""
- pass
-
- def load_data(self, input_dir: str, **load_kwargs: Any) -> list[Document]:
- """
- Load data from the input directory containing .emlx files.
-
- Args:
- input_dir: Directory containing .emlx files
- **load_kwargs:
- max_count (int): Maximum amount of messages to read.
- """
- docs: list[Document] = []
- max_count = load_kwargs.get("max_count", 1000)
- count = 0
-
- # Check if directory exists and is accessible
- if not os.path.exists(input_dir):
- print(f"Error: Directory '{input_dir}' does not exist")
- return docs
-
- if not os.access(input_dir, os.R_OK):
- print(f"Error: Directory '{input_dir}' is not accessible (permission denied)")
- print("This is likely due to macOS security restrictions on Mail app data")
- return docs
-
- print(f"Scanning directory: {input_dir}")
-
- # Walk through the directory recursively
- for dirpath, dirnames, filenames in os.walk(input_dir):
- # Skip hidden directories
- dirnames[:] = [d for d in dirnames if not d.startswith(".")]
-
- for filename in filenames:
- if count >= max_count:
- break
-
- if filename.endswith(".emlx"):
- filepath = os.path.join(dirpath, filename)
- print(f"Found .emlx file: {filepath}")
- try:
- # Read the .emlx file
- with open(filepath, encoding="utf-8", errors="ignore") as f:
- content = f.read()
-
- # .emlx files have a length prefix followed by the email content
- # The first line contains the length, followed by the email
- lines = content.split("\n", 1)
- if len(lines) >= 2:
- email_content = lines[1]
-
- # Parse the email using Python's email module
- try:
- msg = email.message_from_string(email_content)
-
- # Extract email metadata
- subject = msg.get("Subject", "No Subject")
- from_addr = msg.get("From", "Unknown")
- to_addr = msg.get("To", "Unknown")
- date = msg.get("Date", "Unknown")
-
- # Extract email body
- body = ""
- if msg.is_multipart():
- for part in msg.walk():
- if part.get_content_type() == "text/plain":
- body = part.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
- break
- else:
- body = msg.get_payload(decode=True).decode(
- "utf-8", errors="ignore"
- )
-
- # Create document content
- doc_content = f"""
-From: {from_addr}
-To: {to_addr}
-Subject: {subject}
-Date: {date}
-
-{body}
-"""
-
- # Create metadata
- metadata = {
- "file_path": filepath,
- "subject": subject,
- "from": from_addr,
- "to": to_addr,
- "date": date,
- "filename": filename,
- }
-
- doc = Document(text=doc_content, metadata=metadata)
- docs.append(doc)
- count += 1
-
- except Exception as e:
- print(f"Error parsing email from {filepath}: {e}")
- continue
-
- except Exception as e:
- print(f"Error reading file {filepath}: {e}")
- continue
-
- print(f"Loaded {len(docs)} email documents")
- return docs
-
-
-def main():
- # Use the current directory where the sample.emlx file is located
- current_dir = os.path.dirname(os.path.abspath(__file__))
-
- print("Testing EmlxReader with sample .emlx file...")
- print(f"Scanning directory: {current_dir}")
-
- # Use the custom EmlxReader
- documents = EmlxReader().load_data(current_dir, max_count=1000)
-
- if not documents:
- print("No documents loaded. Make sure sample.emlx exists in the examples directory.")
- return
-
- print(f"\nSuccessfully loaded {len(documents)} document(s)")
-
- # Initialize index with documents
- index = VectorStoreIndex.from_documents(documents)
- query_engine = index.as_query_engine()
-
- print("\nTesting query: 'Hows Berkeley Graduate Student Instructor'")
- res = query_engine.query("Hows Berkeley Graduate Student Instructor")
- print(f"Response: {res}")
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/query_saved_index.py b/test/query_saved_index.py
deleted file mode 100644
index dfd3295..0000000
--- a/test/query_saved_index.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import os
-
-from llama_index.core import StorageContext, VectorStoreIndex
-
-
-def load_index(save_dir: str = "mail_index"):
- """
- Load the saved index from disk.
-
- Args:
- save_dir: Directory where the index is saved
-
- Returns:
- Loaded index or None if loading fails
- """
- try:
- # Load storage context
- storage_context = StorageContext.from_defaults(persist_dir=save_dir)
-
- # Load index
- index = VectorStoreIndex.from_vector_store(
- storage_context.vector_store, storage_context=storage_context
- )
-
- print(f"Index loaded from {save_dir}")
- return index
-
- except Exception as e:
- print(f"Error loading index: {e}")
- return None
-
-
-def query_index(index, query: str):
- """
- Query the loaded index.
-
- Args:
- index: The loaded index
- query: The query string
- """
- if index is None:
- print("No index available for querying.")
- return
-
- query_engine = index.as_query_engine()
- response = query_engine.query(query)
- print(f"\nQuery: {query}")
- print(f"Response: {response}")
-
-
-def main():
- save_dir = "mail_index"
-
- # Check if index exists
- if not os.path.exists(save_dir) or not os.path.exists(
- os.path.join(save_dir, "vector_store.json")
- ):
- print(f"Index not found in {save_dir}")
- print("Please run mail_reader_save_load.py first to create the index.")
- return
-
- # Load the index
- index = load_index(save_dir)
-
- if not index:
- print("Failed to load index.")
- return
-
- print("\n" + "=" * 60)
- print("Email Query Interface")
- print("=" * 60)
- print("Type 'quit' to exit")
- print("Type 'help' for example queries")
- print("=" * 60)
-
- # Interactive query loop
- while True:
- try:
- query = input("\nEnter your query: ").strip()
-
- if query.lower() == "quit":
- print("Goodbye!")
- break
- elif query.lower() == "help":
- print("\nExample queries:")
- print("- Hows Berkeley Graduate Student Instructor")
- print("- What emails mention GSR appointments?")
- print("- Find emails about deadlines")
- print("- Search for emails from specific sender")
- print("- Find emails about meetings")
- continue
- elif not query:
- continue
-
- query_index(index, query)
-
- except KeyboardInterrupt:
- print("\nGoodbye!")
- break
- except Exception as e:
- print(f"Error processing query: {e}")
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/sanity_checks/debug_zmq_issue.py b/test/sanity_checks/debug_zmq_issue.py
deleted file mode 100644
index 9ce8917..0000000
--- a/test/sanity_checks/debug_zmq_issue.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python3
-"""
-Debug script to test ZMQ communication with embedding models.
-"""
-
-import sys
-import time
-
-import zmq
-
-sys.path.append("packages/leann-backend-diskann")
-from leann_backend_diskann import embedding_pb2
-
-
-def test_zmq_with_same_model():
- print("=== Testing ZMQ with embedding model ===")
-
- # Test with a common embedding model
- model_name = "sentence-transformers/all-mpnet-base-v2"
-
- # Start server with the same model
- import subprocess
-
- server_cmd = [
- sys.executable,
- "-m",
- "packages.leann-backend-diskann.leann_backend_diskann.embedding_server",
- "--zmq-port",
- "5556", # Use different port to avoid conflicts
- "--model-name",
- model_name,
- ]
-
- print(f"Starting server with command: {' '.join(server_cmd)}")
- server_process = subprocess.Popen(
- server_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
- )
-
- # Wait for server to start
- print("Waiting for server to start...")
- time.sleep(10)
-
- # Check if server is running
- if server_process.poll() is not None:
- stdout, stderr = server_process.communicate()
- print(f"Server failed to start. stdout: {stdout}")
- print(f"Server failed to start. stderr: {stderr}")
- return False
-
- print(f"Server started with PID: {server_process.pid}")
-
- try:
- # Test client
- context = zmq.Context()
- socket = context.socket(zmq.REQ)
- socket.connect("tcp://127.0.0.1:5556")
- socket.setsockopt(zmq.RCVTIMEO, 30000) # 30 second timeout like C++
- socket.setsockopt(zmq.SNDTIMEO, 30000)
-
- # Create request with same format as C++
- request = embedding_pb2.NodeEmbeddingRequest()
- request.node_ids.extend([0, 1, 2, 3, 4]) # Test with some node IDs
-
- print(f"Sending request with {len(request.node_ids)} node IDs...")
- start_time = time.time()
-
- # Send request
- socket.send(request.SerializeToString())
-
- # Receive response
- response_data = socket.recv()
- end_time = time.time()
-
- print(f"Received response in {end_time - start_time:.3f} seconds")
- print(f"Response size: {len(response_data)} bytes")
-
- # Parse response
- response = embedding_pb2.NodeEmbeddingResponse()
- response.ParseFromString(response_data)
-
- print(f"Response dimensions: {list(response.dimensions)}")
- print(f"Embeddings data size: {len(response.embeddings_data)} bytes")
- print(f"Missing IDs: {list(response.missing_ids)}")
-
- # Calculate expected size
- if len(response.dimensions) == 2:
- batch_size = response.dimensions[0]
- embedding_dim = response.dimensions[1]
- expected_bytes = batch_size * embedding_dim * 4 # 4 bytes per float
- print(f"Expected bytes: {expected_bytes}, Actual: {len(response.embeddings_data)}")
-
- if len(response.embeddings_data) == expected_bytes:
- print("✅ Response format is correct!")
- return True
- else:
- print("❌ Response format mismatch!")
- return False
- else:
- print("❌ Invalid response dimensions!")
- return False
-
- except Exception as e:
- print(f"❌ Error during ZMQ test: {e}")
- return False
- finally:
- # Clean up
- server_process.terminate()
- server_process.wait()
- print("Server terminated")
-
-
-if __name__ == "__main__":
- success = test_zmq_with_same_model()
- if success:
- print("\n✅ ZMQ communication test passed!")
- else:
- print("\n❌ ZMQ communication test failed!")
diff --git a/tests/test_ci_minimal.py b/tests/test_ci_minimal.py
index 072123c..b884cbe 100644
--- a/tests/test_ci_minimal.py
+++ b/tests/test_ci_minimal.py
@@ -20,7 +20,7 @@ def test_package_imports():
def test_cli_help():
"""Test that CLI example shows help."""
result = subprocess.run(
- [sys.executable, "examples/document_rag.py", "--help"], capture_output=True, text=True
+ [sys.executable, "apps/document_rag.py", "--help"], capture_output=True, text=True
)
assert result.returncode == 0
diff --git a/tests/test_document_rag.py b/tests/test_document_rag.py
index f9c793d..97c5700 100644
--- a/tests/test_document_rag.py
+++ b/tests/test_document_rag.py
@@ -14,7 +14,7 @@ import pytest
@pytest.fixture
def test_data_dir():
"""Return the path to test data directory."""
- return Path("examples/data")
+ return Path("data")
@pytest.mark.skipif(
@@ -27,7 +27,7 @@ def test_document_rag_simulated(test_data_dir):
index_dir = Path(temp_dir) / "test_index"
cmd = [
sys.executable,
- "examples/document_rag.py",
+ "apps/document_rag.py",
"--llm",
"simulated",
"--embedding-model",
@@ -65,7 +65,7 @@ def test_document_rag_openai(test_data_dir):
index_dir = Path(temp_dir) / "test_index_openai"
cmd = [
sys.executable,
- "examples/document_rag.py",
+ "apps/document_rag.py",
"--llm",
"simulated", # Use simulated LLM to avoid GPT-4 costs
"--embedding-model",
@@ -104,7 +104,7 @@ def test_document_rag_error_handling(test_data_dir):
with tempfile.TemporaryDirectory() as temp_dir:
cmd = [
sys.executable,
- "examples/document_rag.py",
+ "apps/document_rag.py",
"--llm",
"invalid_llm_type",
"--index-dir",
From c1ccc51a75bd2cfc3406ffc7ed152a1c672f694c Mon Sep 17 00:00:00 2001
From: Andy Lee
Date: Sun, 3 Aug 2025 22:39:49 -0700
Subject: [PATCH 2/4] refactor: reorganize examples and add link checker
---
.github/workflows/link-check.yml | 19 +++++++++++++++++++
.../leann-backend-diskann/third_party/DiskANN | 2 +-
pyproject.toml | 8 ++++++++
3 files changed, 28 insertions(+), 1 deletion(-)
create mode 100644 .github/workflows/link-check.yml
diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml
new file mode 100644
index 0000000..4547782
--- /dev/null
+++ b/.github/workflows/link-check.yml
@@ -0,0 +1,19 @@
+name: Link Check
+
+on:
+ push:
+ branches: [ main, master ]
+ pull_request:
+ schedule:
+ - cron: "0 3 * * 1"
+
+jobs:
+ link-check:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: lycheeverse/lychee-action@v2
+ with:
+ args: --no-progress README.md docs/ apps/ examples/ benchmarks/
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/packages/leann-backend-diskann/third_party/DiskANN b/packages/leann-backend-diskann/third_party/DiskANN
index af2a264..67a2611 160000
--- a/packages/leann-backend-diskann/third_party/DiskANN
+++ b/packages/leann-backend-diskann/third_party/DiskANN
@@ -1 +1 @@
-Subproject commit af2a26481e65232b57b82d96e68833cdee9f7635
+Subproject commit 67a2611ad14bc11d84dfdb554c5567cfb78a2656
diff --git a/pyproject.toml b/pyproject.toml
index 906593e..d3b42e3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -134,6 +134,14 @@ dev = [
"ruff>=0.12.4",
]
+[tool.lychee]
+accept = ["200", "403", "429", "503"]
+timeout = 20
+max_retries = 2
+exclude = ["localhost", "127.0.0.1", "example.com"]
+exclude_path = [".git/", ".venv/", "__pycache__/", "third_party/"]
+scheme = ["https", "http"]
+
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
From 7fd3db1ddbd21e9dd44f6dd68a2d044ca91fc7e7 Mon Sep 17 00:00:00 2001
From: Andy Lee
Date: Sun, 3 Aug 2025 22:41:20 -0700
Subject: [PATCH 3/4] fix: add init.py
---
apps/__init__.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 apps/__init__.py
diff --git a/apps/__init__.py b/apps/__init__.py
new file mode 100644
index 0000000..e69de29
From e9562acdc2f7b28a9d73a11a2c9afd4b36f2f527 Mon Sep 17 00:00:00 2001
From: Andy Lee
Date: Sun, 3 Aug 2025 22:42:16 -0700
Subject: [PATCH 4/4] fix: handle certificate errors in link checker
---
.github/workflows/link-check.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml
index 4547782..87a8077 100644
--- a/.github/workflows/link-check.yml
+++ b/.github/workflows/link-check.yml
@@ -14,6 +14,6 @@ jobs:
- uses: actions/checkout@v4
- uses: lycheeverse/lychee-action@v2
with:
- args: --no-progress README.md docs/ apps/ examples/ benchmarks/
+ args: --no-progress --insecure README.md docs/ apps/ examples/ benchmarks/
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}