add colqwen stuff and pass ruff

yichuan-w
2025-09-22 22:01:29 +00:00
parent 72455bb269
commit 94d9a203a2
7 changed files with 98815 additions and 99376 deletions


@@ -4,7 +4,7 @@
 # pip install tqdm
 # pip install pillow
-#%%
+# %%
 from pdf2image import convert_from_path
 pdf_path = "pdfs/2004.12832v2.pdf"
@@ -13,9 +13,7 @@ images = convert_from_path(pdf_path)
 for i, image in enumerate(images):
     image.save(f"pages/page_{i + 1}.png", "PNG")
-#%%
-import numpy as np
-import concurrent.futures
+# %%
 import os
 from pathlib import Path
@@ -24,6 +22,7 @@ _repo_root = Path(__file__).resolve().parents[3]
 _leann_core_src = _repo_root / "packages" / "leann-core" / "src"
 _leann_hnsw_pkg = _repo_root / "packages" / "leann-backend-hnsw"
 import sys
 if str(_leann_core_src) not in sys.path:
     sys.path.append(str(_leann_core_src))
 if str(_leann_hnsw_pkg) not in sys.path:
@@ -34,26 +33,30 @@ from leann_multi_vector import LeannMultiVector
 class LeannRetriever(LeannMultiVector):
     pass
-#%%
+# %%
+from typing import cast
+import torch
 from colpali_engine.models import ColPali
 from colpali_engine.models.paligemma.colpali.processing_colpali import ColPaliProcessor
 from colpali_engine.utils.processing_utils import BaseVisualRetrieverProcessor
 from colpali_engine.utils.torch_utils import ListDataset, get_torch_device
 from torch.utils.data import DataLoader
-import torch
-from typing import List, cast
 # Auto-select device: CUDA > MPS (mac) > CPU
 _device_str = (
-    "cuda" if torch.cuda.is_available() else (
-        "mps" if getattr(torch.backends, "mps", None) and torch.backends.mps.is_available() else "cpu"
+    "cuda"
+    if torch.cuda.is_available()
+    else (
+        "mps"
+        if getattr(torch.backends, "mps", None) and torch.backends.mps.is_available()
+        else "cpu"
     )
 )
 device = get_torch_device(_device_str)
 # Prefer fp16 on GPU/MPS, bfloat16 on CPU
-_dtype = (
-    torch.float16 if _device_str in ("cuda", "mps") else torch.bfloat16
-)
+_dtype = torch.float16 if _device_str in ("cuda", "mps") else torch.bfloat16
 model_name = "vidore/colpali-v1.2"
 model = ColPali.from_pretrained(
@@ -77,21 +80,21 @@ dataloader = DataLoader(
     collate_fn=lambda x: processor.process_queries(x),
 )
-qs: List[torch.Tensor] = []
+qs: list[torch.Tensor] = []
 for batch_query in dataloader:
     with torch.no_grad():
         batch_query = {k: v.to(model.device) for k, v in batch_query.items()}
         embeddings_query = model(**batch_query)
     qs.extend(list(torch.unbind(embeddings_query.to("cpu"))))
 print(qs[0].shape)
-#%%
+# %%
-from tqdm import tqdm
-from PIL import Image
 import os
 import re
+from PIL import Image
+from tqdm import tqdm
 page_filenames = sorted(os.listdir("./pages"), key=lambda n: int(re.search(r"\d+", n).group()))
 images = [Image.open(os.path.join("./pages", name)) for name in page_filenames]
@@ -102,7 +105,7 @@ dataloader = DataLoader(
     collate_fn=lambda x: processor.process_images(x),
 )
-ds: List[torch.Tensor] = []
+ds: list[torch.Tensor] = []
 for batch_doc in tqdm(dataloader):
     with torch.no_grad():
         batch_doc = {k: v.to(model.device) for k, v in batch_doc.items()}
@@ -111,7 +114,7 @@ for batch_doc in tqdm(dataloader):
 print(ds[0].shape)
-#%%
+# %%
 # Build HNSW index via LeannRetriever primitives and run search
 index_path = "./indexes/colpali.leann"
 retriever = LeannRetriever(index_path=index_path, dim=int(ds[0].shape[-1]))
@@ -129,6 +132,3 @@ for query in qs:
     query_np = query.float().numpy()
     result = retriever.search(query_np, topk=1)
     print(filepaths[result[0][1]])
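
For reference, the retrieval step in this script depends on ColPali's late-interaction (MaxSim) scoring between query-token embeddings and page-patch embeddings. Below is a minimal brute-force sketch of that scoring over the in-memory qs/ds lists built above; it is not part of the commit, and the helper name maxsim_score plus the reuse of page_filenames are illustrative only.

# Sketch only (not in this commit): brute-force MaxSim scoring over the
# in-memory embeddings, useful as a reference when checking HNSW results.
import torch

def maxsim_score(query_emb: torch.Tensor, doc_emb: torch.Tensor) -> float:
    # query_emb: (n_query_tokens, dim); doc_emb: (n_patches, dim)
    sim = query_emb.float() @ doc_emb.float().T  # token-vs-patch similarities
    return sim.max(dim=1).values.sum().item()  # max over patches, summed over tokens

for q_idx, q in enumerate(qs):
    scores = [maxsim_score(q, d) for d in ds]
    best = max(range(len(scores)), key=scores.__getitem__)
    print(f"query {q_idx}: brute-force best page -> {page_filenames[best]}")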