Compare commits

1 commit: `v0.3.0...fix/empty-`

| Author | SHA1 | Date |
|---|---|---|
|  | f1aca0f756 |  |

The diffs below read from `v0.3.0` (`-` lines) to the `fix/empty-` branch (`+` lines).

**README badges:**

```diff
@@ -5,7 +5,7 @@
 <p align="center">
   <img src="https://img.shields.io/badge/Python-3.9%20%7C%203.10%20%7C%203.11%20%7C%203.12%20%7C%203.13-blue.svg" alt="Python Versions">
   <img src="https://github.com/yichuan-w/LEANN/actions/workflows/build-and-publish.yml/badge.svg" alt="CI Status">
-  <img src="https://img.shields.io/badge/Platform-Ubuntu%20%26%20Arch%20%26%20WSL%20%7C%20macOS%20(ARM64%2FIntel)-lightgrey" alt="Platform">
+  <img src="https://img.shields.io/badge/Platform-Ubuntu%20%7C%20macOS%20(ARM64%2FIntel)-lightgrey" alt="Platform">
   <img src="https://img.shields.io/badge/License-MIT-green.svg" alt="MIT License">
   <img src="https://img.shields.io/badge/MCP-Native%20Integration-blue" alt="MCP Integration">
 </p>
```

**README install instructions:**

````diff
@@ -94,9 +94,7 @@ CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ uv sync
 
 **Linux:**
 ```bash
-# Ubuntu/Debian (For Arch Linux: sudo pacman -S blas lapack openblas libaio boost protobuf abseil-cpp zeromq)
-sudo apt-get update && sudo apt-get install -y libomp-dev libboost-all-dev protobuf-compiler libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
+sudo apt-get install libomp-dev libboost-all-dev protobuf-compiler libabsl-dev libmkl-full-dev libaio-dev libzmq3-dev
 
 uv sync
 ```
````

**`leann-backend-diskann` `pyproject.toml`:**

```diff
@@ -4,8 +4,8 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "leann-backend-diskann"
-version = "0.3.0"
-dependencies = ["leann-core==0.3.0", "numpy", "protobuf>=3.19.0"]
+version = "0.2.9"
+dependencies = ["leann-core==0.2.9", "numpy", "protobuf>=3.19.0"]
 
 [tool.scikit-build]
 # Key: simplified CMake path
```

**`leann-backend-hnsw` `pyproject.toml`:**

```diff
@@ -6,10 +6,10 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "leann-backend-hnsw"
-version = "0.3.0"
+version = "0.2.9"
 description = "Custom-built HNSW (Faiss) backend for the Leann toolkit."
 dependencies = [
-    "leann-core==0.3.0",
+    "leann-core==0.2.9",
     "numpy",
     "pyzmq>=23.0.0",
     "msgpack>=1.0.0",
```

**`leann-core` `pyproject.toml`:**

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "leann-core"
-version = "0.3.0"
+version = "0.2.9"
 description = "Core API and plugin system for LEANN"
 readme = "README.md"
 requires-python = ">=3.9"
```

**Embedding mode list in the `compute_embeddings` docstring:**

```diff
@@ -46,7 +46,6 @@ def compute_embeddings(
         - "sentence-transformers": Use sentence-transformers library (default)
         - "mlx": Use MLX backend for Apple Silicon
         - "openai": Use OpenAI embedding API
-        - "gemini": Use Google Gemini embedding API
         use_server: Whether to use embedding server (True for search, False for build)
 
     Returns:
```

**LLM interfaces (the `GeminiChat` class exists only on the `v0.3.0` side):**

```diff
@@ -680,52 +680,6 @@ class HFChat(LLMInterface):
         return response.strip()
 
 
-class GeminiChat(LLMInterface):
-    """LLM interface for Google Gemini models."""
-
-    def __init__(self, model: str = "gemini-2.5-flash", api_key: Optional[str] = None):
-        self.model = model
-        self.api_key = api_key or os.getenv("GEMINI_API_KEY")
-
-        if not self.api_key:
-            raise ValueError(
-                "Gemini API key is required. Set GEMINI_API_KEY environment variable or pass api_key parameter."
-            )
-
-        logger.info(f"Initializing Gemini Chat with model='{model}'")
-
-        try:
-            import google.genai as genai
-
-            self.client = genai.Client(api_key=self.api_key)
-        except ImportError:
-            raise ImportError(
-                "The 'google-genai' library is required for Gemini models. Please install it with 'uv pip install google-genai'."
-            )
-
-    def ask(self, prompt: str, **kwargs) -> str:
-        logger.info(f"Sending request to Gemini with model {self.model}")
-
-        try:
-            # Set generation configuration
-            generation_config = {
-                "temperature": kwargs.get("temperature", 0.7),
-                "max_output_tokens": kwargs.get("max_tokens", 1000),
-            }
-
-            # Handle top_p parameter
-            if "top_p" in kwargs:
-                generation_config["top_p"] = kwargs["top_p"]
-
-            response = self.client.models.generate_content(
-                model=self.model, contents=prompt, config=generation_config
-            )
-            return response.text.strip()
-        except Exception as e:
-            logger.error(f"Error communicating with Gemini: {e}")
-            return f"Error: Could not get a response from Gemini. Details: {e}"
-
-
 class OpenAIChat(LLMInterface):
     """LLM interface for OpenAI models."""
 
```

```diff
@@ -839,8 +793,6 @@ def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
         return HFChat(model_name=model or "deepseek-ai/deepseek-llm-7b-chat")
     elif llm_type == "openai":
         return OpenAIChat(model=model or "gpt-4o", api_key=llm_config.get("api_key"))
-    elif llm_type == "gemini":
-        return GeminiChat(model=model or "gemini-2.5-flash", api_key=llm_config.get("api_key"))
     elif llm_type == "simulated":
         return SimulatedChat()
     else:
```
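
For orientation, a minimal sketch of how the `gemini` branch of `get_llm` on the `v0.3.0` side would be exercised. The import path and the `"type"`/`"model"` config keys are assumptions inferred from the dispatch above, not confirmed API:

```python
# Hedged sketch, not confirmed API. Assumptions: the module path below, and
# that llm_config carries "type"/"model" keys as the dispatch above suggests.
# Requires GEMINI_API_KEY in the environment (per GeminiChat.__init__).
from leann.chat import get_llm  # hypothetical import path

llm = get_llm({"type": "gemini", "model": "gemini-2.5-flash"})
# ask() forwards temperature/max_tokens into the generation config.
print(llm.ask("Summarize LEANN in one sentence.", temperature=0.2, max_tokens=128))
```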

**CLI argument parsing (`leann build`):**

```diff
@@ -148,12 +148,6 @@ Examples:
         type=str,
         help="Comma-separated list of file extensions to include (e.g., '.txt,.pdf,.pptx'). If not specified, uses default supported types.",
     )
-    build_parser.add_argument(
-        "--include-hidden",
-        action=argparse.BooleanOptionalAction,
-        default=False,
-        help="Include hidden files and directories (paths starting with '.') during indexing (default: false)",
-    )
     build_parser.add_argument(
         "--doc-chunk-size",
         type=int,
```
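
The removed flag relies on `argparse.BooleanOptionalAction` (standard library, Python 3.9+), which auto-generates a paired negative flag. A self-contained sketch of that behavior:

```python
import argparse

# BooleanOptionalAction (Python 3.9+) auto-generates --no-include-hidden,
# so the flag can be switched both ways on the command line.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--include-hidden",
    action=argparse.BooleanOptionalAction,
    default=False,
)

print(parser.parse_args([]).include_hidden)                       # False (default)
print(parser.parse_args(["--include-hidden"]).include_hidden)     # True
print(parser.parse_args(["--no-include-hidden"]).include_hidden)  # False
```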

```diff
@@ -417,10 +411,7 @@ Examples:
         print(f"  leann ask {example_name} --interactive")
 
     def load_documents(
-        self,
-        docs_paths: Union[str, list],
-        custom_file_types: Union[str, None] = None,
-        include_hidden: bool = False,
+        self, docs_paths: Union[str, list], custom_file_types: Union[str, None] = None
     ):
         # Handle both single path (string) and multiple paths (list) for backward compatibility
         if isinstance(docs_paths, str):
```
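
Both versions of the signature keep the string-or-list behavior described in the comment. A standalone sketch of that normalization (the helper name is illustrative, not from the codebase):

```python
from typing import Union

def normalize_docs_paths(docs_paths: Union[str, list]) -> list:
    # Mirrors the isinstance check in load_documents: a bare string is
    # wrapped into a one-element list so later code can iterate uniformly.
    if isinstance(docs_paths, str):
        return [docs_paths]
    return list(docs_paths)

assert normalize_docs_paths("docs/") == ["docs/"]
assert normalize_docs_paths(["docs/", "notes/"]) == ["docs/", "notes/"]
```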

```diff
@@ -464,10 +455,6 @@ Examples:
 
         all_documents = []
 
-        # Helper to detect hidden path components
-        def _path_has_hidden_segment(p: Path) -> bool:
-            return any(part.startswith(".") and part not in [".", ".."] for part in p.parts)
-
         # First, process individual files if any
         if files:
             print(f"\n🔄 Processing {len(files)} individual file{'s' if len(files) > 1 else ''}...")
```
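
The removed helper is self-contained, so its behavior is easy to check in isolation; reproduced from the `-` side above with a few illustrative assertions:

```python
from pathlib import Path

# Copied from the removed code above: a path is "hidden" if any component
# starts with "." (excluding the "." and ".." pseudo-segments).
def _path_has_hidden_segment(p: Path) -> bool:
    return any(part.startswith(".") and part not in [".", ".."] for part in p.parts)

assert _path_has_hidden_segment(Path(".git/config"))          # hidden top-level dir
assert _path_has_hidden_segment(Path("src/.cache/data.bin"))  # hidden mid-path dir
assert not _path_has_hidden_segment(Path("src/main.py"))      # nothing hidden
```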

```diff
@@ -480,12 +467,8 @@ Examples:
 
             files_by_dir = defaultdict(list)
             for file_path in files:
-                file_path_obj = Path(file_path)
-                if not include_hidden and _path_has_hidden_segment(file_path_obj):
-                    print(f"  ⚠️ Skipping hidden file: {file_path}")
-                    continue
-                parent_dir = str(file_path_obj.parent)
-                files_by_dir[parent_dir].append(str(file_path_obj))
+                parent_dir = str(Path(file_path).parent)
+                files_by_dir[parent_dir].append(file_path)
 
             # Load files from each parent directory
             for parent_dir, file_list in files_by_dir.items():
```
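
The grouping pattern on the `+` side, as a runnable sketch with illustrative paths:

```python
from collections import defaultdict
from pathlib import Path

# Bucket explicit file paths by parent directory so each directory can be
# handed to the reader once (sample paths are illustrative).
files = ["docs/a.md", "docs/b.md", "notes/c.txt"]
files_by_dir = defaultdict(list)
for file_path in files:
    parent_dir = str(Path(file_path).parent)
    files_by_dir[parent_dir].append(file_path)

print(dict(files_by_dir))  # {'docs': ['docs/a.md', 'docs/b.md'], 'notes': ['notes/c.txt']}
```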

```diff
@@ -496,7 +479,6 @@ Examples:
                 file_docs = SimpleDirectoryReader(
                     parent_dir,
                     input_files=file_list,
-                    # exclude_hidden only affects directory scans; input_files are explicit
                     filename_as_id=True,
                 ).load_data()
                 all_documents.extend(file_docs)
```
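
For context, a hedged sketch mirroring the reader call above. It assumes LEANN uses `llama_index`'s `SimpleDirectoryReader` (consistent with the call shape shown) and uses illustrative paths:

```python
# Hedged sketch, assuming llama_index's SimpleDirectoryReader and sample paths.
from llama_index.core import SimpleDirectoryReader

file_docs = SimpleDirectoryReader(
    "docs",                     # parent directory
    input_files=["docs/a.md"],  # explicit files; no directory scan is performed
    filename_as_id=True,        # derive stable document IDs from file names
).load_data()
print(len(file_docs))
```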

```diff
@@ -595,8 +577,6 @@ Examples:
                     # Check if file matches any exclude pattern
                     try:
                         relative_path = file_path.relative_to(docs_path)
-                        if not include_hidden and _path_has_hidden_segment(relative_path):
-                            continue
                         if self._should_exclude_file(relative_path, gitignore_matches):
                             continue
                     except ValueError:
```
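
`Path.relative_to` raises `ValueError` when the file is not under the base directory, which is what the `except ValueError:` above catches. A standalone sketch:

```python
from pathlib import Path

docs_path = Path("/data/docs")
for file_path in [Path("/data/docs/sub/a.md"), Path("/tmp/outside.md")]:
    try:
        relative_path = file_path.relative_to(docs_path)
        print("inside:", relative_path)
    except ValueError:
        # Raised when file_path does not live under docs_path.
        print("outside, skipped:", file_path)
```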

```diff
@@ -624,7 +604,6 @@ Examples:
                 try:
                     default_docs = SimpleDirectoryReader(
                         str(file_path.parent),
-                        exclude_hidden=not include_hidden,
                         filename_as_id=True,
                         required_exts=[file_path.suffix],
                     ).load_data()
```

```diff
@@ -653,7 +632,6 @@ Examples:
                     encoding="utf-8",
                     required_exts=code_extensions,
                     file_extractor={},  # Use default extractors
-                    exclude_hidden=not include_hidden,
                     filename_as_id=True,
                 ).load_data(show_progress=True)
 
```

```diff
@@ -803,9 +781,7 @@ Examples:
             paragraph_separator="\n\n",
         )
 
-        all_texts = self.load_documents(
-            docs_paths, args.file_types, include_hidden=args.include_hidden
-        )
+        all_texts = self.load_documents(docs_paths, args.file_types)
         if not all_texts:
             print("No documents found")
             return
```

**Embedding mode dispatch in `compute_embeddings`:**

```diff
@@ -57,8 +57,6 @@ def compute_embeddings(
         return compute_embeddings_mlx(texts, model_name)
     elif mode == "ollama":
         return compute_embeddings_ollama(texts, model_name, is_build=is_build)
-    elif mode == "gemini":
-        return compute_embeddings_gemini(texts, model_name, is_build=is_build)
     else:
         raise ValueError(f"Unsupported embedding mode: {mode}")
 
```
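
A hedged sketch of driving the dispatcher through the `gemini` branch present on the `v0.3.0` side. The module path and the exact positional/keyword shape of `compute_embeddings` are assumptions:

```python
# Hedged sketch, not confirmed API: module path and parameter shape are
# assumptions. Requires GEMINI_API_KEY on the v0.3.0 side.
from leann.embedding_compute import compute_embeddings  # hypothetical path

vecs = compute_embeddings(
    ["hello world", "leann indexes everything"],
    "text-embedding-004",  # assumed to be the model_name argument
    mode="gemini",
)
print(vecs.shape)  # expected: (2, embedding_dim)
```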

**Gemini embedding implementation (present only on the `v0.3.0` side):**

```diff
@@ -670,83 +668,3 @@ def compute_embeddings_ollama(
     logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
 
     return embeddings
-
-
-def compute_embeddings_gemini(
-    texts: list[str], model_name: str = "text-embedding-004", is_build: bool = False
-) -> np.ndarray:
-    """
-    Compute embeddings using Google Gemini API.
-
-    Args:
-        texts: List of texts to compute embeddings for
-        model_name: Gemini model name (default: "text-embedding-004")
-        is_build: Whether this is a build operation (shows progress bar)
-
-    Returns:
-        Embeddings array, shape: (len(texts), embedding_dim)
-    """
-    try:
-        import os
-
-        import google.genai as genai
-    except ImportError as e:
-        raise ImportError(f"Google GenAI package not installed: {e}")
-
-    api_key = os.getenv("GEMINI_API_KEY")
-    if not api_key:
-        raise RuntimeError("GEMINI_API_KEY environment variable not set")
-
-    # Cache Gemini client
-    cache_key = "gemini_client"
-    if cache_key in _model_cache:
-        client = _model_cache[cache_key]
-    else:
-        client = genai.Client(api_key=api_key)
-        _model_cache[cache_key] = client
-        logger.info("Gemini client cached")
-
-    logger.info(
-        f"Computing embeddings for {len(texts)} texts using Gemini API, model: '{model_name}'"
-    )
-
-    # Gemini supports batch embedding
-    max_batch_size = 100  # Conservative batch size for Gemini
-    all_embeddings = []
-
-    try:
-        from tqdm import tqdm
-
-        total_batches = (len(texts) + max_batch_size - 1) // max_batch_size
-        batch_range = range(0, len(texts), max_batch_size)
-        batch_iterator = tqdm(
-            batch_range, desc="Computing embeddings", unit="batch", total=total_batches
-        )
-    except ImportError:
-        # Fallback when tqdm is not available
-        batch_iterator = range(0, len(texts), max_batch_size)
-
-    for i in batch_iterator:
-        batch_texts = texts[i : i + max_batch_size]
-
-        try:
-            # Use the embed_content method from the new Google GenAI SDK
-            response = client.models.embed_content(
-                model=model_name,
-                contents=batch_texts,
-                config=genai.types.EmbedContentConfig(
-                    task_type="RETRIEVAL_DOCUMENT"  # For document embedding
-                ),
-            )
-
-            # Extract embeddings from response
-            for embedding_data in response.embeddings:
-                all_embeddings.append(embedding_data.values)
-        except Exception as e:
-            logger.error(f"Batch {i} failed: {e}")
-            raise
-
-    embeddings = np.array(all_embeddings, dtype=np.float32)
-    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")
-
-    return embeddings
```
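
The `total_batches` expression in the removed code is ceiling division; a quick standalone check of the arithmetic:

```python
# (n + batch - 1) // batch rounds up without floating point:
texts = list(range(250))
max_batch_size = 100
total_batches = (len(texts) + max_batch_size - 1) // max_batch_size
assert total_batches == 3  # batches of 100, 100, and 50
```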

**`leann` `pyproject.toml`:**

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "leann"
-version = "0.3.0"
+version = "0.2.9"
 description = "LEANN - The smallest vector index in the world. RAG Everything with LEANN!"
 readme = "README.md"
 requires-python = ">=3.9"
```
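
Because `leann`, `leann-core`, and both backend packages pin each other with exact `==` versions, every distribution in an environment should report the same version. A small stdlib sketch to check that (package names taken from the diffs above):

```python
# Sketch: all four LEANN distributions should report the same version,
# given the exact "==" pins shown in the pyproject diffs above.
from importlib.metadata import PackageNotFoundError, version

for name in ["leann", "leann-core", "leann-backend-hnsw", "leann-backend-diskann"]:
    try:
        print(name, version(name))
    except PackageNotFoundError:
        print(name, "not installed")
```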