Compare commits
1 Commit
feature/ad ... fix/52-inc
| Author | SHA1 | Date |
|---|---|---|
| | cd343e1d7a | |
README.md
@@ -426,21 +426,21 @@ Once the index is built, you can ask questions like:
**The future of code assistance is here.** Transform your development workflow with LEANN's native MCP integration for Claude Code. Index your entire codebase and get intelligent code assistance directly in your IDE.

**Key features:**
- 🔍 **Semantic code search** across your entire project, fully local index and lightweight
- 🔍 **Semantic code search** across your entire project
- 📚 **Context-aware assistance** for debugging and development
- 🚀 **Zero-config setup** with automatic language detection

```bash
# Install LEANN globally for MCP integration
uv tool install leann-core --with leann
claude mcp add --scope user leann-server -- leann_mcp
uv tool install leann-core

# Setup is automatic - just start using Claude Code!
```
Try our fully agentic pipeline with auto query rewriting, semantic search planning, and more:

![Claude Code Demo](assets/claude_code_demo.png)

**🔥 Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)
**Ready to supercharge your coding?** [Complete Setup Guide →](packages/leann-mcp/README.md)

## 🖥️ Command Line Interface

@@ -457,8 +457,7 @@ leann --help
**To make it globally available:**
```bash
# Install the LEANN CLI globally using uv tool
uv tool install leann-core --with leann

uv tool install leann-core

# Now you can use leann from anywhere without activating venv
leann --help
```

@@ -46,7 +46,6 @@ def compute_embeddings(
- "sentence-transformers": Use sentence-transformers library (default)
- "mlx": Use MLX backend for Apple Silicon
- "openai": Use OpenAI embedding API
- "gemini": Use Google Gemini embedding API
use_server: Whether to use embedding server (True for search, False for build)

Returns:

@@ -680,52 +680,6 @@ class HFChat(LLMInterface):
        return response.strip()


class GeminiChat(LLMInterface):
    """LLM interface for Google Gemini models."""

    def __init__(self, model: str = "gemini-2.5-flash", api_key: Optional[str] = None):
        self.model = model
        self.api_key = api_key or os.getenv("GEMINI_API_KEY")

        if not self.api_key:
            raise ValueError(
                "Gemini API key is required. Set GEMINI_API_KEY environment variable or pass api_key parameter."
            )

        logger.info(f"Initializing Gemini Chat with model='{model}'")

        try:
            import google.genai as genai

            self.client = genai.Client(api_key=self.api_key)
        except ImportError:
            raise ImportError(
                "The 'google-genai' library is required for Gemini models. Please install it with 'uv pip install google-genai'."
            )

    def ask(self, prompt: str, **kwargs) -> str:
        logger.info(f"Sending request to Gemini with model {self.model}")

        try:
            # Set generation configuration
            generation_config = {
                "temperature": kwargs.get("temperature", 0.7),
                "max_output_tokens": kwargs.get("max_tokens", 1000),
            }

            # Handle top_p parameter
            if "top_p" in kwargs:
                generation_config["top_p"] = kwargs["top_p"]

            response = self.client.models.generate_content(
                model=self.model, contents=prompt, config=generation_config
            )
            return response.text.strip()
        except Exception as e:
            logger.error(f"Error communicating with Gemini: {e}")
            return f"Error: Could not get a response from Gemini. Details: {e}"


class OpenAIChat(LLMInterface):
    """LLM interface for OpenAI models."""

@@ -839,8 +793,6 @@ def get_llm(llm_config: Optional[dict[str, Any]] = None) -> LLMInterface:
        return HFChat(model_name=model or "deepseek-ai/deepseek-llm-7b-chat")
    elif llm_type == "openai":
        return OpenAIChat(model=model or "gpt-4o", api_key=llm_config.get("api_key"))
    elif llm_type == "gemini":
        return GeminiChat(model=model or "gemini-2.5-flash", api_key=llm_config.get("api_key"))
    elif llm_type == "simulated":
        return SimulatedChat()
    else:
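For completeness, a sketch of how the new `gemini` branch might be reached through `get_llm`; the config keys `"type"` and `"model"` are assumptions inferred from the surrounding code (`"api_key"` is read explicitly above):

```python
# Assumed config shape: {"type": ..., "model": ..., "api_key": ...}.
llm = get_llm({"type": "gemini", "model": "gemini-2.5-flash"})
print(type(llm).__name__)  # GeminiChat, per the branch above
print(llm.ask("What does `leann build` do?"))
```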

@@ -148,6 +148,12 @@ Examples:
        type=str,
        help="Comma-separated list of file extensions to include (e.g., '.txt,.pdf,.pptx'). If not specified, uses default supported types.",
    )
    build_parser.add_argument(
        "--include-hidden",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Include hidden files and directories (paths starting with '.') during indexing (default: false)",
    )
    build_parser.add_argument(
        "--doc-chunk-size",
        type=int,
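The new flag relies on `argparse.BooleanOptionalAction` (Python 3.9+), which automatically registers a paired `--no-include-hidden` form; a standalone sketch of that behavior:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--include-hidden",
    action=argparse.BooleanOptionalAction,  # also creates --no-include-hidden
    default=False,
)

print(parser.parse_args([]).include_hidden)                       # False (default)
print(parser.parse_args(["--include-hidden"]).include_hidden)     # True
print(parser.parse_args(["--no-include-hidden"]).include_hidden)  # False
```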

@@ -411,7 +417,10 @@ Examples:
        print(f" leann ask {example_name} --interactive")

    def load_documents(
        self, docs_paths: Union[str, list], custom_file_types: Union[str, None] = None
        self,
        docs_paths: Union[str, list],
        custom_file_types: Union[str, None] = None,
        include_hidden: bool = False,
    ):
        # Handle both single path (string) and multiple paths (list) for backward compatibility
        if isinstance(docs_paths, str):

@@ -455,6 +464,10 @@ Examples:

        all_documents = []

        # Helper to detect hidden path components
        def _path_has_hidden_segment(p: Path) -> bool:
            return any(part.startswith(".") and part not in [".", ".."] for part in p.parts)

        # First, process individual files if any
        if files:
            print(f"\n🔄 Processing {len(files)} individual file{'s' if len(files) > 1 else ''}...")

@@ -467,8 +480,12 @@ Examples:

            files_by_dir = defaultdict(list)
            for file_path in files:
                parent_dir = str(Path(file_path).parent)
                files_by_dir[parent_dir].append(file_path)
                file_path_obj = Path(file_path)
                if not include_hidden and _path_has_hidden_segment(file_path_obj):
                    print(f" ⚠️ Skipping hidden file: {file_path}")
                    continue
                parent_dir = str(file_path_obj.parent)
                files_by_dir[parent_dir].append(str(file_path_obj))

            # Load files from each parent directory
            for parent_dir, file_list in files_by_dir.items():

@@ -479,6 +496,7 @@ Examples:
                file_docs = SimpleDirectoryReader(
                    parent_dir,
                    input_files=file_list,
                    # exclude_hidden only affects directory scans; input_files are explicit
                    filename_as_id=True,
                ).load_data()
                all_documents.extend(file_docs)
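A minimal sketch of the explicit-file path through `SimpleDirectoryReader`, assuming the llama-index import path and hypothetical file names; as the comment above notes, `exclude_hidden` only applies to directory scans, so hidden files are filtered beforehand via `_path_has_hidden_segment`:

```python
from llama_index.core import SimpleDirectoryReader  # import path is an assumption

file_list = ["docs/setup.md", "docs/usage.md"]  # hypothetical, already filtered for hidden paths
docs = SimpleDirectoryReader(
    "docs",                 # parent directory of the explicit files
    input_files=file_list,  # explicit files bypass directory scanning
    filename_as_id=True,
).load_data()
print(f"loaded {len(docs)} documents")
```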

@@ -577,6 +595,8 @@ Examples:
                # Check if file matches any exclude pattern
                try:
                    relative_path = file_path.relative_to(docs_path)
                    if not include_hidden and _path_has_hidden_segment(relative_path):
                        continue
                    if self._should_exclude_file(relative_path, gitignore_matches):
                        continue
                except ValueError:

@@ -604,6 +624,7 @@ Examples:
                try:
                    default_docs = SimpleDirectoryReader(
                        str(file_path.parent),
                        exclude_hidden=not include_hidden,
                        filename_as_id=True,
                        required_exts=[file_path.suffix],
                    ).load_data()

@@ -632,6 +653,7 @@ Examples:
                encoding="utf-8",
                required_exts=code_extensions,
                file_extractor={},  # Use default extractors
                exclude_hidden=not include_hidden,
                filename_as_id=True,
            ).load_data(show_progress=True)
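And a sketch of the directory-scan path, where the new `exclude_hidden=not include_hidden` wiring takes effect (directory name and extensions are placeholders):

```python
from llama_index.core import SimpleDirectoryReader  # import path is an assumption

include_hidden = False  # mirrors the CLI flag added earlier in this diff
code_docs = SimpleDirectoryReader(
    "src",                         # hypothetical project directory
    required_exts=[".py", ".ts"],  # stand-in for code_extensions
    exclude_hidden=not include_hidden,
    filename_as_id=True,
).load_data(show_progress=True)
print(f"loaded {len(code_docs)} documents")
```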

@@ -781,7 +803,9 @@ Examples:
            paragraph_separator="\n\n",
        )

        all_texts = self.load_documents(docs_paths, args.file_types)
        all_texts = self.load_documents(
            docs_paths, args.file_types, include_hidden=args.include_hidden
        )
        if not all_texts:
            print("No documents found")
            return

@@ -57,8 +57,6 @@ def compute_embeddings(
        return compute_embeddings_mlx(texts, model_name)
    elif mode == "ollama":
        return compute_embeddings_ollama(texts, model_name, is_build=is_build)
    elif mode == "gemini":
        return compute_embeddings_gemini(texts, model_name, is_build=is_build)
    else:
        raise ValueError(f"Unsupported embedding mode: {mode}")
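A usage sketch of the new embedding mode; the exact `compute_embeddings` signature is an assumption based on the dispatch above, and `GEMINI_API_KEY` must be set:

```python
# Assumed signature: compute_embeddings(texts, model_name, mode=..., is_build=...).
vectors = compute_embeddings(
    ["hello world", "LEANN builds lightweight vector indexes"],
    "text-embedding-004",
    mode="gemini",  # routed to compute_embeddings_gemini (shown in a later hunk)
    is_build=True,
)
print(vectors.shape)  # (2, embedding_dim), dtype float32
```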

@@ -660,83 +658,3 @@ def compute_embeddings_ollama(
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

    return embeddings


def compute_embeddings_gemini(
    texts: list[str], model_name: str = "text-embedding-004", is_build: bool = False
) -> np.ndarray:
    """
    Compute embeddings using Google Gemini API.

    Args:
        texts: List of texts to compute embeddings for
        model_name: Gemini model name (default: "text-embedding-004")
        is_build: Whether this is a build operation (shows progress bar)

    Returns:
        Embeddings array, shape: (len(texts), embedding_dim)
    """
    try:
        import os

        import google.genai as genai
    except ImportError as e:
        raise ImportError(f"Google GenAI package not installed: {e}")

    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise RuntimeError("GEMINI_API_KEY environment variable not set")

    # Cache Gemini client
    cache_key = "gemini_client"
    if cache_key in _model_cache:
        client = _model_cache[cache_key]
    else:
        client = genai.Client(api_key=api_key)
        _model_cache[cache_key] = client
        logger.info("Gemini client cached")

    logger.info(
        f"Computing embeddings for {len(texts)} texts using Gemini API, model: '{model_name}'"
    )

    # Gemini supports batch embedding
    max_batch_size = 100  # Conservative batch size for Gemini
    all_embeddings = []

    try:
        from tqdm import tqdm

        total_batches = (len(texts) + max_batch_size - 1) // max_batch_size
        batch_range = range(0, len(texts), max_batch_size)
        batch_iterator = tqdm(
            batch_range, desc="Computing embeddings", unit="batch", total=total_batches
        )
    except ImportError:
        # Fallback when tqdm is not available
        batch_iterator = range(0, len(texts), max_batch_size)

    for i in batch_iterator:
        batch_texts = texts[i : i + max_batch_size]

        try:
            # Use the embed_content method from the new Google GenAI SDK
            response = client.models.embed_content(
                model=model_name,
                contents=batch_texts,
                config=genai.types.EmbedContentConfig(
                    task_type="RETRIEVAL_DOCUMENT"  # For document embedding
                ),
            )

            # Extract embeddings from response
            for embedding_data in response.embeddings:
                all_embeddings.append(embedding_data.values)
        except Exception as e:
            logger.error(f"Batch {i} failed: {e}")
            raise

    embeddings = np.array(all_embeddings, dtype=np.float32)
    logger.info(f"Generated {len(embeddings)} embeddings, dimension: {embeddings.shape[1]}")

    return embeddings
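A direct call to the function above; the texts are placeholders and a real `GEMINI_API_KEY` is required:

```python
import os

assert os.getenv("GEMINI_API_KEY"), "set GEMINI_API_KEY before calling"

emb = compute_embeddings_gemini(
    ["first document chunk", "second document chunk"],
    model_name="text-embedding-004",
    is_build=True,  # per the docstring, build-time calls show the progress bar
)
print(emb.dtype, emb.shape)  # float32 (2, embedding_dim)
```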

@@ -92,7 +92,7 @@ leann build docs-and-configs --docs $(git ls-files "*.md" "*.yml" "*.yaml" "*.js
```


## **Try this in Claude Code:**
**Try this in Claude Code:**
```
Help me understand this codebase. List available indexes and search for authentication patterns.
```