chore: vscode ruff, and format
@@ -614,7 +614,7 @@ class LeannSearcher:
             zmq_port=zmq_port,
         )
         # logger.info(f" Generated embedding shape: {query_embedding.shape}")
-        time.time() - start_time
+        # time.time() - start_time
         # logger.info(f" Embedding time: {embedding_time} seconds")

         start_time = time.time()
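Worth noting: the removed statement `time.time() - start_time` computed an elapsed time and discarded it, exactly the kind of useless expression Ruff flags, so commenting it out is behavior-preserving. A minimal sketch of the timing pattern this code appears to have intended, with a hypothetical `compute_embedding` standing in for the real embedding call:

    import time

    def compute_embedding(query: str) -> list[float]:
        # Hypothetical stand-in for the real embedding computation.
        return [0.0] * 384

    start_time = time.time()
    embedding = compute_embedding("hello")
    # Assign the elapsed time so it can actually be logged or reported.
    embedding_time = time.time() - start_time
    print(f"Embedding time: {embedding_time:.4f} seconds")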
@@ -680,8 +680,9 @@ class LeannSearcher:
         This method should be called after you're done using the searcher,
         especially in test environments or batch processing scenarios.
         """
-        if hasattr(self.backend_impl, "embedding_server_manager"):
-            self.backend_impl.embedding_server_manager.stop_server()
+        backend = getattr(self.backend_impl, "embedding_server_manager", None)
+        if backend is not None:
+            backend.stop_server()

     # Enable automatic cleanup patterns
     def __enter__(self):
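The rewrite replaces a `hasattr` check plus a second attribute lookup with a single `getattr` that defaults to `None`: one lookup cheaper and friendlier to type checkers. A self-contained sketch of the same defensive-cleanup pattern, with hypothetical `ServerManager` and `BackendImpl` classes standing in for the real ones:

    class ServerManager:
        def stop_server(self) -> None:
            print("embedding server stopped")

    class BackendImpl:
        def __init__(self) -> None:
            self.embedding_server_manager = ServerManager()

    backend_impl = BackendImpl()

    # Read the attribute once; None means this backend has no server to stop.
    manager = getattr(backend_impl, "embedding_server_manager", None)
    if manager is not None:
        manager.stop_server()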
@@ -707,20 +707,28 @@ class GeminiChat(LLMInterface):
         logger.info(f"Sending request to Gemini with model {self.model}")

         try:
-            # Set generation configuration
-            generation_config = {
-                "temperature": kwargs.get("temperature", 0.7),
-                "max_output_tokens": kwargs.get("max_tokens", 1000),
-            }
+            from google.genai.types import GenerateContentConfig
+
+            generation_config = GenerateContentConfig(
+                temperature=kwargs.get("temperature", 0.7),
+                max_output_tokens=kwargs.get("max_tokens", 1000),
+            )

             # Handle top_p parameter
             if "top_p" in kwargs:
-                generation_config["top_p"] = kwargs["top_p"]
+                generation_config.top_p = kwargs["top_p"]

             response = self.client.models.generate_content(
-                model=self.model, contents=prompt, config=generation_config
+                model=self.model,
+                contents=prompt,
+                config=generation_config,
             )
-            return response.text.strip()
+            # Handle potential None response text
+            response_text = response.text
+            if response_text is None:
+                logger.warning("Gemini returned None response text")
+                return ""
+            return response_text.strip()
         except Exception as e:
             logger.error(f"Error communicating with Gemini: {e}")
             return f"Error: Could not get a response from Gemini. Details: {e}"
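Two changes here: the plain dict becomes the typed `GenerateContentConfig` that the `google-genai` SDK's `client.models.generate_content` takes as its `config` argument (so `top_p` is set by attribute assignment rather than item assignment), and `response.text`, which the SDK types as optional, is guarded before calling `.strip()`. A standalone sketch of the resulting call shape; the API key and model name are placeholders:

    from google import genai
    from google.genai.types import GenerateContentConfig

    client = genai.Client(api_key="YOUR_API_KEY")  # placeholder key

    config = GenerateContentConfig(
        temperature=0.7,
        max_output_tokens=1000,
    )
    config.top_p = 0.9  # attribute assignment, not config["top_p"]

    response = client.models.generate_content(
        model="gemini-2.0-flash",  # placeholder model name
        contents="Say hello.",
        config=config,
    )

    # response.text can be None (e.g. blocked or empty candidates), so guard it.
    text = response.text
    print("" if text is None else text.strip())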
@@ -1,7 +1,7 @@
 import argparse
 import asyncio
 from pathlib import Path
-from typing import Union
+from typing import Optional, Union

 from llama_index.core import SimpleDirectoryReader
 from llama_index.core.node_parser import SentenceSplitter
@@ -647,7 +647,7 @@ Examples:
         return False

     def _delete_index_directory(
-        self, index_dir: Path, index_name: str, project_path: Path | None = None
+        self, index_dir: Path, index_name: str, project_path: Optional[Path] = None
     ):
         """Actually delete the index directory"""
         try:
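`Path | None` is PEP 604 union syntax, valid at runtime only on Python 3.10+; `Optional[Path]` spells the same type on older interpreters, which is presumably why these annotations were rewritten (and why `Optional` joins the imports above). A small sketch of a signature in the new style; the body is illustrative, not the project's actual deletion logic:

    from pathlib import Path
    from typing import Optional

    def delete_index_directory(
        index_dir: Path, index_name: str, project_path: Optional[Path] = None
    ) -> None:
        # Illustrative body: resolve against the project root when one is given.
        target = project_path / index_dir if project_path is not None else index_dir
        print(f"would delete index {index_name!r} at {target}")
        # The real method would remove the directory tree here, e.g. shutil.rmtree(target).

    delete_index_directory(Path("indexes/docs"), "docs")
    delete_index_directory(Path("indexes/docs"), "docs", project_path=Path("/srv/leann"))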
@@ -5,7 +5,7 @@ import importlib.metadata
 import json
 import logging
 from pathlib import Path
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional, Union

 if TYPE_CHECKING:
     from leann.interface import LeannBackendFactoryInterface
@@ -47,7 +47,7 @@ def autodiscover_backends():
     # print("INFO: Backend auto-discovery finished.")


-def register_project_directory(project_dir: str | Path | None = None):
+def register_project_directory(project_dir: Optional[Union[str, Path]] = None):
     """
     Register a project directory in the global LEANN registry.
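Same motivation as above: `Optional[Union[str, Path]]` is the pre-3.10 spelling of `str | Path | None`. A common companion pattern for such a parameter is defaulting to the current directory and normalizing to `Path` early; a minimal sketch (the registry bookkeeping itself is not shown):

    from pathlib import Path
    from typing import Optional, Union

    def register_project_directory(project_dir: Optional[Union[str, Path]] = None) -> Path:
        # Default to the current working directory, then normalize str input to Path.
        path = Path.cwd() if project_dir is None else Path(project_dir)
        return path.resolve()

    print(register_project_directory())        # cwd
    print(register_project_directory("/tmp"))  # str input accepted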