add email test code

yichuan520030910320
2025-07-11 23:59:47 -07:00
parent eb6f504789
commit 74ffd7ec64
4 changed files with 21 additions and 6 deletions

.gitignore

@@ -9,6 +9,8 @@ demo/indices/
 outputs/
 *.pkl
 *.pdf
+*.idx
+*.map
 .history/
 scripts/
 lm_eval.egg-info/


@@ -198,7 +198,16 @@ async def query_leann_index(index_path: str, query: str):
         top_k=5,
         recompute_beighbor_embeddings=True,
         complexity=128,
-        beam_width=1
+        beam_width=1,
+        llm_config={
+            "type": "openai",
+            "model": "gpt-4o",
+            "api_key": os.getenv("OPENAI_API_KEY"),
+        },
+        llm_kwargs={
+            "temperature": 0.0,
+            "max_tokens": 1000
+        }
     )
     print(f"Leann: {chat_response}")
@@ -206,7 +215,7 @@ async def main():
     # Default Chrome profile path
     default_chrome_profile = os.path.expanduser("~/Library/Application Support/Google/Chrome/Default")
-    INDEX_DIR = Path("./chrome_history_index_leann")
+    INDEX_DIR = Path("./chrome_history_index_leann_test")
     INDEX_PATH = str(INDEX_DIR / "chrome_history.leann")
     # Find all Chrome profile directories
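The query above now calls out to OpenAI, so the test needs OPENAI_API_KEY in the environment. A minimal pre-flight guard along these lines (hypothetical, not part of the commit) would let the test fail fast with a clear message instead of an opaque authentication error:

import os
import sys

# Hypothetical guard for the test entry point: the commit reads the key via
# os.getenv("OPENAI_API_KEY"), so check it up front before building the index.
if not os.getenv("OPENAI_API_KEY"):
    sys.exit("OPENAI_API_KEY is not set; export it before running this test.")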


@@ -39,7 +39,8 @@ def compute_embeddings(chunks: List[str], model_name: str) -> np.ndarray:
         model = model.to("mps")
     # Generate embeddings
-    embeddings = model.encode(chunks, convert_to_numpy=True, show_progress_bar=True, batch_size=64)
+    # Warn the user if we hit an OOM here: it means we need to turn down the batch size
+    embeddings = model.encode(chunks, convert_to_numpy=True, show_progress_bar=True, batch_size=256)
     return embeddings
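The new comment flags the risk the larger batch introduces: batch_size=256 can run out of memory on MPS. One way to act on it, sketched here as a hypothetical helper (not in the commit), is to warn and halve the batch size until encoding succeeds:

import warnings

def encode_with_backoff(model, chunks, batch_size=256):
    # Hypothetical fallback implementing the "warn and turn down the batch
    # size" idea from the comment above. Assumes a sentence-transformers
    # model, whose MPS/CUDA OOM surfaces as a RuntimeError.
    while batch_size >= 1:
        try:
            return model.encode(
                chunks,
                convert_to_numpy=True,
                show_progress_bar=True,
                batch_size=batch_size,
            )
        except RuntimeError as e:
            if "out of memory" not in str(e).lower():
                raise  # not an OOM; re-raise unchanged
            warnings.warn(
                f"OOM at batch_size={batch_size}; retrying with {batch_size // 2}"
            )
            batch_size //= 2
    raise RuntimeError("Could not encode even with batch_size=1")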


@@ -209,10 +209,13 @@ def get_llm(llm_config: Optional[Dict[str, Any]] = None) -> LLMInterface:
         An instance of an LLMInterface subclass.
     """
     if llm_config is None:
-        logger.info("No LLM config provided, defaulting to simulated chat.")
-        return SimulatedChat()
+        llm_config = {
+            "type": "openai",
+            "model": "gpt-4o",
+            "api_key": os.getenv("OPENAI_API_KEY")
+        }
-    llm_type = llm_config.get("type", "simulated")
+    llm_type = llm_config.get("type", "openai")
     model = llm_config.get("model")
     logger.info(f"Attempting to create LLM of type='{llm_type}' with model='{model}'")
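The net effect of this hunk: get_llm() with no arguments now builds an OpenAI-backed LLM instead of returning SimulatedChat. A quick usage sketch, assuming the simulated backend is still dispatchable by name (which the removed code suggests, but the dispatch table is not shown in this diff):

# Assumes OPENAI_API_KEY is exported in the environment.
llm = get_llm()                        # new default: OpenAI with gpt-4o
sim = get_llm({"type": "simulated"})   # the old default behavior, now opt-in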