feat: add simulated LLM option to document_rag.py

- Add 'simulated' to the LLM choices in base_rag_example.py
- Handle simulated case in get_llm_config() method
- This allows tests to use --llm simulated to avoid API costs
This commit is contained in:
Andy Lee
2025-08-08 10:24:49 -07:00
parent 2d9c183ebb
commit 042da1fe09

View File

@@ -85,7 +85,7 @@ class BaseRAGExample(ABC):
     "--llm",
     type=str,
     default="openai",
-    choices=["openai", "ollama", "hf"],
+    choices=["openai", "ollama", "hf", "simulated"],
     help="LLM backend to use (default: openai)",
 )
 llm_group.add_argument(
@@ -178,6 +178,9 @@ class BaseRAGExample(ABC):
     config["host"] = args.llm_host
 elif args.llm == "hf":
     config["model"] = args.llm_model or "Qwen/Qwen2.5-1.5B-Instruct"
elif args.llm == "simulated":
# Simulated LLM doesn't need additional configuration
pass
 return config