From 042da1fe09b15c836c0958826c8e5c1183cbd5a7 Mon Sep 17 00:00:00 2001
From: Andy Lee
Date: Fri, 8 Aug 2025 10:24:49 -0700
Subject: [PATCH] feat: add simulated LLM option to document_rag.py

- Add 'simulated' to the LLM choices in base_rag_example.py
- Handle simulated case in get_llm_config() method
- This allows tests to use --llm simulated to avoid API costs
---
 apps/base_rag_example.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/apps/base_rag_example.py b/apps/base_rag_example.py
index f5a481c..611432b 100644
--- a/apps/base_rag_example.py
+++ b/apps/base_rag_example.py
@@ -85,7 +85,7 @@ class BaseRAGExample(ABC):
             "--llm",
             type=str,
             default="openai",
-            choices=["openai", "ollama", "hf"],
+            choices=["openai", "ollama", "hf", "simulated"],
             help="LLM backend to use (default: openai)",
         )
         llm_group.add_argument(
@@ -178,6 +178,9 @@ class BaseRAGExample(ABC):
             config["host"] = args.llm_host
         elif args.llm == "hf":
             config["model"] = args.llm_model or "Qwen/Qwen2.5-1.5B-Instruct"
+        elif args.llm == "simulated":
+            # Simulated LLM doesn't need additional configuration
+            pass
         return config
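
Note (not part of the patch): the diff only adds "simulated" to the CLI choices and to the
get_llm_config() dispatch; the backend that consumes that config is not shown here. Below is a
minimal, hypothetical sketch of what a test-only simulated backend could look like. The names
SimulatedLLM, build_llm, and the "type" config key are illustrative assumptions, not the repo's
actual API.

    # sketch.py - hypothetical stand-in backend for --llm simulated
    class SimulatedLLM:
        """Returns canned responses so tests never call a paid provider API."""

        def ask(self, prompt: str, **kwargs) -> str:
            # Deterministic placeholder instead of a real completion.
            return f"[simulated response to a {len(prompt)}-character prompt]"


    def build_llm(config: dict):
        """Toy dispatcher mirroring the config dict shape assumed above."""
        if config.get("type") == "simulated":
            return SimulatedLLM()
        raise ValueError(f"unsupported backend: {config.get('type')!r}")


    if __name__ == "__main__":
        llm = build_llm({"type": "simulated"})
        print(llm.ask("What does this document say about indexing?"))

Per the commit message, tests would then invoke the example app with --llm simulated
(e.g. python apps/document_rag.py --llm simulated) to avoid API costs.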