fix: resolve ruff linting errors

- Remove unused variables in benchmark scripts
- Remove unused `enumerate` loop counters
This commit is contained in:
Andy Lee
2025-08-22 13:53:25 -07:00
parent ed72232bab
commit 8dfd2f015c
3 changed files with 7 additions and 7 deletions

View File

@@ -322,7 +322,7 @@ class FinanceBenchEvaluator:
for query in test_queries:
start_time = time.time()
search_results = non_compact_searcher.search(
_ = non_compact_searcher.search(
query, top_k=3, complexity=complexity, recompute_embeddings=False
)
search_time = time.time() - start_time
@@ -334,7 +334,7 @@ class FinanceBenchEvaluator:
for query in test_queries:
start_time = time.time()
search_results = compact_searcher.search(
_ = compact_searcher.search(
query, top_k=3, complexity=complexity, recompute_embeddings=True
)
search_time = time.time() - start_time
@@ -802,7 +802,7 @@ def main():
print(
f" Non-compact index: {non_compact_size_metrics['total_with_embeddings']:.1f} MB"
)
size_increase = (
_ = (
(
non_compact_size_metrics["total_with_embeddings"]
- compact_size_metrics["total_with_embeddings"]

View File

@@ -259,7 +259,7 @@ class LAIONEvaluator:
for caption in test_queries:
start_time = time.time()
search_results = non_compact_searcher.search(
_ = non_compact_searcher.search(
caption, top_k=3, complexity=complexity, recompute_embeddings=False
)
search_time = time.time() - start_time
@@ -271,7 +271,7 @@ class LAIONEvaluator:
for caption in test_queries:
start_time = time.time()
search_results = compact_searcher.search(
_ = compact_searcher.search(
caption, top_k=3, complexity=complexity, recompute_embeddings=True
)
search_time = time.time() - start_time

View File

@@ -76,7 +76,7 @@ class LAIONSetup:
# Collect sample metadata first (fast)
print("📋 Collecting sample metadata...")
candidates = []
for i, sample in enumerate(dataset):
for sample in dataset:
if len(candidates) >= num_samples * 3: # Get 3x more candidates in case some fail
break
@@ -377,7 +377,7 @@ class LAIONSetup:
def _add_passages_with_embeddings(self, builder, passages_file: Path, embeddings: np.ndarray):
"""Helper to add passages with pre-computed CLIP embeddings"""
with open(passages_file, encoding="utf-8") as f:
for i, line in enumerate(tqdm(f, desc="Adding passages")):
for line in tqdm(f, desc="Adding passages"):
if line.strip():
passage = json.loads(line)