
Commit

Added utility for saving results to specified folder (#14375)
ivholmlu authored Jun 26, 2024
1 parent 720e4b4 · commit 6382477
Showing 2 changed files with 17 additions and 3 deletions.
File 1 of 2:
@@ -4,6 +4,8 @@
 import warnings
 from collections import deque
 from typing import Any, List, Optional
+import os
+from pathlib import Path

 import pandas as pd
 import tqdm
@@ -44,6 +46,7 @@ def __init__(
         judge_llm: Optional[LLM] = None,
         embed_model: Optional[BaseEmbedding] = None,
         show_progress: bool = True,
+        result_path: Optional[str] = None,
     ):
         self.query_engine = query_engine
         self.rag_dataset = rag_dataset
@@ -64,6 +67,15 @@
         }
         self.eval_queue = deque(range(len(rag_dataset.examples)))
         self.prediction_dataset = None
+        if result_path is None:
+            self.result_path = Path.cwd()
+        else:
+            self.result_path = Path(result_path)
+            if not self.result_path.is_absolute():
+                self.result_path = Path.cwd() / self.result_path
+
+        if not os.path.exists(self.result_path):
+            os.makedirs(self.result_path)

     async def _amake_predictions(
         self,
@@ -223,7 +235,9 @@ def _save_evaluations(self):
             "relevancy": [e.dict() for e in self.evals["relevancy"]],
         }

-        with open("_evaluations.json", "w") as json_file:
+        with open(
+            os.path.join(self.result_path, "_evaluations.json"), "w"
+        ) as json_file:
             json.dump(evaluations_objects, json_file)

     def _prepare_and_save_benchmark_results(self):
@@ -263,7 +277,7 @@ def _prepare_and_save_benchmark_results(self):
         mean_scores_df.index = mean_scores_df.index.set_names(["metrics"])

         # save mean_scores_df
-        mean_scores_df.to_csv("benchmark.csv")
+        mean_scores_df.to_csv(os.path.join(self.result_path, "benchmark.csv"))
         return mean_scores_df

     def _make_evaluations(
File 2 of 2:
@@ -29,7 +29,7 @@ license = "MIT"
 maintainers = ["nerdai"]
 name = "llama-index-packs-rag-evaluator"
 readme = "README.md"
-version = "0.1.5"
+version = "0.1.6"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"

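For context, a minimal usage sketch of the new option follows. It assumes the pack's usual entry point (RagEvaluatorPack imported from llama_index.packs.rag_evaluator), a labelled RAG dataset on disk, and the pack's run() method; none of those appear in this diff, so treat them as illustrative. The result_path argument is the one piece this commit introduces.

# Sketch only: import paths, file locations, and the run() call are assumptions
# based on typical RagEvaluatorPack usage; result_path is what this commit adds.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.llama_dataset import LabelledRagDataset
from llama_index.packs.rag_evaluator import RagEvaluatorPack

# Hypothetical inputs: a labelled RAG dataset plus a query engine built over
# the documents it was generated from.
rag_dataset = LabelledRagDataset.from_json("./data/rag_dataset.json")
documents = SimpleDirectoryReader("./data/source_files").load_data()
query_engine = VectorStoreIndex.from_documents(documents).as_query_engine()

rag_evaluator = RagEvaluatorPack(
    query_engine=query_engine,
    rag_dataset=rag_dataset,
    # New in this commit: where _evaluations.json and benchmark.csv are written.
    # A relative path is resolved against the current working directory and the
    # folder is created if it does not exist; omitting result_path keeps the
    # previous behavior of writing to the current working directory.
    result_path="./eval_results",
)
benchmark_df = rag_evaluator.run()  # mean scores, also saved under result_path
print(benchmark_df)

The resolution logic mirrors the __init__ block in the diff above: a relative result_path becomes Path.cwd() / result_path, and the target directory is created with os.makedirs when missing.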