Skip to content

Commit

Permalink
fix model test name
Browse files Browse the repository at this point in the history
Signed-off-by: changwangss <[email protected]>
  • Loading branch information
changwangss committed May 31, 2024
1 parent 026bf95 commit 289be39
Show file tree
Hide file tree
Showing 4 changed files with 5 additions and 5 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/scripts/models/model_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@ working_dir=""
main() {
case ${tasks} in
"text-generation")
working_dir="/GenAIEval/GenAIEval/evaluation/lm_evaluation_harness/examples";;
working_dir="/GenAIEval/eval/evaluation/lm_evaluation_harness/examples";;
"code-generation")
working_dir="/GenAIEval/GenAIEval/evaluation/bigcode_evaluation_harness/examples";;
working_dir="/GenAIEval/eval/evaluation/bigcode_evaluation_harness/examples";;
*)
echo "Not supported task"; exit 1;;
esac
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/scripts/unittest/compare_coverage.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ coverage_PR_lines_rate=$5
coverage_base_lines_rate=$6
coverage_PR_branches_rate=$7
coverage_base_branches_rate=$8
module_name="GenAIEval"
module_name="eval"
[[ ! -f $coverage_pr_log ]] && exit 1
[[ ! -f $coverage_base_log ]] && exit 1
file_name="./coverage_compare"
Expand Down
2 changes: 1 addition & 1 deletion tests/test_bigcode_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

import unittest

from GenAIEval.evaluation.bigcode_evaluation_harness import BigcodeEvalParser, evaluate
from eval.evaluation.bigcode_evaluation_harness import BigcodeEvalParser, evaluate
from transformers import AutoModelForCausalLM, AutoTokenizer


Expand Down
2 changes: 1 addition & 1 deletion tests/test_lm_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

import unittest

from GenAIEval.evaluation.lm_evaluation_harness import LMEvalParser, evaluate
from eval.evaluation.lm_evaluation_harness import LMEvalParser, evaluate
from transformers import AutoModelForCausalLM, AutoTokenizer


Expand Down

0 comments on commit 289be39

Please sign in to comment.