Merge branch 'main' into clem_add_gpqa

clefourrier authored Feb 22, 2024
2 parents 30c19a1 + 81fc8fd commit 810768c
Showing 9 changed files with 48 additions and 27 deletions.
README.md: 3 changes (1 addition & 2 deletions)
@@ -190,8 +190,7 @@ These metrics need the model to generate an output. They are therefore slower.
- `bleu_4` (HELM): Average sample BLEU score [(Papineni et al., 2002)](https://aclanthology.org/P02-1040/) based on 4-gram overlap - uses the nltk implementation.
- `chrf` (Harness): Character n-gram matches f-score.
- `ter` (Harness): Translation edit/error rate.
- Bias, toxicity, copyright
- `bias` (HELM): Reports uneven association of test groups (race, gender, demographic) and target adjectives or professions, based on co-occurrence statistics between the test terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target adjectives (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).
- Copyright
- `copyright` (HELM): Reports:
- `longest_common_prefix_length`: average length of longest common prefix between model generation and reference,
- `edit_distance`: average Levenshtein edit distance between model generation and reference,
pyproject.toml: 2 changes (1 addition & 1 deletion)
@@ -49,7 +49,7 @@ classifiers = [
keywords = ["evaluation", "nlp", "llm"]
dependencies = [
# Base dependencies
"transformers>=4.36.0",
"transformers>=4.38.0",
"huggingface_hub==0.20.3",
"torch>=2.0",
"GitPython==3.1.31", # for logging
run_evals_accelerate.py: 1 change (1 addition & 0 deletions)
@@ -43,6 +43,7 @@ def get_parser():
help="Whether to force multiple choice continuations to not start with a space",
)
parser.add_argument("--use_chat_template", default=False, action="store_true")
parser.add_argument("--system_prompt", type=str, default=None)
# Model type 2) TGI
task_type_group.add_argument("--inference_server_address", type=str)
parser.add_argument("--inference_server_auth", type=str, default=None)
src/lighteval/evaluator.py: 5 changes (3 additions & 2 deletions)
@@ -108,8 +108,9 @@ def make_results_table(result_dict):

values = []

for k, dic in result_dict["results"].items():
version = result_dict["versions"][k]
for k in sorted(result_dict["results"].keys()):
dic = result_dict["results"][k]
version = result_dict["versions"][k] if k in result_dict["versions"] else ""
for m, v in dic.items():
if m.endswith("_stderr"):
continue
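Presumably this change keeps the results table stable now that aggregate rows (see the `_average` keys added in info_loggers.py below) can appear in `results` without a matching entry in `versions`; sorting also keeps subtasks next to their average row. A toy illustration with made-up task names and scores:

result_dict = {
    "results": {
        "lighteval|mmlu:_average|5": {"acc": 0.5},           # aggregate row, no version logged
        "lighteval|mmlu:abstract_algebra|5": {"acc": 0.25},
        "lighteval|mmlu:anatomy|5": {"acc": 0.75},
    },
    "versions": {
        "lighteval|mmlu:abstract_algebra|5": 0,
        "lighteval|mmlu:anatomy|5": 0,
    },
}

for k in sorted(result_dict["results"].keys()):
    dic = result_dict["results"][k]
    version = result_dict["versions"][k] if k in result_dict["versions"] else ""
    print(k, version, dic)  # the old direct lookup would raise KeyError on the _average row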
src/lighteval/few_shot_manager.py: 13 changes (11 additions & 2 deletions)
@@ -163,10 +163,12 @@ def get_examples_with_chat_template(
example: str,
instruction: str,
fewshot_ex: list[str],
system_prompt: str,
):
examples = []
if system_prompt is not None:
examples.append({"role": "system", "content": system_prompt})
for ex in fewshot_ex:
# many places to put these "\n" though
examples.append({"role": "user", "content": task.doc_to_text_without_instructions(ex)})
examples.append({"role": "assistant", "content": task.doc_to_target(ex)})
# We add the actual example
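To show what this path now builds when a system prompt is passed, here is a self-contained sketch. The questions, answers, and prompt text are made up, and the task-specific helpers (doc_to_text_without_instructions, doc_to_target) are replaced with plain strings; only the system/user/assistant layout mirrors the code above.

system_prompt = "Answer with a single letter."   # illustrative
fewshot_ex = [("Q: 2 + 2 = ?", "4")]             # illustrative few-shot pair
example = "Q: 3 + 3 = ?"                         # the actual query

examples = []
if system_prompt is not None:
    examples.append({"role": "system", "content": system_prompt})
for question, answer in fewshot_ex:
    examples.append({"role": "user", "content": question})
    examples.append({"role": "assistant", "content": answer})
examples.append({"role": "user", "content": example})

# With a Hugging Face tokenizer, this list would then be rendered into a single prompt:
# output = tokenizer.apply_chat_template(examples, tokenize=False, add_generation_prompt=True)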
@@ -202,6 +204,7 @@ def fewshot_context(
max_model_length: Optional[int] = None,
tokenizer: Optional[AutoTokenizer] = None,
use_chat_template=False,
system_prompt: str = None,
):
"""Returns a fewshot context string that is made up of a prepended description
(if provided), the `num_fewshot` number of examples, and an appended prompt example.
@@ -230,7 +233,12 @@

if use_chat_template:
output = self.get_examples_with_chat_template(
task=task, tokenizer=tokenizer, example=example, instruction=instruction, fewshot_ex=fewshot_ex
task=task,
tokenizer=tokenizer,
example=example,
instruction=instruction,
fewshot_ex=fewshot_ex,
system_prompt=system_prompt,
)
toks = tokenizer(output)["input_ids"]
else:
@@ -254,6 +262,7 @@ def fewshot_context(
example=example,
instruction=instruction,
fewshot_ex=fewshot_ex[:num_effective_fewshots],
system_prompt=system_prompt,
)
toks = tokenizer(output)["input_ids"]
else:
src/lighteval/logging/info_loggers.py: 28 changes (15 additions & 13 deletions)
@@ -459,18 +459,20 @@ def aggregate(self, task_dict: dict[str, LightevalTask], bootstrap_iters: int =
self.metric_aggregated[task_name][f"{metric_name}_stderr"] = float("nan")
hlog_warn(f"{task_name}, {metric_name} got an OVERFLOW ERROR when computing stderr.")

suite_average = {}
suite_nb = {}

for _, metrics in self.metric_aggregated.items():
for metric, value in metrics.items():
suite_average[metric] = suite_average.get(metric, 0) + value
suite_nb[metric] = suite_nb.get(metric, 0) + 1

for metric, value in suite_average.items():
suite_average[metric] = value / suite_nb[metric]

self.metric_aggregated["all"] = suite_average
# We group subtasks which belong to the same parent task, like MMLU, to compute an average on them
grouped_tasks = collections.defaultdict(list)
for k in self.metric_aggregated.keys():
if "|" in k:
suite, task, fewshot = k.split("|")
grouped_tasks[f"{suite}|{task.split(':')[0]}:_average|{fewshot}"].append(k)

for average_task, list_of_subtasks in grouped_tasks.items():
if len(list_of_subtasks) > 1:
metrics = list(self.metric_aggregated[list_of_subtasks[0]].keys())
self.metric_aggregated[average_task] = {
metric: sum([self.metric_aggregated[k][metric] for k in list_of_subtasks]) / len(list_of_subtasks)
for metric in metrics
}


class VersionsLogger:
@@ -485,7 +487,7 @@ class VersionsLogger:

# the versions dict will be a dict of task_name: task_version
# {"winogrande|winogrande_xl": 0}
versions: dict[str, int] = {"all": 0}
versions: dict[str, int] = {}

def log(self, task_name: str, task_version: int) -> None:
self.versions[task_name] = task_version
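To make the subtask grouping added above concrete, a self-contained sketch with made-up task names and scores; the suite|task:subtask|fewshot key format matches the diff, everything else is illustrative:

import collections

metric_aggregated = {
    "lighteval|mmlu:abstract_algebra|5": {"acc": 0.25},
    "lighteval|mmlu:anatomy|5": {"acc": 0.75},
    "lighteval|arc:challenge|0": {"acc": 0.80},   # single subtask, no average row created
}

grouped_tasks = collections.defaultdict(list)
for k in metric_aggregated.keys():
    if "|" in k:
        suite, task, fewshot = k.split("|")
        grouped_tasks[f"{suite}|{task.split(':')[0]}:_average|{fewshot}"].append(k)

for average_task, subtasks in grouped_tasks.items():
    if len(subtasks) > 1:
        metrics = list(metric_aggregated[subtasks[0]].keys())
        metric_aggregated[average_task] = {
            m: sum(metric_aggregated[k][m] for k in subtasks) / len(subtasks) for m in metrics
        }

print(metric_aggregated["lighteval|mmlu:_average|5"])   # {'acc': 0.5}, the mean of the two MMLU subtasks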
src/lighteval/main_accelerate.py: 15 changes (8 additions & 7 deletions)
@@ -69,13 +69,14 @@ def main(args):

hlog("Loading documents, and requests")
requests, docs = create_requests_from_tasks(
task_dict,
few_shots_dict,
args.num_fewshot_seeds,
model,
args.max_samples,
evaluation_tracker,
args.use_chat_template,
task_dict=task_dict,
fewshot_dict=few_shots_dict,
num_fewshot_seeds=args.num_fewshot_seeds,
lm=model,
max_samples=args.max_samples,
evaluation_tracker=evaluation_tracker,
use_chat_template=args.use_chat_template,
system_prompt=args.system_prompt,
)

with htrack_block("Setting seeds and waiting for all processes"):
src/lighteval/main_nanotron.py: 1 change (1 addition & 0 deletions)
@@ -129,6 +129,7 @@ def main(
max_samples=lighteval_config.tasks.max_samples,
evaluation_tracker=evaluation_tracker,
use_chat_template=False,
system_prompt=None,
)

with htrack_block("Setting seeds and waiting for all processes"):
src/lighteval/tasks/lighteval_task.py: 7 changes (7 additions & 0 deletions)
@@ -529,6 +529,7 @@ def create_requests_from_tasks( # noqa: C901
max_samples: int,
evaluation_tracker: "EvaluationTracker",
use_chat_template: bool,
system_prompt: str,
) -> Tuple[dict[RequestType, list[Request]], dict[TaskExampleId, Doc]]:
"""
Takes a task dict and a fewshot dict and returns a dict of requests, a dict
@@ -598,10 +599,16 @@ def create_requests_from_tasks( # noqa: C901
sampler=rnd,
tokenizer=lm.tokenizer,
use_chat_template=use_chat_template,
system_prompt=system_prompt,
)
doc.num_effective_few_shots = num_effective_few_shots
doc.num_asked_few_shots = num_fewshot
doc.ctx = ctx
if use_chat_template:
doc.choices = [
lm.tokenizer.apply_chat_template([{"role": "assistant", "content": choice}])
for choice in doc.choices
]

# Constructing the requests
docs[TaskExampleId(cur_task_name, doc_id_seed)] = doc
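One consequence of the new branch above worth noting: apply_chat_template tokenizes by default, so each entry of doc.choices becomes the choice text wrapped in the model's chat markup and returned as token ids, matching the chat-formatted context. A minimal sketch; the checkpoint name is an assumption, any tokenizer with a chat template would do:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")  # illustrative checkpoint

choices = [" Paris", " London"]
choice_ids = [
    tokenizer.apply_chat_template([{"role": "assistant", "content": choice}])
    for choice in choices
]
print(choice_ids[0])                    # token ids, including the template's assistant markers
print(tokenizer.decode(choice_ids[0]))  # the choice rendered inside the chat template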