From d9e49af7e1450dd3481d7406f5da635628a368ed Mon Sep 17 00:00:00 2001
From: Petr Baudis
Date: Mon, 30 Dec 2024 00:55:04 +0100
Subject: [PATCH] Add support for --multiple_choice_generate abcd

---
 docs/interface.md    |  2 ++
 lm_eval/__main__.py  |  8 ++++++--
 lm_eval/api/task.py  | 33 +++++++++++++++++++++++++--------
 lm_eval/evaluator.py |  8 ++++----
 4 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/docs/interface.md b/docs/interface.md
index cea1aab027..4fded81883 100644
--- a/docs/interface.md
+++ b/docs/interface.md
@@ -54,6 +54,8 @@ This mode supports a number of command-line arguments, the details of which can
 - `--fewshot_as_multiturn` : If this flag is on, the Fewshot examples are treated as a multi-turn conversation. Questions are provided as user content and answers are provided as assistant responses. Requires `--num_fewshot` to be set to be greater than 0, and `--apply_chat_template` to be on.
 
+- `--multiple_choice_generate` : If True, multiple choice problems are not evaluated based on the highest-logprob continuation, but by asking the model to generate the choice letter. This departs from the traditional evaluation methodology, but allows evaluation with popular chat-completion APIs and evaluates each multiple choice problem only once rather than #choices times. Without an additional argument, choices must be reproduced verbatim by the model; with the additional argument 'abcd' (RECOMMENDED), choices will be lettered and the model has to produce only the corresponding letter.
+
 - `--predict_only`: Generates the model outputs without computing metrics. Use with `--log_samples` to retrieve decoded results.
 
 * `--seed`: Set seed for python's random, numpy and torch. Accepts a comma-separated list of 3 values for python's random, numpy, and torch seeds, respectively, or a single integer to set the same seed for all three. The values are either an integer or 'None' to not set the seed. Default is `0,1234,1234` (for backward compatibility). E.g. `--seed 0,None,8` sets `random.seed(0)` and `torch.manual_seed(8)`. Here numpy's seed is not set since the second value is `None`. E.g, `--seed 42` sets all three seeds to 42.
 
diff --git a/lm_eval/__main__.py b/lm_eval/__main__.py
index 989cd680ba..381618b272 100644
--- a/lm_eval/__main__.py
+++ b/lm_eval/__main__.py
@@ -189,13 +189,17 @@ def setup_parser() -> argparse.ArgumentParser:
     )
     parser.add_argument(
         "--multiple_choice_generate",
-        action="store_true",
+        type=str,
+        nargs="?",
+        const=True,
         default=False,
         help=(
             "If True, multiple choice problems are not evaluated based on lowest logprob continuation, "
             "but asking the model to generate the choice letter. This departs from the traditional evaluation "
             "methodology, but allows evaluation with popular chat-completion APIs and evaluates each multiple choice "
-            "problem only once rather than #choice times."
+            "problem only once rather than #choices times. Without an additional argument, choices must be reproduced "
+            "verbatim by the model; with the additional argument 'abcd', choices will be lettered and the model has to "
+            "produce only the corresponding letter."
         ),
     )
     parser.add_argument(
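
For reference, the argparse options above give the flag three-way semantics. A minimal standalone sketch (not part of the patch) of how the values resolve:

```python
import argparse

# Same options as in the hunk above.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--multiple_choice_generate", type=str, nargs="?", const=True, default=False
)

# Flag absent -> False (default: logprob-based evaluation).
assert parser.parse_args([]).multiple_choice_generate is False
# Bare flag -> True via const (model must reproduce a choice verbatim).
assert parser.parse_args(["--multiple_choice_generate"]).multiple_choice_generate is True
# Flag with value -> the string itself (lettered choices).
assert parser.parse_args(["--multiple_choice_generate", "abcd"]).multiple_choice_generate == "abcd"
```
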
diff --git a/lm_eval/api/task.py b/lm_eval/api/task.py
index 82c3d6cbcf..ae30908d21 100644
--- a/lm_eval/api/task.py
+++ b/lm_eval/api/task.py
@@ -81,6 +81,7 @@ class TaskConfig(dict):
     description: str = ""
     target_delimiter: str = " "
     choice_delimiter: str = " / "
+    option_delimiter: str = "\n"
     fewshot_delimiter: str = "\n\n"
     fewshot_config: Optional[dict] = None
     # runtime configuration options
@@ -380,7 +381,7 @@ def build_all_requests(
         system_instruction: Optional[str] = None,
         apply_chat_template: bool = False,
         fewshot_as_multiturn: bool = False,
-        multiple_choice_generate: bool = False,
+        multiple_choice_generate: Union[bool, str] = False,
         chat_template: Optional[Callable] = None,
         tokenizer_name: str = "",
     ) -> None:
@@ -438,10 +439,13 @@ def build_all_requests(
         ):
             # sample fewshot context #TODO: need to offset doc_id by rank now!
             doc_system_instruction = system_instruction or ""
-            if multiple_choice_generate:
+            if self.OUTPUT_TYPE == "multiple_choice" and multiple_choice_generate:
                 if doc_system_instruction:
                     doc_system_instruction += " "
-                doc_system_instruction += "Please answer with the letter of the correct answer."
+                if multiple_choice_generate == "abcd":
+                    doc_system_instruction += "Please include \"ANSWER: \" in your response with the letter of the correct answer to the last question."
+                else:
+                    doc_system_instruction += "Please answer with the letter of the correct answer to the last question."
 
             fewshot_ctx = self.fewshot_context(
                 doc,
@@ -1034,7 +1038,7 @@ def fewshot_context(
         system_instruction: Optional[str] = None,
         apply_chat_template: bool = False,
         fewshot_as_multiturn: bool = False,
-        multiple_choice_generate: bool = False,
+        multiple_choice_generate: Union[bool, str] = False,
         chat_template: Optional[Callable] = None,
     ) -> str:
         """Returns a fewshot context string that is made up of a prepended description
@@ -1050,7 +1054,7 @@ def fewshot_context(
             Whether to apply the chat template to the fewshot context.
         :param fewshot_as_multiturn: bool
             Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
-        :param multiple_choice_generate: bool
+        :param multiple_choice_generate: Union[bool, str]
            Whether to generate multiple choice answer from scratch rather than pick by logprobs.
         :param chat_template:
            callable (from lm.apply_chat_template) that takes in a list[Dict] chat transcript and renders it into a string.
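
The hunk that follows renders the choices into the prompt. A standalone sketch of the two rendering modes, assuming the default delimiters from TaskConfig above (option_delimiter = "\n", target_delimiter = " ", choice_delimiter = " / ") and hypothetical example data:

```python
# Hypothetical doc for illustration; real values come from doc_to_text/doc_to_choice.
question = "What is the capital of France?"
choices = ["Paris", "London", "Berlin"]

# 'abcd' mode: one lettered option per line.
example = question
for label, choice in zip("ABCDEFGHIJKLMNOPQRSTUVWXYZ", choices):
    example += f"\n({label}) {choice}"
print(example)
# What is the capital of France?
# (A) Paris
# (B) London
# (C) Berlin

# Verbatim mode: choices joined inline after the target delimiter.
print(question + " " + "(" + " / ".join(choices) + ")")
# What is the capital of France? (Paris / London / Berlin)
```
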
@@ -1101,8 +1105,13 @@ def fewshot_context(
         if self.config.doc_to_choice is not None and multiple_choice_generate:
             if not isinstance(example, str):
                 raise NotImplementedError("--multiple_choice_generate is implemented only for simple text docs")
-            example += self.config.target_delimiter
-            example += "(" + self.config.choice_delimiter.join(self.doc_to_choice(doc)) + ")"
+            if multiple_choice_generate == "abcd":
+                choices = self.doc_to_choice(doc)
+                for label, choice in zip(list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")[:len(choices)], choices):
+                    example += f"{self.config.option_delimiter}({label}) {choice}"
+            else:
+                example += self.config.target_delimiter
+                example += "(" + self.config.choice_delimiter.join(self.doc_to_choice(doc)) + ")"
 
         if apply_chat_template:
             if self.multiple_input:
@@ -1319,7 +1328,7 @@ def doc_to_image(self, doc: Any, doc_to_image=None) -> Union[int, str, list]:
         return None
 
     def construct_requests(
-        self, doc: dict, ctx: str, multiple_choice_generate: bool, **kwargs
+        self, doc: dict, ctx: str, multiple_choice_generate: Union[bool, str], **kwargs
     ) -> Union[List[Instance], Instance]:
         apply_chat_template = kwargs.pop("apply_chat_template", False)
 
@@ -1526,6 +1535,14 @@ def process_results(self, doc, results):
                 # it assumes that doc_to_target returns a number.
                 choices = self.doc_to_choice(doc)
                 gold = choices[gold]
+                if self.multiple_choice_generate == "abcd":
+                    try:
+                        result_label = re.findall(r"ANSWER: ([A-Z])", result)[-1]
+                        result_i = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ").index(result_label)
+                        result = choices[result_i]
+                    except (AttributeError, ValueError, IndexError):
+                        eval_logger.warning(f"[{self}] LLM did not pick a valid result ('{result}')")
+                        result = choices[0]  # XXX guess "randomly"
             # we expect multiple_targets to be a list.
             elif self.multiple_target:
                 gold = list(gold)
diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py
index d5e3792a81..378edc7376 100644
--- a/lm_eval/evaluator.py
+++ b/lm_eval/evaluator.py
@@ -66,7 +66,7 @@ def simple_evaluate(
     system_instruction: Optional[str] = None,
     apply_chat_template: Union[bool, str] = False,
     fewshot_as_multiturn: bool = False,
-    multiple_choice_generate: bool = False,
+    multiple_choice_generate: Union[bool, str] = False,
     gen_kwargs: Optional[str] = None,
     task_manager: Optional[TaskManager] = None,
     verbosity: str = "INFO",
@@ -120,7 +120,7 @@ def simple_evaluate(
         Defaults to False (no chat template applied).
     :param fewshot_as_multiturn: bool
         Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
-    :param multiple_choice_generate: bool
+    :param multiple_choice_generate: Union[bool, str]
         Whether to generate multiple choice answer from scratch rather than pick by logprobs.
     :param gen_kwargs: str
         String arguments for model generation
@@ -376,7 +376,7 @@ def evaluate(
     system_instruction: Optional[str] = None,
     apply_chat_template: Union[bool, str] = False,
     fewshot_as_multiturn: bool = False,
-    multiple_choice_generate: bool = False,
+    multiple_choice_generate: Union[bool, str] = False,
     verbosity: str = "INFO",
 ):
     """Instantiate and evaluate a model on a list of tasks.
@@ -402,7 +402,7 @@ def evaluate(
         Defaults to False (no chat template applied).
     :param fewshot_as_multiturn: bool
         Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
-    :param multiple_choice_generate: bool
+    :param multiple_choice_generate: Union[bool, str]
         Whether to generate multiple choice answer from scratch rather than pick by logprobs.
     :return
         Dictionary of results
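
A standalone sketch of the answer extraction that process_results gains above, run on a hypothetical model response:

```python
import re

choices = ["Paris", "London", "Berlin"]
response = "The capital is famous for the Eiffel Tower.\n\nANSWER: A"

try:
    # The letter after the last "ANSWER: " marker wins, as in the patch.
    label = re.findall(r"ANSWER: ([A-Z])", response)[-1]
    result = choices["ABCDEFGHIJKLMNOPQRSTUVWXYZ".index(label)]
except (ValueError, IndexError):
    # No usable letter found: fall back to the first choice, as the patch does.
    result = choices[0]

print(result)  # Paris
```
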