Adds RULER benchmark #722
Changes from all commits: 65275d5, 0ef15e3, 9cafd75, ed3d907, a4394ad, 248bb67, a1aee68, 775705c, 461b8cb, 57f2921, 79e6a6e
```diff
@@ -105,6 +105,8 @@ class VLLMModelConfig(ModelConfig):
     max_num_batched_tokens: PositiveInt = 2048  # maximum number of tokens per batch
     subfolder: str | None = None
     is_async: bool = False  # Whether to use the async version or sync version of the model
+    use_dual_chunk_attention: bool = False
+    enforce_eager: bool = False


 class VLLMModel(LightevalModel):
```

Review thread on `use_dual_chunk_attention`:

- "what version of vllm are you using for this? I get […]"
- "I was on 0.9.1 I think"
- "(changed my env to same as you now)"
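For reference, this is how the two new fields would be set when building the config from Python. A minimal sketch, assuming the import path and field names of the config shown in this diff; the checkpoint is only an example and not part of the PR:

```python
# Minimal sketch (assumed: import path, model_name field, example checkpoint).
from lighteval.models.vllm.vllm_model import VLLMModelConfig

config = VLLMModelConfig(
    model_name="Qwen/Qwen2.5-7B-Instruct-1M",  # example long-context checkpoint, not prescribed by the PR
    max_model_length=131072,                   # large enough for the longest RULER length
    enforce_eager=True,                        # new field added by this PR
    use_dual_chunk_attention=True,             # new field added by this PR; needs a vLLM build that supports it
)
```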
```diff
@@ -187,6 +189,8 @@ def _create_auto_model(self, config: VLLMModelConfig) -> Optional[LLM]:
             "seed": int(config.seed),
             "max_num_seqs": int(config.max_num_seqs),
             "max_num_batched_tokens": int(config.max_num_batched_tokens),
+            "enforce_eager": config.enforce_eager,
+            "use_dual_chunk_attention": config.use_dual_chunk_attention,
         }

         if config.quantization is not None:
```
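Downstream, those two entries are forwarded with the rest of the args dict into vLLM's engine. A rough sketch of the effect, simplified from the code above; whether `use_dual_chunk_attention` is accepted depends on the installed vLLM version, which is exactly what the review thread above is about:

```python
# Simplified sketch of how the args dict reaches vLLM (not the PR's exact code).
from vllm import LLM

args = {
    "model": "Qwen/Qwen2.5-7B-Instruct-1M",  # example checkpoint (assumption)
    "enforce_eager": True,                   # mirrors config.enforce_eager
    "use_dual_chunk_attention": True,        # mirrors config.use_dual_chunk_attention
}
# Unknown keyword arguments typically fail at construction time on vLLM
# versions that do not expose use_dual_chunk_attention.
llm = LLM(**args)
```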
```diff
@@ -276,7 +280,7 @@ def greedy_until(
             if max_new_tokens is not None:
                 if context_size + max_new_tokens > self.max_length:
                     logger.warning(
-                        f"{context_size + max_new_tokens=} which is greater than {self.max_length=}. Truncating context to {self.max_length - max_new_tokens} tokens."
+                        f"{context_size + max_new_tokens=} which is greater than {self.max_length=}. Truncating context to {self.max_length=} - {max_new_tokens=} = {self.max_length - max_new_tokens} tokens."
                     )
                     context_size = self.max_length - max_new_tokens
                     if context_size < 0:
```
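The only change here is the log message: it now spells out the subtraction instead of just the result. A toy illustration of the arithmetic that branch performs:

```python
# Toy numbers for the truncation logic above.
max_length = 131072    # self.max_length, the model context window
max_new_tokens = 128   # generation budget (e.g. a RULER niah task)
context_size = 131400  # prompt that is slightly too long

if context_size + max_new_tokens > max_length:
    # keep prompt + generation within the context window
    context_size = max_length - max_new_tokens  # 131072 - 128 = 130944
```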
New file (69 lines added): the RULER task definitions.

```python
# MIT License

# Copyright (c) 2024 The HuggingFace Team

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import lighteval.tasks.default_prompts as prompt
from lighteval.metrics.metrics import Metrics
from lighteval.tasks.lighteval_task import LightevalTaskConfig


subsets = [
    "niah_single_1",
    "niah_single_2",
    "niah_single_3",
    "niah_multikey_1",
    "niah_multikey_2",
    "niah_multikey_3",
    "niah_multiquery",
    "niah_multivalue",
    "vt",
    "cwe",
    "fwe",
    "qa_1",
    "qa_2",
]

lengths = [131072, 65536, 32768, 16384, 8192, 4096]

task_configs = []

for subset in subsets:
    for length in lengths:
        task_configs.append(
            LightevalTaskConfig(
                name=f"ruler_{length}:{subset}",
                suite=["lighteval"],
                prompt_function=prompt.ruler,
                hf_repo=f"SaylorTwift/RULER-{length}-llama-3.2-tokenizer",
                hf_subset="default",
                hf_avail_splits=[subset],
                evaluation_splits=[subset],
                few_shots_split=None,
                few_shots_select=None,
                generation_size=128 if "niah" in subset else 30 if subset == "vt" else 120 if subset == "cwe" else 50,
                metric=[Metrics.ruler_match_any] if subset in ["qa_1", "qa_2"] else [Metrics.ruler_match_all],
                stop_sequence=None,
                trust_dataset=False,
                version=0,
            )
        )

TASKS_TABLE = task_configs
```

Review comment on the `hf_repo` line: "change the dataset here for other tokenizers".
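To see what the loop above actually produces, a small sanity check against the file's own names; it only reads TASKS_TABLE as defined above and assumes nothing new:

```python
# 13 subsets x 6 lengths = 78 task configs, each named "ruler_<length>:<subset>".
assert len(TASKS_TABLE) == len(subsets) * len(lengths) == 78

qa_long = next(t for t in TASKS_TABLE if t.name == "ruler_131072:qa_1")
print(qa_long.generation_size)  # 50: qa subsets fall through to the final branch
print(qa_long.metric)           # ruler_match_any, since the subset is qa_1
```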