Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Demo for visualize #141

Draft
wants to merge 13 commits into
base: main
Choose a base branch
from
Prev Previous commit
Next Next commit
Port Run Config Measurement (#91)
* Initial changes, basic unit tests passing

* Adding support for making the objective a telemetry metric

* Calculation logic + unit testing added

* Constraint logic in place. All unit tests passing

* Fix codeQL issues.

* Removing accidental negation
nv-braf committed Oct 1, 2024

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature. The key has expired.
commit cecdcc5a85856e487b117a8cd0c51057fd8a0870
5 changes: 2 additions & 3 deletions genai-perf/genai_perf/config/model_spec.py
Original file line number Diff line number Diff line change
@@ -16,7 +16,7 @@
from typing import Dict, List, Optional

# TODO: OPTIMIZE
# from genai_perf.result.model_constraints import ModelConstraints
from genai_perf.measurements.model_constraints import ModelConstraints


@dataclass
@@ -30,8 +30,7 @@ class ModelSpec:
model_name: str
cpu_only: bool = False
objectives: Optional[List] = None
# TODO: OPTIMIZE
# constraints: Optional[ModelConstraints]
constraints: Optional[ModelConstraints] = None
model_config_parameters: Optional[Dict] = None

# PA/GAP flags/parameters
32 changes: 15 additions & 17 deletions genai-perf/genai_perf/measurements/model_config_measurement.py
Original file line number Diff line number Diff line change
@@ -21,13 +21,13 @@

from genai_perf.record.record import Record

Records: TypeAlias = Dict[str, Record]
PerfRecords: TypeAlias = Dict[str, Record]
MetricObjectives: TypeAlias = Dict[str, float]


@dataclass(frozen=True)
class ModelConfigMeasurementDefaults:
METRIC_WEIGHTING = {"perf_throughput": 1.0}
METRIC_OBJECTIVE = {"perf_throughput": 1.0}

SELF_IS_BETTER = 1
OTHER_IS_BETTER = -1
@@ -42,7 +42,7 @@ class ModelConfigMeasurement:
Encapsulates the set of performance metrics (measurements) obtained when profiling a model
"""

def __init__(self, perf_metrics: Records):
def __init__(self, perf_metrics: PerfRecords):
"""
perf_metrics:
Metrics (stored in the Record class) that are associated with how the model
@@ -51,13 +51,13 @@ def __init__(self, perf_metrics: Records):

self._perf_metrics = perf_metrics

# Set a default metric weighting
self._metric_weights = ModelConfigMeasurementDefaults.METRIC_WEIGHTING
# Set a default metric objective
self._metric_objectives = ModelConfigMeasurementDefaults.METRIC_OBJECTIVE

###########################################################################
# Accessor Methods
###########################################################################
def get_perf_metrics(self) -> Records:
def get_perf_metrics(self) -> PerfRecords:
return self._perf_metrics

def get_perf_metric(self, name: str) -> Optional[Record]:
@@ -74,15 +74,15 @@ def get_weighted_score(self, other: "ModelConfigMeasurement") -> float:
"""
return self._calculate_weighted_score(other)

def set_metric_weighting(self, metric_objectives: MetricObjectives) -> None:
def set_metric_objectives(self, metric_objectives: MetricObjectives) -> None:
"""
Sets the metric weighting for this measurement based
Sets metric weighting for this measurement based
on the objectives
"""

        # Each individual weighting is based on its percentage of the total
# weighting. Example: {A: 1, B: 3} would be stored as {A: 0.25, B: 0.75}
self._metric_weights = {
self._metric_objectives = {
objective: (value / sum(metric_objectives.values()))
for objective, value in metric_objectives.items()
}
@@ -99,7 +99,7 @@ def write_to_checkpoint(self) -> Dict[str, Any]:

# Values based solely on user/config settings (that can vary from run to run)
# are not stored in the checkpoint
del mcm_dict["_metric_weights"]
del mcm_dict["_metric_objectives"]

return mcm_dict

@@ -118,8 +118,8 @@ def read_from_checkpoint(cls, mcm_dict: Dict[str, Any]) -> "ModelConfigMeasureme
@classmethod
def _read_perf_metrics_from_checkpoint(
cls, perf_metrics_dict: Dict[str, Any]
) -> Records:
perf_metrics: Records = {}
) -> PerfRecords:
perf_metrics: PerfRecords = {}

for [tag, record_dict] in perf_metrics_dict.values():
record = Record.get(tag)
@@ -164,9 +164,7 @@ def _compare_measurements(self, other: "ModelConfigMeasurement") -> int:

if weighted_score > ModelConfigMeasurementDefaults.COMPARISON_SCORE_THRESHOLD:
return ModelConfigMeasurementDefaults.SELF_IS_BETTER
elif (
weighted_score < -ModelConfigMeasurementDefaults.COMPARISON_SCORE_THRESHOLD
):
elif weighted_score < ModelConfigMeasurementDefaults.COMPARISON_SCORE_THRESHOLD:
return ModelConfigMeasurementDefaults.OTHER_IS_BETTER
else:
return ModelConfigMeasurementDefaults.EQUALIVILENT
@@ -184,7 +182,7 @@ def _calculate_weighted_score(self, other: "ModelConfigMeasurement") -> float:
"""

weighted_score = 0.0
for objective, weight in self._metric_weights.items():
for objective, weight in self._metric_objectives.items():
self_metric = self.get_perf_metric(objective)
other_metric = other.get_perf_metric(objective)

@@ -215,7 +213,7 @@ def calculate_weighted_percentage_gain(
"""

weighted_pct = 0.0
for objective, weight in self._metric_weights.items():
for objective, weight in self._metric_objectives.items():
self_metric = self.get_perf_metric(objective)
other_metric = other.get_perf_metric(objective)

40 changes: 40 additions & 0 deletions genai-perf/genai_perf/measurements/model_constraints.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Dict, Optional, Tuple, TypeAlias, Union

ConstraintName: TypeAlias = str
ConstraintValue: TypeAlias = Union[float, int]

Constraint: TypeAlias = Tuple[ConstraintName, ConstraintValue]
Constraints: TypeAlias = Dict[ConstraintName, ConstraintValue]


@dataclass
class ModelConstraints:
"""
A dataclass that specifies the constraints used for a single model
"""

constraints: Optional[Constraints] = None

def has_constraint(self, constraint_name: ConstraintName) -> bool:
"""
Checks if a given constraint is present
"""
if self.constraints and constraint_name in self.constraints:
return True
else:
return False
Loading