
Commit

Add torchbench benchmarks to targets.py (#1238)
riccardofelluga authored Oct 3, 2024
1 parent 3d7ffac commit c4c3ce3
Showing 2 changed files with 124 additions and 0 deletions.
53 changes: 53 additions & 0 deletions thunder/benchmarks/__init__.py
@@ -3,6 +3,7 @@
import tempfile
import textwrap
import time
from collections import UserDict
from collections.abc import Callable
from collections.abc import Sequence
from dataclasses import dataclass
@@ -2940,6 +2941,58 @@ def fn(self) -> Callable:
return model


class TorchbenchBenchmark(Benchmark, metaclass=UserFacingBenchmarkMeta):
_args = (
BenchmarkArg(
name="module_name",
description="The torchbenchmark module name (str).",
),
BenchmarkArg(
name="device",
description="A device (str) to run on {'cpu' | 'cuda'}. Default is 'cuda'.",
),
BenchmarkArg(
name="requires_grad",
description="Whether the model parameters require grad. Default is True.",
),
)

@classmethod
@property
def name(cls) -> str:
return "torchbench"

@classmethod
@property
def description(cls) -> str:
return "Torchbench fixture"

@classmethod
@property
def args(cls) -> tuple[BenchmarkArg, ...]:
return cls._args

def __init__(self, module_name, device: str = "cuda", requires_grad: bool = True):
import importlib

module = importlib.import_module(f"torchbenchmark.models.{module_name}")
self.benchmark_cls = getattr(module, "Model", None)

benchmark = self.benchmark_cls(test="train" if requires_grad else "eval", device=device)

model, example = benchmark.get_module()
self.model = model
self.example_input = example

def make_batch(self) -> tuple[list, dict]:
if isinstance(self.example_input, (dict, UserDict)):
return [], self.example_input
return self.example_input, {}

def fn(self) -> Callable:
return self.model


# TODO Add descriptions to the executors when listed, and list them alphabetically
# TODO Allow querying benchmark for details
# TODO Allow specifying benchmark arguments
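
For reference, a minimal usage sketch (not part of the commit) of how the new TorchbenchBenchmark class can be driven on its own, assuming torchbenchmark is installed and that "resnet50" is among the installed model modules (an assumed example name):

from thunder.benchmarks import TorchbenchBenchmark

# Assumed example: "resnet50" must be an installed torchbench model module.
b = TorchbenchBenchmark("resnet50", device="cuda", requires_grad=True)

# make_batch() returns positional args for tensor/sequence example inputs,
# or keyword args when the example input is a dict/UserDict.
args, kwargs = b.make_batch()

# fn() returns the underlying torch.nn.Module; call it with the example batch.
out = b.fn()(*args, **kwargs)
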
71 changes: 71 additions & 0 deletions thunder/benchmarks/targets.py
@@ -1,3 +1,5 @@
import importlib
import warnings
import os
from collections.abc import Callable
from enum import auto, Enum
@@ -25,6 +27,7 @@
LitGPTGeluBenchmark,
NanoGPTLayerNormBenchmark,
ResNet50Benchmark,
TorchbenchBenchmark,
thunder_apex_executor,
thunder_apex_nvfuser_executor,
thunder_cudnn_executor,
@@ -812,3 +815,71 @@ def test_resnet50(benchmark, executor: Callable, compute_type: ComputeType):
fn = executor(b.fn())

benchmark_for_compute_type(compute_type, benchmark, fn, args, kwargs)


#
# Torchbench benchmarks
#
# To set up torchbenchmark, please follow the instructions to
# install it as a library: https://github.com/pytorch/benchmark.
# To install canary models, make sure to add `--canary` to `python install.py`.

torchbench_models = []
torchbench_canary_models = []
if importlib.util.find_spec("torchbenchmark"):
from torchbenchmark import _list_canary_model_paths, _list_model_paths

torchbench_models = [os.path.basename(x) for x in _list_model_paths()]
torchbench_canary_models = [os.path.basename(x) for x in _list_canary_model_paths()]


@pytest.mark.skipif(not torchbench_models, reason="requires torchbenchmark to be installed")
@pytest.mark.parametrize(
"module_name,",
torchbench_models,
ids=torchbench_models,
)
@pytest.mark.parametrize(
"executor,",
executors,
ids=executors_ids,
)
@parametrize_compute_type
def test_torchbench(benchmark, module_name, executor, compute_type: ComputeType):
if not importlib.util.find_spec("torchbenchmark.models." + module_name):
pytest.skip(f"model {module_name} not installed")

with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
b = TorchbenchBenchmark(module_name, device="cuda", requires_grad=is_requires_grad(compute_type))

args, kwargs = b.make_batch()
fn = executor(b.fn())

benchmark_for_compute_type(compute_type, benchmark, fn, args, kwargs)


@pytest.mark.skipif(not torchbench_canary_models, reason="requires torchbenchmark to be installed with flag --canary")
@pytest.mark.parametrize(
"module_name,",
torchbench_canary_models,
ids=torchbench_canary_models,
)
@pytest.mark.parametrize(
"executor,",
executors,
ids=executors_ids,
)
@parametrize_compute_type
def test_torchbench_canary(benchmark, module_name, executor, compute_type: ComputeType):
if not importlib.util.find_spec("torchbenchmark.models." + module_name):
pytest.skip(f"model {module_name} not installed")

with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
b = TorchbenchBenchmark(module_name, device="cuda", requires_grad=is_requires_grad(compute_type))

args, kwargs = b.make_batch()
fn = executor(b.fn())

benchmark_for_compute_type(compute_type, benchmark, fn, args, kwargs)
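
The model lists that drive the parametrizations above can also be inspected directly; a small sketch, assuming torchbenchmark (and optionally its canary models) is installed:

import importlib.util
import os

if importlib.util.find_spec("torchbenchmark"):
    from torchbenchmark import _list_canary_model_paths, _list_model_paths

    # Same discovery as in targets.py: model names are the basenames of the
    # per-model directories shipped with torchbenchmark.
    models = [os.path.basename(p) for p in _list_model_paths()]
    canary = [os.path.basename(p) for p in _list_canary_model_paths()]
    print(f"{len(models)} models, {len(canary)} canary models available")
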
