From 03ea4cc7cc17c18a233be15c55caba738d4e7625 Mon Sep 17 00:00:00 2001 From: lrq619 Date: Mon, 14 Aug 2023 12:33:32 +0800 Subject: [PATCH] add bert benchmark,support knative and cpu cluster,cannot run on CI Signed-off-by: lrq619 --- .github/dependabot.yml | 8 + .github/workflows/e2e-bert.yml | 134 ++++++ benchmarks/bert/Makefile | 116 +++++ benchmarks/bert/README.md | 22 + benchmarks/bert/docker/Dockerfile | 35 ++ benchmarks/bert/python/accuracy-squad.py | 449 ++++++++++++++++++ .../bert/python/config/bert_config.json | 13 + benchmarks/bert/python/config/user.conf | 6 + benchmarks/bert/python/create_squad_data.py | 413 ++++++++++++++++ benchmarks/bert/python/pytorch_SUT.py | 78 +++ benchmarks/bert/python/server.py | 181 +++++++ benchmarks/bert/python/squad_QSL.py | 97 ++++ benchmarks/bert/requirements.txt | 6 + .../yamls/docker-compose/dc-bert-python.yaml | 33 ++ .../bert/yamls/knative/kn-bert-python.yaml | 44 ++ 15 files changed, 1635 insertions(+) create mode 100644 .github/workflows/e2e-bert.yml create mode 100644 benchmarks/bert/Makefile create mode 100644 benchmarks/bert/README.md create mode 100644 benchmarks/bert/docker/Dockerfile create mode 100644 benchmarks/bert/python/accuracy-squad.py create mode 100644 benchmarks/bert/python/config/bert_config.json create mode 100644 benchmarks/bert/python/config/user.conf create mode 100644 benchmarks/bert/python/create_squad_data.py create mode 100644 benchmarks/bert/python/pytorch_SUT.py create mode 100644 benchmarks/bert/python/server.py create mode 100644 benchmarks/bert/python/squad_QSL.py create mode 100644 benchmarks/bert/requirements.txt create mode 100644 benchmarks/bert/yamls/docker-compose/dc-bert-python.yaml create mode 100644 benchmarks/bert/yamls/knative/kn-bert-python.yaml diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 7f0722de..a00e1b89 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -24,6 +24,14 @@ updates: - dependency-name: "*" update-types: [ "version-update:semver-patch" ] + - package-ecosystem: "gomod" + directory: "/benchmarks/bert" + schedule: + interval: "weekly" + ignore: + - dependency-name: "*" + update-types: [ "version-update:semver-patch" ] + - package-ecosystem: "gomod" directory: "/benchmarks/hotel-app" schedule: diff --git a/.github/workflows/e2e-bert.yml b/.github/workflows/e2e-bert.yml new file mode 100644 index 00000000..e735ad70 --- /dev/null +++ b/.github/workflows/e2e-bert.yml @@ -0,0 +1,134 @@ +name: Bert End-to-End Tests + +on: + schedule: + - cron: "0 9 * * 1" + workflow_dispatch: + push: + branches: [main] + paths: + - "benchmarks/bert/**" + - "utils/**" + - "tools/**" + - "runner/**" + + pull_request: + branches: [main] + paths: + - "benchmarks/bert/**" + - "utils/**" + - "tools/**" + - "runner/**" + +env: + GOOS: linux + GO111MODULE: on + PORT: 50051 + PLATFORMS: linux/amd64,linux/arm64 + +jobs: + build-and-push: + name: Build and push all images + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + service: + [ + bert-python + ] + + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + with: + lfs: "true" + + - uses: actions/setup-go@v4 + with: + go-version: '1.18' + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Install AWS CLI + uses: 
unfor19/install-aws-cli-action@master + with: + version: '2' + + - name: Set up Python version + uses: actions/setup-python@v4 + with: + python-version: "3.9" + + - name: Set up python dependencies + run: | + python3 -m pip install --upgrade pip + python3 -m pip install wheel ez_setup setuptools + GRPC_PYTHON_BUILD_SYSTEM_ZLIB=true + + - name: Setup go dependencies + working-directory: benchmarks/auth + env: + GOPRIVATE_KEY: ${{ secrets.XDT_REPO_ACCESS_KEY }} + run: | + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1 + + # - name: Data Setup + # working-directory: benchmarks/bert + # run: make setup + + # - name: Build and push + # working-directory: benchmarks/bert + # run: make push-${{ matrix.service }} + # Downloading the model takes too much time, simply pull the image from docker container + + # - name: Pull the Image + # working-directory: benchmarks/bert + # run: make pull-${{ matrix.service }} + + test-compose: + name: Test Docker Compose + needs: build-and-push + env: + YAML_DIR: benchmarks/bert/yamls/docker-compose/ + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + service: + [ + bert-python + ] + + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + with: + lfs: "true" + + # - name: start docker-compose benchmark + # run: | + # docker-compose -f ${{ env.YAML_DIR }}/dc-${{ matrix.service }}.yaml pull + # docker-compose -f ${{ env.YAML_DIR }}/dc-${{ matrix.service }}.yaml up &> log_file & + # sleep 60s + # cat log_file + + # - name: invoke the chain + # run: | + # ./tools/bin/grpcurl -plaintext localhost:50000 helloworld.Greeter.SayHello + # Currently cannot run on CI due to no enough space on device + + # - name: show docker-compose log + # run: cat log_file diff --git a/benchmarks/bert/Makefile b/benchmarks/bert/Makefile new file mode 100644 index 00000000..4f3e5f38 --- /dev/null +++ b/benchmarks/bert/Makefile @@ -0,0 +1,116 @@ +DOCKER_HUB_ACCOUNT=vhiveease +SHELL := /bin/bash + +MAKEFILE_NAME := $(lastword $(MAKEFILE_LIST)) +UNAME := $(shell whoami) +UID := $(shell id -u `whoami`) +GROUPNAME := $(shell id -gn `whoami`) +GROUPID := $(shell id -g `whoami`) + +HOST_VOL ?= ${PWD} +CONTAINER_VOL ?= /workspace + +BUILD_DIR := build +DATA_DIR := $(BUILD_DIR)/data +BERT_DIR := $(DATA_DIR)/bert_tf_v1_1_large_fp32_384_v2 +RESULT_DIR := $(BUILD_DIR)/result +MLPERF_CONF := $(BUILD_DIR)/mlperf.conf +FEATURE_CACHE := eval_features.pickle + +ROOT = ../../ + +FUNCTIONS = bert-python +ALL_IMAGES = $(addsuffix -image, $(FUNCTIONS)) + +# Handle different nvidia-docker version +ifneq ($(wildcard /usr/bin/nvidia-docker),) + DOCKER_RUN_CMD := nvidia-docker run +else + DOCKER_RUN_CMD := docker run --gpus=all +endif + +all: all_image + +all_image: $(ALL_IMAGES) + +.PHONY: setup +setup: + @if [ ! -e $(BUILD_DIR) ]; then \ + mkdir $(BUILD_DIR); \ + fi + @$(MAKE) -f $(MAKEFILE_NAME) download_data + @$(MAKE) -f $(MAKEFILE_NAME) download_model + +.PHONY: download_data +download_data: + @if [ ! -e $(DATA_DIR) ]; then \ + mkdir $(DATA_DIR); \ + fi + @if [ ! -e $(DATA_DIR)/dev-v1.1.json ]; then \ + wget -O $(DATA_DIR)/dev-v1.1.json https://github.com/rajpurkar/SQuAD-explorer/blob/master/dataset/dev-v1.1.json?raw=true; \ + fi + @if [ ! -e $(DATA_DIR)/evaluate-v1.1.py ]; then \ + wget -O $(DATA_DIR)/evaluate-v1.1.py https://github.com/allenai/bi-att-flow/raw/master/squad/evaluate-v1.1.py; \ + fi + @if [ ! -e $(BERT_DIR) ]; then \ + mkdir $(BERT_DIR) ; \ + fi + @if [ ! 
-e $(RESULT_DIR) ]; then \ + mkdir $(RESULT_DIR); \ + fi + +.PHONY: download_model +download_model: + @if [ ! -e $(BERT_DIR)/model.ckpt-5474.data-00000-of-00001 ]; then \ + wget -O $(BERT_DIR)/model.ckpt-5474.data-00000-of-00001 https://zenodo.org/record/3733868/files/model.ckpt-5474.data-00000-of-00001?download=1; \ + fi + @if [ ! -e $(BERT_DIR)/model.ckpt-5474.index ]; then \ + wget -O $(BERT_DIR)/model.ckpt-5474.index https://zenodo.org/record/3733868/files/model.ckpt-5474.index?download=1; \ + fi + @if [ ! -e $(BERT_DIR)/model.ckpt-5474.meta ]; then \ + wget -O $(BERT_DIR)/model.ckpt-5474.meta https://zenodo.org/record/3733868/files/model.ckpt-5474.meta?download=1; \ + fi + @if [ ! -e $(BERT_DIR)/vocab.txt ]; then \ + wget -O $(BERT_DIR)/vocab.txt https://zenodo.org/record/3733868/files/vocab.txt?download=1; \ + fi + @if [ ! -e $(BERT_DIR)/model.pb ]; then \ + wget -O $(BERT_DIR)/model.pb https://zenodo.org/record/3939747/files/model.pb?download=1; \ + fi + @if [ ! -e $(BERT_DIR)/model.pytorch ]; then \ + wget -O $(BERT_DIR)/model.pytorch https://zenodo.org/record/3733896/files/model.pytorch?download=1; \ + fi + @if [ ! -e $(BERT_DIR)/vocab.txt ]; then \ + wget -O $(BERT_DIR)/vocab.txt https://zenodo.org/record/3733896/files/vocab.txt?download=1; \ + fi + + +# .PHONY: build_docker +# build_docker: +bert-python-image: docker/Dockerfile python/server.py + @docker pull nvcr.io/nvidia/tensorrtserver:19.08-py3 + DOCKER_BUILDKIT=1 docker buildx build \ + --build-arg GID=$(GROUPID) \ + --build-arg UID=$(UID) \ + --build-arg GROUP=$(GROUPNAME) \ + --build-arg USER=$(UNAME) \ + --build-arg BASE_IMAGE=mlperf-inference-bert \ + --tag $(DOCKER_HUB_ACCOUNT)/bert-python:latest \ + -f docker/Dockerfile \ + $(ROOT) --load + + + + +.PHONY: launch_docker +launch_docker: + $(DOCKER_RUN_CMD) --rm -it -w /workspace $(DOCKER_HUB_ACCOUNT)/bert-python + +.PHONY: clean +clean: + @rm -rf ${BUILD_DIR} + +push-%: %-image + docker push docker.io/$(DOCKER_HUB_ACCOUNT)/$(subst push-,,$@):latest + +pull-%: + docker pull docker.io/$(DOCKER_HUB_ACCOUNT)/$(subst pull-,,$@):latest \ No newline at end of file diff --git a/benchmarks/bert/README.md b/benchmarks/bert/README.md new file mode 100644 index 00000000..81ae11ce --- /dev/null +++ b/benchmarks/bert/README.md @@ -0,0 +1,22 @@ +# Bert Benchmark + +The `bert` benchmark is a large-language model that does inference tasks. + +The function currently is only implemented in one runtime, namely Python. + + +## Running this benchmark locally (using docker) + +The detailed and general description how to run benchmarks local you can find [here](../../docs/running_locally.md). The following steps show it on the `bert-python` function. +1. Download the data and model using `make setup`, this may take a lot of time +2. Build or pull the function images using `make all` or `make pull`. +### Invoke once +3. Start the function with docker-compose + ```bash + docker-compose -f ./yamls/docker-compose/dc-bert-python.yaml up + ``` +4. In a new terminal, invoke the interface function with grpcurl. + ```bash + ./tools/bin/grpcurl -plaintext localhost:50000 helloworld.Greeter.SayHello + ``` + This will outputs the min, max and mean inference time of 1 inference, this may take around a few seconds. 
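+   For reference, the reply can also be read programmatically. Below is a minimal client
+   sketch against the function container itself (port 50051), assuming the generated
+   `bert_pb2`/`bert_pb2_grpc` stubs from vSwarm-proto are importable and that the request
+   message is `HelloRequest` with a single `name` field (the server only reads `request.name`);
+   the reply fields `min_latency`, `max_latency` and `mean_latency` come from the LoadGen
+   summary and are in nanoseconds (see `python/server.py`).
+   ```python
+   # Minimal client sketch. HelloRequest is an assumed message name; the reply
+   # fields mirror python/server.py and carry LoadGen latencies in nanoseconds.
+   import grpc
+   import bert_pb2
+   import bert_pb2_grpc
+
+   def invoke(addr: str = "localhost:50051") -> None:
+       with grpc.insecure_channel(addr) as channel:
+           stub = bert_pb2_grpc.GreeterStub(channel)
+           reply = stub.SayHello(bert_pb2.HelloRequest(name="bert-benchmark"))
+           print("min latency:  %.2f ms" % (reply.min_latency / 1e6))
+           print("max latency:  %.2f ms" % (reply.max_latency / 1e6))
+           print("mean latency: %.2f ms" % (reply.mean_latency / 1e6))
+
+   if __name__ == "__main__":
+       invoke()
+   ```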
You can change the default settings in `python/config/user.conf` diff --git a/benchmarks/bert/docker/Dockerfile b/benchmarks/bert/docker/Dockerfile new file mode 100644 index 00000000..43499103 --- /dev/null +++ b/benchmarks/bert/docker/Dockerfile @@ -0,0 +1,35 @@ +ARG FROM_IMAGE_NAME=nvcr.io/nvidia/tensorflow:19.10-py3 +FROM ${FROM_IMAGE_NAME} + +RUN apt-get update && apt-get install -y pbzip2 pv bzip2 libcurl4 curl + +WORKDIR /workspace + +# Install third_party library +RUN mkdir /tmp/third_party \ + && cd /tmp/third_party \ + && git clone https://github.com/pybind/pybind11.git \ + && mv pybind11 pybind \ + && cd /tmp/third_party/pybind \ + && git reset --hard 25abf7efba + +# Install LoadGen +RUN cd /tmp/ \ + && git clone https://github.com/lrq619/loadgen.git \ + && cd /tmp/loadgen \ + && python3 setup.py install \ + && cd /tmp \ + && rm -rf /tmp/loadgen \ + && rm -rf /tmp/third_party + +COPY benchmarks/bert/requirements.txt /workspace/ +RUN python3 -m pip install -r requirements.txt + +COPY benchmarks/bert/build /workspace/build +COPY benchmarks/bert/python /workspace/python +RUN mv /workspace/python/config/bert_config.json /workspace/ && mv /workspace/python/config/user.conf /workspace/ + +ADD https://raw.githubusercontent.com/vhive-serverless/vSwarm-proto/add-bert/proto/bert/bert_pb2_grpc.py /workspace/python +ADD https://raw.githubusercontent.com/vhive-serverless/vSwarm-proto/add-bert/proto/bert/bert_pb2.py /workspace/python/proto/bert/ + +ENTRYPOINT [ "python3", "python/server.py" ,"--addr=0.0.0.0", "--port=50051"] diff --git a/benchmarks/bert/python/accuracy-squad.py b/benchmarks/bert/python/accuracy-squad.py new file mode 100644 index 00000000..3f3a01c0 --- /dev/null +++ b/benchmarks/bert/python/accuracy-squad.py @@ -0,0 +1,449 @@ +# coding=utf-8 +# Copyright 2021 Arm Limited and affiliates. +# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import collections +import json +import math +import os +import subprocess +import sys + +import numpy as np +import pkg_resources +import six +from transformers import BertTokenizer + +# To support feature cache. 
+import pickle + +sys.path.insert(0, os.path.dirname(__file__)) + +installed = {pkg.key for pkg in pkg_resources.working_set} +if "tensorflow" in installed: + import tensorflow + sys.path.insert( + 0, os.path.join( + os.path.dirname(__file__), + "DeepLearningExamples", "TensorFlow", "LanguageModeling", "BERT" + ) + ) +elif "torch" in installed: + import torch + sys.path.insert( + 0, os.path.join( + os.path.dirname(__file__), + "DeepLearningExamples", "PyTorch", "LanguageModeling", "BERT" + ) + ) + +import tokenization +from create_squad_data import convert_examples_to_features, read_squad_examples + +max_seq_length = 384 +max_query_length = 64 +doc_stride = 128 + +RawResult = collections.namedtuple( + "RawResult", ["unique_id", "start_logits", "end_logits"]) + +dtype_map = { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "float16": np.float16, + "float32": np.float32, + "float64": np.float64 +} + + +def get_final_text(pred_text, orig_text, do_lower_case): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. + # + # However, `orig_text` may contain extra characters that we don't want in + # our prediction. + # + # For example, let's say: + # pred_text = steve smith + # orig_text = Steve Smith's + # + # We don't want to return `orig_text` because it contains the extra "'s". + # + # We don't want to return `pred_text` because it's already been normalized + # (the SQuAD eval script also does punctuation stripping/lower casing but + # our tokenizer does additional normalization like stripping accent + # characters). + # + # What we really want to return is "Steve Smith". + # + # Therefore, we have to apply a semi-complicated alignment heruistic between + # `pred_text` and `orig_text` to get a character-to-charcter alignment. This + # can fail in certain cases in which case we just return `orig_text`. + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for (i, c) in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. If they are the same + # length, we assume the characters are one-to-one aligned. + tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. 
+ tok_s_to_ns_map = {} + for (i, tok_index) in six.iteritems(tok_ns_to_s_map): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted( + enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +def write_predictions(all_examples, all_features, all_results, n_best_size, + max_answer_length, do_lower_case, output_prediction_file, max_examples=None): + """Write final predictions to the json file and log-odds of null if needed.""" + print("Writing predictions to: %s" % (output_prediction_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + if max_examples and example_index == max_examples: + break + + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min mull score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for (feature_index, feature) in enumerate(features): + # FIX: During compliance/audit runs, we only generate a small subset of + # all entries from the dataset. As a result, sometimes dict retrieval + # fails because a key is missing. 
+ # result = unique_id_to_result[feature.unique_id] + result = unique_id_to_result.get(feature.unique_id, None) + if result is None: + continue + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
+ if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + all_predictions[example.qas_id] = nbest_json[0]["text"] + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + +def load_loadgen_log(log_path, eval_features, dtype=np.float32, output_transposed=False): + with open(log_path) as f: + predictions = json.load(f) + + results = [] + for prediction in predictions: + qsl_idx = prediction["qsl_idx"] + if output_transposed: + logits = np.frombuffer(bytes.fromhex( + prediction["data"]), dtype).reshape(2, -1) + logits = np.transpose(logits) + else: + logits = np.frombuffer(bytes.fromhex( + prediction["data"]), dtype).reshape(-1, 2) + # Pad logits to max_seq_length + seq_length = logits.shape[0] + start_logits = np.ones(max_seq_length) * -10000.0 + end_logits = np.ones(max_seq_length) * -10000.0 + start_logits[:seq_length] = logits[:, 0] + end_logits[:seq_length] = logits[:, 1] + results.append(RawResult( + unique_id=eval_features[qsl_idx].unique_id, + start_logits=start_logits.tolist(), + end_logits=end_logits.tolist() + )) + + return results + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--vocab_file", default="build/data/bert_tf_v1_1_large_fp32_384_v2/vocab.txt", help="Path to vocab.txt") + parser.add_argument( + "--val_data", default="build/data/dev-v1.1.json", help="Path to validation data") + parser.add_argument("--log_file", default="build/logs/mlperf_log_accuracy.json", + help="Path to LoadGen accuracy log") + parser.add_argument("--out_file", default="build/result/predictions.json", + help="Path to output predictions file") + parser.add_argument("--features_cache_file", + default="eval_features.pickle", help="Path to features' cache file") + parser.add_argument("--output_transposed", + action="store_true", help="Transpose the output") + parser.add_argument("--output_dtype", default="float32", + choices=dtype_map.keys(), help="Output data type") + parser.add_argument("--max_examples", type=int, + help="Maximum number of examples to consider (not limited by default)") + args = parser.parse_args() + + output_dtype = dtype_map[args.output_dtype] + + print("Reading examples...") + eval_examples = read_squad_examples(input_file=args.val_data, + is_training=False, version_2_with_negative=False) + + eval_features = [] + # Load features if cached, convert from examples otherwise. + cache_path = args.features_cache_file + if os.path.exists(cache_path): + print("Loading cached features from '%s'..." % cache_path) + with open(cache_path, 'rb') as cache_file: + eval_features = pickle.load(cache_file) + else: + print("No cached features at '%s'... converting from examples..." 
% cache_path) + + print("Creating tokenizer...") + tokenizer = BertTokenizer(args.vocab_file) + + print("Converting examples to features...") + + def append_feature(feature): + eval_features.append(feature) + + convert_examples_to_features( + examples=eval_examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + is_training=False, + output_fn=append_feature, + verbose_logging=False) + + print("Caching features at '%s'..." % cache_path) + with open(cache_path, 'wb') as cache_file: + pickle.dump(eval_features, cache_file) + + print("Loading LoadGen logs...") + results = load_loadgen_log( + args.log_file, eval_features, output_dtype, args.output_transposed) + + print("Post-processing predictions...") + write_predictions(eval_examples, eval_features, results, + 20, 30, True, args.out_file, args.max_examples) + + print("Evaluating predictions...") + cmd = "python3 {:}/evaluate_v1.1.py {:} {:} {}".format( + os.path.dirname(os.path.abspath(__file__)), args.val_data, + args.out_file, '--max_examples {}'.format( + args.max_examples) if args.max_examples else '') + subprocess.check_call(cmd, shell=True) + + +if __name__ == "__main__": + main() diff --git a/benchmarks/bert/python/config/bert_config.json b/benchmarks/bert/python/config/bert_config.json new file mode 100644 index 00000000..a7efa973 --- /dev/null +++ b/benchmarks/bert/python/config/bert_config.json @@ -0,0 +1,13 @@ +{ + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 1024, + "initializer_range": 0.02, + "intermediate_size": 4096, + "max_position_embeddings": 512, + "num_attention_heads": 16, + "num_hidden_layers": 24, + "type_vocab_size": 2, + "vocab_size": 30522 +} diff --git a/benchmarks/bert/python/config/user.conf b/benchmarks/bert/python/config/user.conf new file mode 100644 index 00000000..f14fc590 --- /dev/null +++ b/benchmarks/bert/python/config/user.conf @@ -0,0 +1,6 @@ +# Please set these fields depending on the performance of your system to +# override default LoadGen settings. +*.SingleStream.target_latency = 10 +*.MultiStream.target_latency = 80 +*.Server.target_qps = 1.0 +*.Offline.target_qps = 32.0 \ No newline at end of file diff --git a/benchmarks/bert/python/create_squad_data.py b/benchmarks/bert/python/create_squad_data.py new file mode 100644 index 00000000..a86efa49 --- /dev/null +++ b/benchmarks/bert/python/create_squad_data.py @@ -0,0 +1,413 @@ +# Copyright 2021 Arm Limited and affiliates. +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is identical to DeepLearningExamples/TensorFlow/LanguageModeling/BERT/utils/create_squad_data.py +# except that the dependency on horovod is removed. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import json +import tokenization +import six + +class SquadExample(object): + """A single training/test example for simple sequence classification. + + For examples without an answer, the start and end position are -1. + """ + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + tokenization.printable_text(self.question_text)) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.start_position: + s += ", end_position: %d" % (self.end_position) + if self.start_position: + s += ", is_impossible: %r" % (self.is_impossible) + return s + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + +def read_squad_examples(input_file, is_training, version_2_with_negative=False): + """Read a SQuAD json file into a list of SquadExample.""" + with open(input_file) as reader: + input_data = json.load(reader)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + end_position = None + orig_answer_text = None + is_impossible = False + if is_training: + + if version_2_with_negative: + is_impossible = qa["is_impossible"] + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + answer_offset = answer["answer_start"] + answer_length = len(orig_answer_text) + start_position = char_to_word_offset[answer_offset] + end_position = char_to_word_offset[answer_offset + answer_length - + 1] + # Only add answers 
where the text can be exactly recovered from the + # document. If this CAN'T happen it's likely due to weird Unicode + # stuff so we will just skip the example. + # + # Note that this means for training mode, every example is NOT + # guaranteed to be preserved. + actual_text = " ".join( + doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join( + tokenization.whitespace_tokenize(orig_answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + print("Could not find answer: '%s' vs. '%s'", + actual_text, cleaned_answer_text) + continue + else: + start_position = -1 + end_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + is_impossible=is_impossible) + examples.append(example) + + return examples[0:100] + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, + orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + + # The SQuAD annotations are character based. We first project them to + # whitespace-tokenized words. But then after WordPiece tokenization, we can + # often find a "better match". For example: + # + # Question: What year was John Smith born? + # Context: The leader was John Smith (1895-1943). + # Answer: 1895 + # + # The original whitespace-tokenized answer will be "(1895-1943).". However + # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match + # the exact answer, 1895. + # + # However, this is not always possible. Consider the following: + # + # Question: What country is the top exporter of electornics? + # Context: The Japanese electronics industry is the lagest in the world. + # Answer: Japan + # + # In this case, the annotator chose "Japan" as a character sub-span of + # the word "Japanese". Since our WordPiece tokenizer does not split + # "Japanese", we just use "Japanese" as the annotation. This is fairly rare + # in SQuAD, but does happen. 
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training, + output_fn, verbose_logging=False): + """Loads a data file into a list of `InputBatch`s.""" + + unique_id = 1000000000 + + for (example_index, example) in enumerate(examples): + query_tokens = tokenizer.tokenize(example.question_text) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. + _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. 
+ while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if is_training and not example.is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and + tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + start_position = 0 + end_position = 0 + else: + doc_offset = len(query_tokens) + 2 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + if is_training and example.is_impossible: + start_position = 0 + end_position = 0 + + if verbose_logging and example_index < 20: + print("*** Example ***") + print("unique_id: %s" % (unique_id)) + print("example_index: %s" % (example_index)) + print("doc_span_index: %s" % (doc_span_index)) + print("tokens: %s" % " ".join( + [tokenization.printable_text(x) for x in tokens])) + print("token_to_orig_map: %s" % " ".join( + ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) + print("token_is_max_context: %s" % " ".join([ + "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) + ])) + print("input_ids: %s" % " ".join([str(x) for x in input_ids])) + print( + "input_mask: %s" % " ".join([str(x) for x in input_mask])) + print( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + if is_training and example.is_impossible: + print("impossible example") + if is_training and not example.is_impossible: + answer_text = " ".join(tokens[start_position:(end_position + 1)]) + print("start_position: %d" % (start_position)) + print("end_position: %d" % (end_position)) + print( + "answer: %s" % (tokenization.printable_text(answer_text))) + + feature = InputFeatures( + unique_id=unique_id, + example_index=example_index, + doc_span_index=doc_span_index, + tokens=tokens, + token_to_orig_map=token_to_orig_map, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + start_position=start_position, + end_position=end_position, + is_impossible=example.is_impossible) + + # Run callback + output_fn(feature) + + unique_id += 1 diff --git a/benchmarks/bert/python/pytorch_SUT.py b/benchmarks/bert/python/pytorch_SUT.py new file mode 100644 index 00000000..af20e3f2 --- /dev/null +++ b/benchmarks/bert/python/pytorch_SUT.py @@ -0,0 +1,78 @@ +import array +import json +import os +import sys +sys.path.insert(0, os.path.join(os.getcwd(), "DeepLearningExamples", "PyTorch", "LanguageModeling", "BERT")) +sys.path.insert(0, os.getcwd()) + +import mlperf_loadgen as lg +import numpy as np +import torch +import transformers +from transformers import BertConfig, BertForQuestionAnswering +from squad_QSL import get_squad_QSL + +class BERT_PyTorch_SUT(): + def __init__(self, args): + print("Loading BERT configs...") + with open("bert_config.json") as f: + config_json = json.load(f) + + config = BertConfig( + attention_probs_dropout_prob=config_json["attention_probs_dropout_prob"], + hidden_act=config_json["hidden_act"], + hidden_dropout_prob=config_json["hidden_dropout_prob"], + hidden_size=config_json["hidden_size"], + 
initializer_range=config_json["initializer_range"], + intermediate_size=config_json["intermediate_size"], + max_position_embeddings=config_json["max_position_embeddings"], + num_attention_heads=config_json["num_attention_heads"], + num_hidden_layers=config_json["num_hidden_layers"], + type_vocab_size=config_json["type_vocab_size"], + vocab_size=config_json["vocab_size"]) + + self.dev = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") + print("Using: %s"%self.dev) + self.version = transformers.__version__ + + print("Loading PyTorch model...") + self.model = BertForQuestionAnswering(config) + self.model.to(self.dev) + self.model.eval() + model_file = os.environ.get("ML_MODEL_FILE_WITH_PATH", "build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch") + self.model.load_state_dict(torch.load(model_file), strict=False) + + print("Constructing SUT...") + self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries) + print("Finished constructing SUT.") + + self.qsl = get_squad_QSL(args.max_examples) + + def issue_queries(self, query_samples): + with torch.no_grad(): + print("query_samples number: %d"%len(query_samples)) + for i in range(len(query_samples)): + eval_features = self.qsl.get_features(query_samples[i].index) + model_output = self.model.forward(input_ids=torch.LongTensor(eval_features.input_ids).unsqueeze(0).to(self.dev), + attention_mask=torch.LongTensor(eval_features.input_mask).unsqueeze(0).to(self.dev), + token_type_ids=torch.LongTensor(eval_features.segment_ids).unsqueeze(0).to(self.dev)) + if self.version >= '4.0.0': + start_scores = model_output.start_logits + end_scores = model_output.end_logits + else: + start_scores, end_scores = model_output + output = torch.stack([start_scores, end_scores], axis=-1).squeeze(0).cpu().numpy() + + response_array = array.array("B", output.tobytes()) + bi = response_array.buffer_info() + response = lg.QuerySampleResponse(query_samples[i].id, bi[0], bi[1]) + lg.QuerySamplesComplete([response]) + + def flush_queries(self): + pass + + def __del__(self): + print("Finished destroying SUT.") + +def get_pytorch_sut(args): + return BERT_PyTorch_SUT(args) \ No newline at end of file diff --git a/benchmarks/bert/python/server.py b/benchmarks/bert/python/server.py new file mode 100644 index 00000000..b848af78 --- /dev/null +++ b/benchmarks/bert/python/server.py @@ -0,0 +1,181 @@ +import subprocess +import mlperf_loadgen as lg +import argparse +import os +import sys +import re +# protobuf +from proto.bert import bert_pb2 +import bert_pb2_grpc +import grpc +from concurrent import futures +# from grpc_reflection.v1alpha import reflection + +sys.path.insert(0, os.getcwd()) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--backend", choices=["tf", "pytorch", "onnxruntime", "tf_estimator"], default="pytorch", help="Backend") + parser.add_argument("--scenario", choices=["SingleStream", "Offline", + "Server", "MultiStream"], default="Offline", help="Scenario") + parser.add_argument("--accuracy", action="store_true", + help="enable accuracy pass") + parser.add_argument("--quantized", action="store_true", + help="use quantized model (only valid for onnxruntime backend)") + parser.add_argument("--profile", action="store_true", + help="enable profiling (only valid for onnxruntime backend)") + parser.add_argument( + "--mlperf_conf", default="build/mlperf.conf", help="mlperf rules config") + parser.add_argument("--user_conf", default="user.conf", + help="user config for user LoadGen settings such as 
target QPS") + parser.add_argument("--max_examples", type=int, + help="Maximum number of examples to consider (not limited by default)") + parser.add_argument("-a", "--addr", dest="addr", default="0.0.0.0", help="IP address") + parser.add_argument("-p", "--port", dest="port", default="50051", help="serve port") + args = parser.parse_args() + return args + + +scenario_map = { + "SingleStream": lg.TestScenario.SingleStream, + "Offline": lg.TestScenario.Offline, + "Server": lg.TestScenario.Server, + "MultiStream": lg.TestScenario.MultiStream +} + + +def do_bert_inference(): + args = get_args() + + if args.backend == "pytorch": + assert not args.quantized, "Quantized model is only supported by onnxruntime backend!" + assert not args.profile, "Profiling is only supported by onnxruntime backend!" + from pytorch_SUT import get_pytorch_sut + sut = get_pytorch_sut(args) + elif args.backend == "tf": + assert not args.quantized, "Quantized model is only supported by onnxruntime backend!" + assert not args.profile, "Profiling is only supported by onnxruntime backend!" + from tf_SUT import get_tf_sut + sut = get_tf_sut(args) + elif args.backend == "tf_estimator": + assert not args.quantized, "Quantized model is only supported by onnxruntime backend!" + assert not args.profile, "Profiling is only supported by onnxruntime backend!" + from tf_estimator_SUT import get_tf_estimator_sut + sut = get_tf_estimator_sut() + elif args.backend == "onnxruntime": + from onnxruntime_SUT import get_onnxruntime_sut + sut = get_onnxruntime_sut(args) + else: + raise ValueError("Unknown backend: {:}".format(args.backend)) + + settings = lg.TestSettings() + settings.scenario = scenario_map[args.scenario] + settings.FromConfig(args.mlperf_conf, "bert", args.scenario) + settings.FromConfig(args.user_conf, "bert", args.scenario) + + if args.accuracy: + settings.mode = lg.TestMode.AccuracyOnly + else: + settings.mode = lg.TestMode.PerformanceOnly + log_path = os.environ.get("LOG_PATH") + if not log_path: + log_path = "build/logs" + if not os.path.exists(log_path): + os.makedirs(log_path) + log_output_settings = lg.LogOutputSettings() + log_output_settings.outdir = log_path + log_output_settings.copy_summary_to_stdout = True + log_settings = lg.LogSettings() + log_settings.log_output = log_output_settings + log_settings.enable_trace = True + + print("Running LoadGen test...") + lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings) + if args.accuracy and not os.environ.get("SKIP_VERIFY_ACCURACY"): + cmd = "python3 {:}/accuracy-squad.py {}".format( + os.path.dirname(os.path.abspath(__file__)), + '--max_examples {}'.format( + args.max_examples) if args.max_examples else '') + subprocess.check_call(cmd, shell=True) + + print("Done!") + + print("Destroying SUT...") + lg.DestroySUT(sut.sut) + + print("Destroying QSL...") + lg.DestroyQSL(sut.qsl.qsl) + + summary_file = "./build/logs/mlperf_log_summary.txt" + while True: + if not os.path.isfile(summary_file): continue + if os.stat(summary_file).st_size == 0: continue + latency_dict = parse_summary_file(summary_file) + if latency_dict == None: continue + + return latency_dict + +def parse_summary_file(summary_file): + keys = ["Min latency (ns)", "Max latency (ns)", "Mean latency (ns)"] + res_dic = {} + with open(summary_file) as f: + try: + text = f.read() + for key in keys: + val = extract_text_between_strings(text,key,"\n") + + res_dic[key] = int(val.split(': ')[-1]) + except Exception as e: + # print(e) + return None + return res_dic + + +def 
extract_text_between_strings(text, str1, str2): + try: + pattern = re.escape(str1) + r'(.*?)' + re.escape(str2) + match = re.search(pattern, text, re.DOTALL) + if match: + extracted_text = match.group(1) + return extracted_text + else: + raise Exception('Pattern not found in the text for start: %s'%(str1)) + except IOError: + raise Exception('Failed to read the file') + + +class Greeter(bert_pb2_grpc.GreeterServicer): + def SayHello(self, request, context): + token = request.name + print("Start to do bert inference...") + latency_dict = do_bert_inference() + return bert_pb2.HelloReply( + min_latency = latency_dict["Min latency (ns)"], + max_latency = latency_dict["Max latency (ns)"], + mean_latency = latency_dict["Mean latency (ns)"] + ) + + +def serve(): + args = get_args() + server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) + + bert_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) + # SERVICE_NAMES = ( + # bert_pb2.DESCRIPTOR.services_by_name['Greeter'].full_name, + # reflection.SERVICE_NAME, + # ) + # reflection.enable_server_reflection(SERVICE_NAMES, server) + + address = (args.addr + ":" + args.port) + server.add_insecure_port(address) + print("Start Bert-python server. Addr: " + address) + server.start() + server.wait_for_termination() + + +if __name__ == "__main__": + serve() + \ No newline at end of file diff --git a/benchmarks/bert/python/squad_QSL.py b/benchmarks/bert/python/squad_QSL.py new file mode 100644 index 00000000..b1652426 --- /dev/null +++ b/benchmarks/bert/python/squad_QSL.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# Copyright 2021 Arm Limited and affiliates. +# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +sys.path.insert(0, os.getcwd()) + +from transformers import BertTokenizer +from create_squad_data import read_squad_examples, convert_examples_to_features + +import mlperf_loadgen as lg + +# To support feature cache. +import pickle + +max_seq_length = 384 +max_query_length = 64 +doc_stride = 128 + +class SQuAD_v1_QSL(): + def __init__(self, total_count_override=None, perf_count_override=None, cache_path='eval_features.pickle'): + print("Constructing QSL...") + eval_features = [] + # Load features if cached, convert from examples otherwise. + if os.path.exists(cache_path): + print("Loading cached features from '%s'..." % cache_path) + with open(cache_path, 'rb') as cache_file: + eval_features = pickle.load(cache_file) + else: + print("No cached features at '%s'... converting from examples..." 
% cache_path) + + print("Creating tokenizer...") + vocab_file = os.environ.get("VOCAB_FILE") + if not vocab_file: + vocab_file = "build/data/bert_tf_v1_1_large_fp32_384_v2/vocab.txt" + tokenizer = BertTokenizer(vocab_file) + + print("Reading examples...") + dataset_file = os.environ.get("DATASET_FILE") + if not dataset_file: + dataset_file = "build/data/dev-v1.1.json" + eval_examples = read_squad_examples(input_file=dataset_file, + is_training=False, version_2_with_negative=False) + + print("Converting examples to features...") + def append_feature(feature): + eval_features.append(feature) + + convert_examples_to_features( + examples=eval_examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + is_training=False, + output_fn=append_feature, + verbose_logging=False) + + print("Caching features at '%s'..." % cache_path) + with open(cache_path, 'wb') as cache_file: + pickle.dump(eval_features, cache_file) + + self.eval_features = eval_features + self.count = total_count_override or len(self.eval_features) + print("count: %d"%self.count) + self.perf_count = perf_count_override or self.count + self.qsl = lg.ConstructQSL(self.count, self.perf_count, self.load_query_samples, self.unload_query_samples) + print("Finished constructing QSL.") + + def load_query_samples(self, sample_list): + pass + + def unload_query_samples(self, sample_list): + pass + + def get_features(self, sample_id): + return self.eval_features[sample_id] + + def __del__(self): + print("Finished destroying QSL.") + +def get_squad_QSL(total_count_override=None, perf_count_override=None): + return SQuAD_v1_QSL(total_count_override, perf_count_override) diff --git a/benchmarks/bert/requirements.txt b/benchmarks/bert/requirements.txt new file mode 100644 index 00000000..9c678ec6 --- /dev/null +++ b/benchmarks/bert/requirements.txt @@ -0,0 +1,6 @@ +torch==1.4.0 +onnx==1.6.0 +transformers==2.4.0 +onnxruntime==1.2.0 +numpy==1.18.0 +tokenization==1.0.7 \ No newline at end of file diff --git a/benchmarks/bert/yamls/docker-compose/dc-bert-python.yaml b/benchmarks/bert/yamls/docker-compose/dc-bert-python.yaml new file mode 100644 index 00000000..37aac579 --- /dev/null +++ b/benchmarks/bert/yamls/docker-compose/dc-bert-python.yaml @@ -0,0 +1,33 @@ +version: "3.9" +services: + bert-python: + image: vhiveease/bert-python:latest + container_name: bert-python + working_dir: /workspace + entrypoint: + - python3 + - python/server.py + - --addr=0.0.0.0 + - --port=50051 + ports: + - target: 50051 + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 2 + # capabilities: [gpu] + + + relay: + image: vhiveease/relay:latest + entrypoint: + - /app/server + - --addr=0.0.0.0:50000 + - --function-endpoint-url=bert-python + - --function-endpoint-port=50051 + - --function-name=bert-python + ports: + - published: 50000 + target: 50000 \ No newline at end of file diff --git a/benchmarks/bert/yamls/knative/kn-bert-python.yaml b/benchmarks/bert/yamls/knative/kn-bert-python.yaml new file mode 100644 index 00000000..c582a208 --- /dev/null +++ b/benchmarks/bert/yamls/knative/kn-bert-python.yaml @@ -0,0 +1,44 @@ +# MIT License +# +# Copyright (c) 2021 David Schall and EASE lab +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: bert-python + namespace: default +spec: + template: + spec: + containers: + - image: docker.io/vhiveease/relay:latest + ports: + - name: h2c + containerPort: 50000 + args: + - --addr=0.0.0.0:50000 + - --function-endpoint-url=0.0.0.0 + - --function-endpoint-port=50051 + - --function-name=bert-python + - image: docker.io/vhiveease/bert-python:latest + args: + - --addr=0.0.0.0 + - --port=50051 \ No newline at end of file