From c89ca263bbe5979fa653e287da7c1d2ea556b4a4 Mon Sep 17 00:00:00 2001
From: slaren
Date: Fri, 14 Jun 2024 21:44:55 +0200
Subject: [PATCH] all matrix multiplication backend

---
 ggml-blas.cpp | 270 +++++++++++++++++++++++++++++++++++++++++++++++++-
 ggml.c        |  19 ++--
 ggml.h        |   2 +
 3 files changed, 283 insertions(+), 8 deletions(-)

diff --git a/ggml-blas.cpp b/ggml-blas.cpp
index d709a357bbf290..b4e39f0508da33 100644
--- a/ggml-blas.cpp
+++ b/ggml-blas.cpp
@@ -1,6 +1,8 @@
 #include "ggml-blas.h"
 #include "ggml-backend-impl.h"
 
+#include <atomic>
+#include <cstring>
 #include <future>
 #include <vector>
 
@@ -22,6 +24,7 @@ struct ggml_backend_blas_context {
 #ifndef GGML_USE_OPENMP
     std::vector<std::future<void>> tasks;
 #endif
+    std::atomic<int> current_chunk;
 };
 
 // helper function to determine if it is better to use BLAS or not
@@ -48,6 +51,265 @@ static bool ggml_backend_blas_use_blas(const struct ggml_tensor * dst) {
     return false;
 }
 
+static void ggml_compute_forward_mul_mat_one_chunk(
+        ggml_backend_blas_context * ctx,
+        struct ggml_tensor * dst,
+        const int64_t num_rows_per_vec_dot,
+        const int64_t ir0_start,
+        const int64_t ir0_end,
+        const int64_t ir1_start,
+        const int64_t ir1_end) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const enum ggml_type type = src0->type;
+
+    const bool src1_cont = ggml_is_contiguous(src1);
+
+    const ggml_type_traits_t * type_traits = ggml_internal_get_type_traits_ptr(type);
+
+    ggml_vec_dot_t const vec_dot = type_traits->vec_dot;
+    enum ggml_type const vec_dot_type = type_traits->vec_dot_type;
+
+    // broadcast factors
+    const int64_t r2 = ne12 / ne02;
+    const int64_t r3 = ne13 / ne03;
+
+    //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end);
+
+    // threads with no work simply yield (not sure if it helps)
+    if (ir0_start >= ir0_end || ir1_start >= ir1_end) {
+        return;
+    }
+
+    const void * wdata = (src1->type == vec_dot_type) ? src1->data : ctx->work_data.get();
+    const size_t row_size = ggml_row_size(vec_dot_type, ne10);
+
+    assert(ne12 % ne02 == 0);
+    assert(ne13 % ne03 == 0);
+
+    // block-tiling attempt
+    const int64_t blck_0 = 16;
+    const int64_t blck_1 = 16;
+
+    const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11;
+
+    // attempt to reduce false-sharing (does not seem to make a difference)
+    // 16 * 2, accounting for mmla kernels
+    float tmp[32];
+
+    for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
+        for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
+            for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) {
+                const int64_t i13 = (ir1 / (ne12 * ne1));
+                const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1;
+                const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1);
+
+                // broadcast src0 into src1
+                const int64_t i03 = i13 / r3;
+                const int64_t i02 = i12 / r2;
+
+                const int64_t i1 = i11;
+                const int64_t i2 = i12;
+                const int64_t i3 = i13;
+
+                const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03);
+
+                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
+                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
+                //       the original src1 data pointer, so we should index using the indices directly
+                // TODO: this is a bit of a hack, we should probably have a better way to handle this
+                const char * src1_col = (const char*)wdata +
+                    (src1_cont || src1->type != vec_dot_type
+                        ? (i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size
+                        : (i11 * nb11 + i12 * nb12 + i13 * nb13));
+                float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3));
+
+                //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
+                //    vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
+                //}
+
+                for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) {
+                    vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot);
+                }
+
+                for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) {
+                    memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (std::min(iir0 + blck_0, ir0_end) - iir0) * sizeof(float));
+                }
+            }
+        }
+    }
+}
+
+static void ggml_compute_forward_mul_mat(
+        ggml_backend_blas_context * ctx,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const enum ggml_type type = src0->type;
+
+    const ggml_type_traits_t * type_traits = ggml_internal_get_type_traits_ptr(type);
+    const ggml_type_traits_t * type_traits_vec_dot = ggml_internal_get_type_traits_ptr(type_traits->vec_dot_type);
+    enum ggml_type const vec_dot_type = type_traits->vec_dot_type;
+    ggml_from_float_t const from_float_to_vec_dot = type_traits_vec_dot->from_float;
+    int64_t const vec_dot_num_rows = type_traits->nrows;
+
+    GGML_ASSERT(ne0 == ne01);
+    GGML_ASSERT(ne1 == ne11);
+    GGML_ASSERT(ne2 == ne12);
+    GGML_ASSERT(ne3 == ne13);
+
+    // we don't support permuted src0 or src1
+    GGML_ASSERT(nb00 == ggml_type_size(type));
+    GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+    // dst cannot be transposed or permuted
+    GGML_ASSERT(nb0 == sizeof(float));
+    GGML_ASSERT(nb0 <= nb1);
+    GGML_ASSERT(nb1 <= nb2);
+    GGML_ASSERT(nb2 <= nb3);
+
+    // broadcast factors
+    const int64_t r2 = ne12 / ne02;
+    const int64_t r3 = ne13 / ne03;
+    GGML_UNUSED(r2);
+    GGML_UNUSED(r3);
+
+    // nb01 >= nb00 - src0 is not transposed
+    //   compute by src0 rows
+
+    if (src1->type != vec_dot_type) {
+        const size_t row_size = ggml_row_size(vec_dot_type, ne10);
+        if (ctx->work_size < ne13*ne12*ne11*row_size) {
+            ctx->work_data.reset(new char[ne13*ne12*ne11*row_size]);
+            ctx->work_size = ne13*ne12*ne11*row_size;
+        }
+        char * wdata = ctx->work_data.get();
+
+        GGML_ASSERT(src1->type == GGML_TYPE_F32);
+        int block_size = ggml_blck_size(vec_dot_type);
+        int type_size = ggml_type_size(vec_dot_type);
+
+        for (int64_t i13 = 0; i13 < ne13; ++i13) {
+            for (int64_t i12 = 0; i12 < ne12; ++i12) {
+                for (int64_t i11 = 0; i11 < ne11; ++i11) {
+                    //from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
+                    #pragma omp parallel num_threads(ctx->n_threads)
+                    {
+                        int nth = omp_get_num_threads();
+                        int ith = omp_get_thread_num();
+                        int blocks_per_thread = (ne10 + block_size - 1) / block_size / nth;
+                        int i10_start = ith * blocks_per_thread * block_size;
+                        int i10_end = std::min(i10_start + blocks_per_thread * block_size, (int)ne10);
+                        //printf("thread %d/%d: i10_start = %d, i10_end = %d\n", ith, nth, i10_start, i10_end);
+                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10_start*nb10),
+                                              (void *) ((char *) wdata + (type_size*i10_start/block_size)),
+                                              i10_end - i10_start);
+                    }
+
+                    wdata += row_size;
+                }
+            }
+        }
+    }
+
+    // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers)
+    const int64_t nr0 = ne0;
+
+    // This is the size of the rest of the dimensions of the result
+    const int64_t nr1 = ne1 * ne2 * ne3;
+
+    // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
+    int64_t num_rows_per_vec_dot = vec_dot_num_rows;
+    // TODO: currently the mmla kernels support only even numbered rows/cols,
+    //       this check can be removed once they are extended to support odd numbered rows/cols too
+    if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) {
+        num_rows_per_vec_dot = 1;
+    }
+
+    // Now select a reasonable chunk size.
+    int chunk_size = 16;
+
+    // We need to step up the size if it's small
+    if (nr0 == 1 || nr1 == 1) {
+        chunk_size = 64;
+    }
+
+    // distribute the work across the inner or outer loop based on which one is larger
+    // The number of chunks in the 0/1 dim.
+    // CEIL(nr0/chunk_size)
+    int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
+    int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
+
+    // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread.
+    // Also, chunking by thread was measured to perform better on NUMA systems. See https://github.com/ggerganov/llama.cpp/pull/6915
+    // In theory, chunking should be just as useful on NUMA and non-NUMA systems, but testing disagreed with that.
+
+    //const int ith = 0; // params->ith;
+    const int nth = ctx->n_threads;
+
+    // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
+    ctx->current_chunk.store(nth);
+
+    if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) {
+        // distribute the thread work across the inner or outer loop based on which one is larger
+        nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
+        nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
+    }
+
+    // The number of elements in each chunk
+    const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
+    const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
+
+    //if (ith == 0)
+    //    printf("MUL_MAT = [%d, %d, %d, %d] x [%d, %d, %d, %d] = %d x %d = %d. Fp Ops/Ch %d\n", ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nchunk0, nchunk1, nchunk0 * nchunk1, ne00 * nr0 * nr1 / nchunk0 / nchunk1);
+
+    // The first chunk comes from our thread_id, the rest will get auto-assigned.
+    if (nth > 1) {
+        #pragma omp parallel num_threads(nth)
+        {
+            int current_chunk = omp_get_thread_num();
+
+            while (current_chunk < nchunk0 * nchunk1) {
+                const int64_t ith0 = current_chunk % nchunk0;
+                const int64_t ith1 = current_chunk / nchunk0;
+
+                const int64_t ir0_start = dr0 * ith0;
+                const int64_t ir0_end = std::min(ir0_start + dr0, nr0);
+
+                const int64_t ir1_start = dr1 * ith1;
+                const int64_t ir1_end = std::min(ir1_start + dr1, nr1);
+
+                ggml_compute_forward_mul_mat_one_chunk(ctx, dst, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
+
+                if (nth >= nchunk0 * nchunk1) {
+                    break;
+                }
+
+                current_chunk = ctx->current_chunk.fetch_add(1);
+            }
+        }
+    } else {
+        ggml_compute_forward_mul_mat_one_chunk(ctx, dst, num_rows_per_vec_dot, 0, nr0, 0, nr1);
+    }
+
+#ifdef GGML_PERF
+    // These numbers are useful when trying to measure how well the threading scheduling works.
+    //int64_t workSize = (ne01 * ne11 * ne12 * ne13 * ne00) / nchunk0 / nchunk1;
+    //float time = (ggml_perf_time_us() - t0);
+    //printf("MUL_MAT = %f ms, [%d, %d, %d, %d] x [%d, %d, %d, %d] = %I64u, %f ops/usec in %d chunks.\n", time / 1000.0, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, workSize, (float)workSize/time, chunks_executed);
+#endif
+}
+
 static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
     const struct ggml_tensor * src0 = dst->src[0];
     const struct ggml_tensor * src1 = dst->src[1];
@@ -255,7 +517,8 @@ GGML_CALL static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t
 
         switch (node->op) {
             case GGML_OP_MUL_MAT:
-                ggml_backend_blas_mul_mat(ctx, node);
+                //ggml_backend_blas_mul_mat(ctx, node);
+                ggml_compute_forward_mul_mat(ctx, node);
                 break;
 
             case GGML_OP_OUT_PROD:
@@ -281,6 +544,10 @@ GGML_CALL static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t
 }
 
 GGML_CALL static bool ggml_backend_blas_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+
+    return op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_OUT_PROD;
+
+    /*
     const struct ggml_tensor * src0 = op->src[0];
     const struct ggml_tensor * src1 = op->src[1];
 
@@ -291,6 +558,7 @@ GGML_CALL static bool ggml_backend_blas_supports_op(ggml_backend_t backend, cons
                                           ggml_is_matrix(src1) &&
                                           ggml_is_contiguous(src0) &&
                                           (ggml_is_contiguous(src1) || ggml_is_transposed(src1)));
+    */
 
     GGML_UNUSED(backend);
 }
diff --git a/ggml.c b/ggml.c
index d5d33c2ba10299..841f8a354ba2b5 100644
--- a/ggml.c
+++ b/ggml.c
@@ -911,6 +911,11 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
     return type_traits[type];
 }
 
+const ggml_type_traits_t * ggml_internal_get_type_traits_ptr(enum ggml_type type) {
+    GGML_ASSERT(type < GGML_TYPE_COUNT);
+    return &type_traits[type];
+}
+
 //
 // simd mappings
 //
@@ -19331,13 +19336,13 @@ static enum ggml_status ggml_graph_compute_parallel(struct ggml_compute_state *
     if (n_threads > 1) {
         #pragma omp parallel num_threads(n_threads)
         {
-            #pragma omp single
-            {
-                // update the number of threads from the actual number of threads that we got from OpenMP
-                n_threads = omp_get_num_threads();
-                workers[0].shared->n_threads = n_threads;
-                workers[0].shared->n_active = n_threads;
-            }
+            //#pragma omp single
+            //{
+            //    // update the number of threads from the actual number of threads that we got from OpenMP
+            //    n_threads = omp_get_num_threads();
+            //    workers[0].shared->n_threads = n_threads;
+            //    workers[0].shared->n_active = n_threads;
+            //}
             ggml_graph_compute_thread(&workers[omp_get_thread_num()]);
         }
     } else {
diff --git a/ggml.h b/ggml.h
index 13502a3622fc4f..2a90747f983a42 100644
--- a/ggml.h
+++ b/ggml.h
@@ -2447,6 +2447,8 @@ extern "C" {
 
     GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
 
+    GGML_API const ggml_type_traits_t * ggml_internal_get_type_traits_ptr(enum ggml_type type);
+
 #ifdef __cplusplus
 }
 #endif
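
For reference, below is a minimal standalone sketch (not part of the patch) of the atomic work-stealing scheme that the new ggml_compute_forward_mul_mat uses: every OpenMP thread starts on the chunk matching its thread id and then claims further chunks from a shared atomic counter, mirroring ctx->current_chunk. The names process_chunk, nchunks and nthreads are illustrative stand-ins for ggml_compute_forward_mul_mat_one_chunk, nchunk0 * nchunk1 and ctx->n_threads; compile with -fopenmp.

    // illustrative sketch only; assumed names, not code from this patch
    #include <atomic>
    #include <cstdio>
    #include <omp.h>

    static void process_chunk(int chunk) {
        // stand-in for ggml_compute_forward_mul_mat_one_chunk
        std::printf("thread %d -> chunk %d\n", omp_get_thread_num(), chunk);
    }

    int main() {
        const int nchunks  = 32;                  // plays the role of nchunk0 * nchunk1
        const int nthreads = 4;                   // plays the role of ctx->n_threads
        std::atomic<int> current_chunk(nthreads); // first chunk not owned by any thread yet

        #pragma omp parallel num_threads(nthreads)
        {
            // each thread's first chunk is its own id, the rest are stolen
            int chunk = omp_get_thread_num();
            while (chunk < nchunks) {
                process_chunk(chunk);
                if (nthreads >= nchunks) {
                    break; // at most one chunk per thread, nothing left to steal
                }
                chunk = current_chunk.fetch_add(1); // claim the next unprocessed chunk
            }
        }
        return 0;
    }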