diff --git a/velox/dwio/common/QplJobPool.cpp b/velox/dwio/common/QplJobPool.cpp index 6a62ef6376cff..bb5c7d1088572 100644 --- a/velox/dwio/common/QplJobPool.cpp +++ b/velox/dwio/common/QplJobPool.cpp @@ -16,17 +16,12 @@ #include "velox/dwio/common/QplJobPool.h" #include -#include #include "velox/common/base/Exceptions.h" namespace facebook::velox::dwio::common { -// std::array -// QplJobHWPool::hwJobPtrPool; std::array, QplJobHWPool::MAX_JOB_NUMBER> QplJobHWPool::hwJobPtrLocks; -// bool QplJobHWPool::iaa_job_ready = false; -// std::unique_ptr QplJobHWPool::hwJobsBuffer; QplJobHWPool& QplJobHWPool::getInstance() { static QplJobHWPool pool; @@ -49,6 +44,9 @@ QplJobHWPool::~QplJobHWPool() { iaaJobReady = false; } +/** + * Allocate qpl job and put it into hwJobPtrPool + */ void QplJobHWPool::allocateQPLJob() { uint32_t job_size = 0; @@ -60,6 +58,7 @@ void QplJobHWPool::allocateQPLJob() { // Initialize pool for storing all job object pointers // Allocate buffer by shifting address offset for each job object. + hwJobPtrPool.resize(MAX_JOB_NUMBER); for (uint32_t i = 0; i < MAX_JOB_NUMBER; ++i) { qpl_job* qplJobPtr = reinterpret_cast(hwJobsBuffer.get() + i * job_size); @@ -84,7 +83,7 @@ void QplJobHWPool::allocateQPLJob() { * QplJobHWPool maintains MAX_JOB_NUMBER job slot to avoid frequently allocate, * initialize and release job. Random slots is used to select a job and * tryLockJob will check if the job is free. - * @return job_id and qpl_job pointer + * @return job_id and qpl_job pair */ std::pair QplJobHWPool::acquireDeflateJob() { std::pair res; diff --git a/velox/dwio/common/QplJobPool.h b/velox/dwio/common/QplJobPool.h index ae3fe51b45f16..f80f3a02a7043 100644 --- a/velox/dwio/common/QplJobPool.h +++ b/velox/dwio/common/QplJobPool.h @@ -39,7 +39,7 @@ namespace facebook::velox::dwio::common { // together, so that each analytics operation can perform decompress-only, // filter-only, or decompress-and-filter processing. // -// Intel QPLis library to provide application programming interface (API) for +// Intel QPL is library to provide application programming interface (API) for // interaction with Intel® In-Memory Analytics Accelerator (Intel® IAA) hardware // // Intel® IAA: @@ -61,6 +61,12 @@ class QplJobHWPool { } std::pair acquireDeflateJob(); + + /** + * Get qpl job by job id + * @param job_id the job id or index in the qpl job pool + * @return nullptr if the job id is invalid + */ qpl_job* getJobById(int job_id) { if (job_id >= MAX_JOB_NUMBER || job_id <= 0) { return nullptr; @@ -81,7 +87,7 @@ class QplJobHWPool { std::unique_ptr hwJobsBuffer; // Job pool for storing all job object pointers - std::array hwJobPtrPool; + std::vector hwJobPtrPool; // Locks for accessing each job object pointers bool iaaJobReady; diff --git a/velox/dwio/common/compression/AsyncCompression.h b/velox/dwio/common/compression/AsyncCompression.h new file mode 100644 index 0000000000000..b6c01bdb8ab75 --- /dev/null +++ b/velox/dwio/common/compression/AsyncCompression.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ASYNC_COMPRESSION_H_ +#define ASYNC_COMPRESSION_H_ + +#include +#include "velox/common/compression/Compression.h" + +namespace facebook::velox::dwio::common::compression { + +using facebook::velox::common::CompressionKind; + +class AsyncDecompressor { + public: + explicit AsyncDecompressor(){}; + + virtual ~AsyncDecompressor() = default; + + virtual folly::SemiFuture decompressAsync( + const char* src, + uint64_t srcLength, + char* dest, + uint64_t destLength) = 0; +}; + +std::unique_ptr MakeIAAGzipCodec(); + +/** + * Get the window size from zlib header(rfc1950). + * 0 1 + * +---+---+ + * |CMF|FLG| (more-->) + * +---+---+ + * bits 0 to 3 CM Compression method + * bits 4 to 7 CINFO Compression info + * CM (Compression method) This identifies the compression method used in the + * file. CM = 8 denotes the "deflate" compression method with a window size up + * to 32K. CINFO (Compression info) For CM = 8, CINFO is the base-2 logarithm of + * the LZ77 window size, minus eight (CINFO=7 indicates a 32K window size). + * @param stream_ptr the compressed block length for raw decompression + * @param stream_size compression options to use + */ +static int getZlibWindowBits(const uint8_t* stream_ptr, uint32_t stream_size) { + static constexpr uint8_t CM_ZLIB_DEFAULT_VALUE = 8u; + static constexpr uint32_t ZLIB_MIN_HEADER_SIZE = 2u; + static constexpr uint32_t ZLIB_INFO_OFFSET = 4u; + if (stream_size < ZLIB_MIN_HEADER_SIZE) { + return -1; + } + const uint8_t compression_method_and_flag = *stream_ptr++; + const uint8_t compression_method = compression_method_and_flag & 0xf; + const uint8_t compression_info = + compression_method_and_flag >> ZLIB_INFO_OFFSET; + + if (CM_ZLIB_DEFAULT_VALUE != compression_method) { + return -1; + } + if (compression_info > 7) { + return -1; + } + return CM_ZLIB_DEFAULT_VALUE + compression_info; +} + +/** + * Create a decompressor for the given compression kind in asynchronous mode. 
+ * @param kind the compression type to implement + */ +static std::unique_ptr +createAsyncDecompressor(facebook::velox::common::CompressionKind kind) { + switch (static_cast(kind)) { +#ifdef VELOX_ENABLE_INTEL_IAA + case CompressionKind::CompressionKind_GZIP: + return MakeIAAGzipCodec(); +#endif + default: + LOG(WARNING) << "Asynchronous mode not support for compression codec " + << kind; + return nullptr; + } + return nullptr; +} +} // namespace facebook::velox::dwio::common::compression + +#endif \ No newline at end of file diff --git a/velox/dwio/common/compression/CMakeLists.txt b/velox/dwio/common/compression/CMakeLists.txt index 20bceedbc576d..c5e2fb9ef5237 100644 --- a/velox/dwio/common/compression/CMakeLists.txt +++ b/velox/dwio/common/compression/CMakeLists.txt @@ -17,3 +17,11 @@ add_library(velox_dwio_common_compression Compression.cpp PagedInputStream.cpp target_link_libraries(velox_dwio_common_compression velox_dwio_common xsimd gtest Folly::folly) + +if(VELOX_ENABLE_INTEL_IAA) + add_library(velox_dwio_common_iaa_compression IAACompression.cpp) + target_link_libraries(velox_dwio_common_iaa_compression velox_dwio_qpl + Folly::folly xsimd) + target_link_libraries(velox_dwio_common_compression + velox_dwio_common_iaa_compression) +endif() diff --git a/velox/dwio/common/compression/Compression.cpp b/velox/dwio/common/compression/Compression.cpp index e36d58e7756fc..dfe1e3f315646 100644 --- a/velox/dwio/common/compression/Compression.cpp +++ b/velox/dwio/common/compression/Compression.cpp @@ -445,113 +445,6 @@ std::pair ZstdDecompressor::getDecompressedLength( return {uncompressedLength, true}; } -class GzipIAADecompressor : public AsyncDecompressor { - public: - explicit GzipIAADecompressor() {} - - explicit GzipIAADecompressor( - uint64_t blockSize, - const std::string& streamDebugInfo) - : AsyncDecompressor{blockSize, streamDebugInfo} {} - - int decompress( - const char* src, - uint64_t srcLength, - char* dest, - uint64_t destLength) override; - - bool waitResult(int job_id) override; - - void releaseJob(int job_id) override; -}; - -int GzipIAADecompressor::decompress( - const char* src, - uint64_t srcLength, - char* dest, - uint64_t destLength) { -#ifdef VELOX_ENABLE_INTEL_IAA - dwio::common::QplJobHWPool& qpl_job_pool = - dwio::common::QplJobHWPool::getInstance(); - // int job_id = 0; - auto deflate_job = qpl_job_pool.acquireDeflateJob(); - // qpl_job* job = qpl_job_pool.AcquireDeflateJob(job_id); - auto job = deflate_job.second; - if (job == nullptr) { - LOG(WARNING) << "cannot AcquireDeflateJob "; - return -1; // Invalid job id to illustrate the - // failed decompress job. - } - job->op = qpl_op_decompress; - job->next_in_ptr = reinterpret_cast(const_cast(src)); - job->next_out_ptr = reinterpret_cast(dest); - job->available_in = static_cast(srcLength); - job->available_out = static_cast(destLength); - job->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_ZLIB_MODE; - - qpl_status status = qpl_submit_job(job); - if (status == QPL_STS_QUEUES_ARE_BUSY_ERR) { - qpl_job_pool.releaseJob(deflate_job.first); - deflate_job = qpl_job_pool.acquireDeflateJob(); - job = deflate_job.second; - if (job == nullptr) { - LOG(WARNING) - << "cannot acqure deflate job after QPL_STS_QUEUES_ARE_BUSY_ERR "; - return -1; // Invalid job id to illustrate the - // failed decompress job. 
- } - job->op = qpl_op_decompress; - job->next_in_ptr = reinterpret_cast(const_cast(src)); - job->next_out_ptr = reinterpret_cast(dest); - job->available_in = static_cast(srcLength); - job->available_out = static_cast(destLength); - job->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_ZLIB_MODE; - - status = qpl_submit_job(job); - } - if (status != QPL_STS_OK) { - qpl_job_pool.releaseJob(deflate_job.first); - LOG(WARNING) << "cannot submit job, error status: " << status; - return -1; // Invalid job id to illustrate the - // failed decompress job. - } else { - return deflate_job.first; - } -#else - return -1; -#endif -} - -bool GzipIAADecompressor::waitResult(int job_id) { -#ifdef VELOX_ENABLE_INTEL_IAA - dwio::common::QplJobHWPool& qpl_job_pool = - dwio::common::QplJobHWPool::getInstance(); - if (job_id <= 0 || job_id >= qpl_job_pool.MAX_JOB_NUMBER) { - return true; - } - qpl_job* job = qpl_job_pool.getJobById(job_id); - - auto status = qpl_wait_job(job); - qpl_job_pool.releaseJob(job_id); - if (status == QPL_STS_OK) { - return true; - } - LOG(WARNING) << "Decompress w/IAA error, status: " << status; -#endif - return false; -} - -void GzipIAADecompressor::releaseJob(int job_id) { -#ifdef VELOX_ENABLE_INTEL_IAA - dwio::common::QplJobHWPool& qpl_job_pool = - dwio::common::QplJobHWPool::getInstance(); - if (job_id <= 0 || job_id >= qpl_job_pool.MAX_JOB_NUMBER) { - return; - } - return qpl_job_pool.releaseJob(job_id); -#endif -} - class SnappyDecompressor : public Decompressor { public: explicit SnappyDecompressor( @@ -833,21 +726,4 @@ std::unique_ptr createDecompressor( compressedLength); } -std::unique_ptr -createAsyncDecompressor( - facebook::velox::common::CompressionKind kind, - uint64_t bufferSize, - const std::string& streamDebugInfo) { - std::unique_ptr decompressor; - switch (static_cast(kind)) { - case CompressionKind::CompressionKind_GZIP: - return std::make_unique(bufferSize, streamDebugInfo); - default: - LOG(WARNING) << "Asynchronous mode not support for compression codec " - << kind; - return nullptr; - } - return nullptr; -} - } // namespace facebook::velox::dwio::common::compression diff --git a/velox/dwio/common/compression/Compression.h b/velox/dwio/common/compression/Compression.h index f24c2c4867820..3d26b3af98a42 100644 --- a/velox/dwio/common/compression/Compression.h +++ b/velox/dwio/common/compression/Compression.h @@ -19,9 +19,6 @@ #include "velox/common/compression/Compression.h" #include "velox/dwio/common/SeekableInputStream.h" #include "velox/dwio/common/encryption/Encryption.h" -#ifdef VELOX_ENABLE_INTEL_IAA -#include "velox/dwio/common/QplJobPool.h" -#endif namespace facebook::velox::dwio::common::compression { @@ -33,7 +30,6 @@ class Compressor { // https://zlib.net/manual.html static constexpr int DWRF_ORC_ZLIB_WINDOW_BITS = -15; static constexpr int PARQUET_ZLIB_WINDOW_BITS = 15; - static constexpr int PARQUET_ZLIB_WINDOW_BITS_4KB = 12; explicit Compressor(int32_t level) : level_{level} {} @@ -69,37 +65,6 @@ class Decompressor { const std::string streamDebugInfo_; }; -class AsyncDecompressor { - public: - explicit AsyncDecompressor(){}; - explicit AsyncDecompressor( - uint64_t blockSize, - const std::string& streamDebugInfo) - : blockSize_{blockSize}, streamDebugInfo_{streamDebugInfo} {} - - virtual ~AsyncDecompressor() = default; - - virtual uint64_t getUncompressedLength( - const char* /* unused */, - uint64_t /* unused */) const { - return blockSize_; - } - - virtual int decompress( - const char* src, - uint64_t srcLength, - char* dest, - uint64_t 
destLength) = 0; - - virtual bool waitResult(int job_id) = 0; - - virtual void releaseJob(int job_id) = 0; - - protected: - uint64_t blockSize_; - const std::string streamDebugInfo_; -}; - struct CompressionOptions { /// Format specific compression/decompression options union Format { @@ -124,42 +89,6 @@ struct CompressionOptions { uint32_t compressionThreshold; }; -/** - * Get the window size from zlib header(rfc1950). - * 0 1 - * +---+---+ - * |CMF|FLG| (more-->) - * +---+---+ - * bits 0 to 3 CM Compression method - * bits 4 to 7 CINFO Compression info - * CM (Compression method) This identifies the compression method used in the - * file. CM = 8 denotes the "deflate" compression method with a window size up - * to 32K. CINFO (Compression info) For CM = 8, CINFO is the base-2 logarithm of - * the LZ77 window size, minus eight (CINFO=7 indicates a 32K window size). - * @param stream_ptr the compressed block length for raw decompression - * @param stream_size compression options to use - */ -static int getZlibWindowBits(const uint8_t* stream_ptr, uint32_t stream_size) { - static constexpr uint8_t CM_ZLIB_DEFAULT_VALUE = 8u; - static constexpr uint32_t ZLIB_MIN_HEADER_SIZE = 2u; - static constexpr uint32_t ZLIB_INFO_OFFSET = 4u; - if (stream_size < ZLIB_MIN_HEADER_SIZE) { - return -1; - } - const uint8_t compression_method_and_flag = *stream_ptr++; - const uint8_t compression_method = compression_method_and_flag & 0xf; - const uint8_t compression_info = - compression_method_and_flag >> ZLIB_INFO_OFFSET; - - if (CM_ZLIB_DEFAULT_VALUE != compression_method) { - return -1; - } - if (compression_info > 7) { - return -1; - } - return CM_ZLIB_DEFAULT_VALUE + compression_info; -} - /** * Create a decompressor for the given compression kind. * @param kind The compression type to implement @@ -190,19 +119,4 @@ std::unique_ptr createCompressor( facebook::velox::common::CompressionKind kind, const CompressionOptions& options); -/** - * Create a decompressor for the given compression kind in asynchronous mode. - * @param kind the compression type to implement - * @param input the input stream that is the underlying source - * @param bufferSize the maximum size of the buffer - * @param pool the memory pool - * @param useRawDecompression specify whether to perform raw decompression - * @param compressedLength the compressed block length for raw decompression - * @param options compression options to use - */ -std::unique_ptr -createAsyncDecompressor( - facebook::velox::common::CompressionKind kind, - uint64_t bufferSize, - const std::string& streamDebugInfo); } // namespace facebook::velox::dwio::common::compression diff --git a/velox/dwio/common/compression/IAACompression.cpp b/velox/dwio/common/compression/IAACompression.cpp new file mode 100644 index 0000000000000..139ec94941113 --- /dev/null +++ b/velox/dwio/common/compression/IAACompression.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "velox/common/base/Exceptions.h" +#include "velox/dwio/common/QplJobPool.h" +#include "velox/dwio/common/compression/AsyncCompression.h" + +namespace facebook::velox::dwio::common::compression { + +class GzipIAADecompressor : public AsyncDecompressor { + public: + explicit GzipIAADecompressor() {} + + folly::SemiFuture decompressAsync( + const char* src, + uint64_t srcLength, + char* dest, + uint64_t destLength) override; + + int waitResult(int job_id); +}; + +folly::SemiFuture GzipIAADecompressor::decompressAsync( + const char* src, + uint64_t srcLength, + char* dest, + uint64_t destLength) { + dwio::common::QplJobHWPool& qpl_job_pool = + dwio::common::QplJobHWPool::getInstance(); + auto deflate_job = qpl_job_pool.acquireDeflateJob(); + auto job = deflate_job.second; + if (job == nullptr) { + LOG(WARNING) << "cannot AcquireDeflateJob "; + return folly::makeSemiFutureWith([]() -> uint64_t { + throw std::runtime_error("Cannot acquire deflate job from pool"); + return 0; + }); + } + job->op = qpl_op_decompress; + job->next_in_ptr = reinterpret_cast(const_cast(src)); + job->next_out_ptr = reinterpret_cast(dest); + job->available_in = static_cast(srcLength); + job->available_out = static_cast(destLength); + job->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_ZLIB_MODE; + + qpl_status status = qpl_submit_job(job); + if (status == QPL_STS_QUEUES_ARE_BUSY_ERR) { + qpl_job_pool.releaseJob(deflate_job.first); + deflate_job = qpl_job_pool.acquireDeflateJob(); + job = deflate_job.second; + if (job == nullptr) { + LOG(WARNING) + << "cannot acqure deflate job after QPL_STS_QUEUES_ARE_BUSY_ERR "; + return folly::makeSemiFutureWith([]() -> uint64_t { + throw std::runtime_error( + "Cannot acqure deflate job from pool after QPL_STS_QUEUES_ARE_BUSY_ERR"); + return 0; + }); + } + job->op = qpl_op_decompress; + job->next_in_ptr = reinterpret_cast(const_cast(src)); + job->next_out_ptr = reinterpret_cast(dest); + job->available_in = static_cast(srcLength); + job->available_out = static_cast(destLength); + job->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST | QPL_FLAG_ZLIB_MODE; + + status = qpl_submit_job(job); + } + if (status != QPL_STS_OK) { + qpl_job_pool.releaseJob(deflate_job.first); + LOG(WARNING) << "cannot submit job, error status: " << status; + return folly::makeSemiFutureWith([this, status]() -> uint64_t { + throw std::runtime_error("Cannot submit job, error status: " + status); + return 0; + }); + } else { + return folly::makeSemiFuture().deferValue( + [this, deflate_job](auto&&) -> uint64_t { + return this->waitResult(deflate_job.first); + }); + } +} + +int GzipIAADecompressor::waitResult(int job_id) { + dwio::common::QplJobHWPool& qpl_job_pool = + dwio::common::QplJobHWPool::getInstance(); + VELOX_CHECK_LT(job_id, qpl_job_pool.MAX_JOB_NUMBER); + qpl_job* job = qpl_job_pool.getJobById(job_id); + + auto status = qpl_wait_job(job); + qpl_job_pool.releaseJob(job_id); + if (status == QPL_STS_OK) { + return 1; + } + LOG(WARNING) << "Decompress w/IAA error, status: " << status; + return 0; +} + +std::unique_ptr MakeIAAGzipCodec() { + return std::make_unique(); +} + +} // namespace facebook::velox::dwio::common::compression \ No newline at end of file diff --git a/velox/dwio/parquet/reader/CMakeLists.txt b/velox/dwio/parquet/reader/CMakeLists.txt index 464d257aea9b2..742b1ae2de7a9 100644 --- a/velox/dwio/parquet/reader/CMakeLists.txt +++ b/velox/dwio/parquet/reader/CMakeLists.txt @@ -18,6 +18,7 @@ add_library( ParquetReader.cpp ParquetTypeWithId.cpp PageReader.cpp + IAAPageReader.cpp 
ParquetColumnReader.cpp ParquetData.cpp RepeatedColumnReader.cpp diff --git a/velox/dwio/parquet/reader/IAAPageReader.cpp b/velox/dwio/parquet/reader/IAAPageReader.cpp new file mode 100644 index 0000000000000..44c757acc1871 --- /dev/null +++ b/velox/dwio/parquet/reader/IAAPageReader.cpp @@ -0,0 +1,508 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "velox/dwio/parquet/reader/IAAPageReader.h" +#include "velox/dwio/common/BufferUtil.h" + +namespace facebook::velox::parquet { + +using thrift::Encoding; +using thrift::PageHeader; + +void IAAPageReader::preDecompressPage( + bool& need_pre_decompress, + int64_t numValues) { + if (codec_ != thrift::CompressionCodec::GZIP) { + need_pre_decompress = false; + return; + } + for (;;) { + auto dataStart = pageStart_; + if (chunkSize_ <= pageStart_) { + // This may happen if seeking to exactly end of row group. + numRepDefsInPage_ = 0; + numRowsInPage_ = 0; + break; + } + PageHeader pageHeader = readPageHeader(); + pageStart_ = pageDataStart_ + pageHeader.compressed_page_size; + switch (pageHeader.type) { + case thrift::PageType::DATA_PAGE: + prefetchDataPageV1(pageHeader); + break; + case thrift::PageType::DATA_PAGE_V2: + prefetchDataPageV2(pageHeader); + break; + case thrift::PageType::DICTIONARY_PAGE: + prefetchDictionary(pageHeader); + continue; + default: + break; // ignore INDEX page type and any other custom extensions + } + break; + } + need_pre_decompress = isWinSizeFit_; + rowGroupPageInfo_.numValues = numValues; + rowGroupPageInfo_.visitedRows = 0; +} + +void IAAPageReader::prefetchNextPage() { + if (rowGroupPageInfo_.visitedRows + numRowsInPage_ >= + rowGroupPageInfo_.numValues) { + return; + } + if (chunkSize_ <= pageStart_) { + return; + } + PageHeader pageHeader = readPageHeader(); + switch (pageHeader.type) { + case thrift::PageType::DATA_PAGE: { + dataPageHeader_ = pageHeader; + VELOX_CHECK( + pageHeader.type == thrift::PageType::DATA_PAGE && + pageHeader.__isset.data_page_header); + rowGroupPageInfo_.dataPageData = + readBytes(pageHeader.compressed_page_size, pageBuffer_); + preDecompressData_ = iaaDecompress( + rowGroupPageInfo_.dataPageData, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size, + rowGroupPageInfo_.uncompressedData, + dataDecompFuture); + break; + } + case thrift::PageType::DATA_PAGE_V2: + LOG(WARNING) << "Data Page V2 not support "; + break; + case thrift::PageType::DICTIONARY_PAGE: + LOG(WARNING) << "Wrong path "; + break; + default: + break; // ignore INDEX page type and any other custom extensions + } +} + +bool IAAPageReader::seekToPreDecompPage(int64_t row) { + bool has_qpl = false; + if (this->dictDecompFuture.valid()) { + bool job_success = std::move(this->dictDecompFuture).get() > 0; + prepareDictionary(dictPageHeader_, job_success); + preDecompressDict_ = false; + has_qpl = true; + } + + if (dataDecompFuture.valid()) { + bool job_success = std::move(this->dataDecompFuture).get() > 0; + 
prepareDataPageV1(dataPageHeader_, row, job_success); + preDecompressData_ = false; + has_qpl = true; + } + + if (has_qpl) { + if (row == kRepDefOnly || row < rowOfPage_ + numRowsInPage_) { + return true; + } + updateRowInfoAfterPageSkipped(); + } + return false; +} + +void IAAPageReader::prefetchDataPageV1(const thrift::PageHeader& pageHeader) { + dataPageHeader_ = pageHeader; + VELOX_CHECK( + pageHeader.type == thrift::PageType::DATA_PAGE && + pageHeader.__isset.data_page_header); + + dataPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); + preDecompressData_ = iaaDecompress( + dataPageData_, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size, + uncompressedDataV1Data_, + dataDecompFuture); + return; +} + +void IAAPageReader::prefetchDataPageV2(const thrift::PageHeader& pageHeader) { + return; +} + +void IAAPageReader::prefetchDictionary(const thrift::PageHeader& pageHeader) { + dictPageHeader_ = pageHeader; + dictionaryEncoding_ = pageHeader.dictionary_page_header.encoding; + VELOX_CHECK( + dictionaryEncoding_ == Encoding::PLAIN_DICTIONARY || + dictionaryEncoding_ == Encoding::PLAIN); + dictPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); + + preDecompressDict_ = iaaDecompress( + dictPageData_, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size, + uncompressedDictData_, + dictDecompFuture); + + return; +} + +const bool IAAPageReader::iaaDecompress( + const char* pageData, + uint32_t compressedSize, + uint32_t uncompressedSize, + BufferPtr& uncompressedData, + folly::SemiFuture& future) { + dwio::common::ensureCapacity( + uncompressedData, uncompressedSize, &pool_); + static constexpr int PARQUET_ZLIB_WINDOW_BITS_4KB = 12; + future = folly::makeSemiFuture((uint64_t)0); + if (!isWinSizeFit_) { + // window size should be 4KB for IAA + if (PARQUET_ZLIB_WINDOW_BITS_4KB == + dwio::common::compression::getZlibWindowBits( + (const uint8_t*)pageData, uncompressedSize)) { + isWinSizeFit_ = true; + } else { + future = folly::makeSemiFuture((uint64_t)0); + return true; + } + } + std::unique_ptr decompressor = + dwio::common::compression::createAsyncDecompressor( + thriftCodecToCompressionKind()); + if (decompressor == nullptr) { + return true; + } + auto decompFuture = decompressor->decompressAsync( + (const char*)pageData, + compressedSize, + (char*)uncompressedData->asMutable(), + uncompressedSize); + if (decompFuture.isReady()) { + auto result = std::move(decompFuture).getTry(); + if (result.hasException()) { + future = folly::makeSemiFuture((uint64_t)0); + return true; + } + } + future = std::move(decompFuture); + return true; +} + +void IAAPageReader::seekToPage(int64_t row) { + this->defineDecoder_.reset(); + this->repeatDecoder_.reset(); + // 'rowOfPage_' is the row number of the first row of the next page. + this->rowOfPage_ += this->numRowsInPage_; + + if (seekToPreDecompPage(row)) { + if (isWinSizeFit_) { + prefetchNextPage(); + } + rowGroupPageInfo_.visitedRows += numRowsInPage_; + return; + } + + for (;;) { + auto dataStart = pageStart_; + if (chunkSize_ <= pageStart_) { + // This may happen if seeking to exactly end of row group. 
+ numRepDefsInPage_ = 0; + numRowsInPage_ = 0; + break; + } + PageHeader pageHeader = this->readPageHeader(); + pageStart_ = pageDataStart_ + pageHeader.compressed_page_size; + + switch (pageHeader.type) { + case thrift::PageType::DATA_PAGE: + prepareDataPageV1(pageHeader, row); + break; + case thrift::PageType::DATA_PAGE_V2: + prepareDataPageV2(pageHeader, row); + break; + case thrift::PageType::DICTIONARY_PAGE: + if (row == kRepDefOnly) { + skipBytes( + pageHeader.compressed_page_size, + inputStream_.get(), + bufferStart_, + bufferEnd_); + continue; + } + prepareDictionary(pageHeader); + continue; + default: + break; // ignore INDEX page type and any other custom extensions + } + if (row == kRepDefOnly || row < rowOfPage_ + numRowsInPage_) { + break; + } + this->updateRowInfoAfterPageSkipped(); + } + if (isWinSizeFit_) { + prefetchNextPage(); + } + rowGroupPageInfo_.visitedRows += numRowsInPage_; +} + +void IAAPageReader::prepareDataPageV1( + const PageHeader& pageHeader, + int64_t row, + bool job_success) { + VELOX_CHECK( + pageHeader.type == thrift::PageType::DATA_PAGE && + pageHeader.__isset.data_page_header); + numRepDefsInPage_ = pageHeader.data_page_header.num_values; + setPageRowInfo(row == kRepDefOnly); + if (row != kRepDefOnly && numRowsInPage_ != kRowsUnknown && + numRowsInPage_ + rowOfPage_ <= row) { + dwio::common::skipBytes( + pageHeader.compressed_page_size, + inputStream_.get(), + bufferStart_, + bufferEnd_); + + return; + } + if (job_success) { + if (rowGroupPageInfo_.visitedRows > 0) { + BufferPtr tmp = uncompressedDataV1Data_; + uncompressedDataV1Data_ = rowGroupPageInfo_.uncompressedData; + rowGroupPageInfo_.uncompressedData = tmp; + } + pageData_ = uncompressedDataV1Data_->as(); + } else { + if (!preDecompressData_) { + dataPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); + } else if (rowGroupPageInfo_.visitedRows > 0) { + dataPageData_ = rowGroupPageInfo_.dataPageData; + } + pageData_ = decompressData( + dataPageData_, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size); + } + auto pageEnd = pageData_ + pageHeader.uncompressed_page_size; + if (maxRepeat_ > 0) { + uint32_t repeatLength = readField(pageData_); + repeatDecoder_ = std::make_unique<::arrow::util::RleDecoder>( + reinterpret_cast(pageData_), + repeatLength, + ::arrow::bit_util::NumRequiredBits(maxRepeat_)); + + pageData_ += repeatLength; + } + + if (maxDefine_ > 0) { + auto defineLength = readField(pageData_); + if (maxDefine_ == 1) { + defineDecoder_ = std::make_unique( + pageData_, + pageData_ + defineLength, + ::arrow::bit_util::NumRequiredBits(maxDefine_)); + } + wideDefineDecoder_ = std::make_unique<::arrow::util::RleDecoder>( + reinterpret_cast(pageData_), + defineLength, + ::arrow::bit_util::NumRequiredBits(maxDefine_)); + pageData_ += defineLength; + } + encodedDataSize_ = pageEnd - pageData_; + + encoding_ = pageHeader.data_page_header.encoding; + if (!hasChunkRepDefs_ && (numRowsInPage_ == kRowsUnknown || maxDefine_ > 1)) { + readPageDefLevels(); + } + + if (row != kRepDefOnly) { + makeDecoder(); + } +} + +void IAAPageReader::prepareDictionary( + const PageHeader& pageHeader, + bool job_success) { + dictionary_.numValues = pageHeader.dictionary_page_header.num_values; + dictionaryEncoding_ = pageHeader.dictionary_page_header.encoding; + dictionary_.sorted = pageHeader.dictionary_page_header.__isset.is_sorted && + pageHeader.dictionary_page_header.is_sorted; + VELOX_CHECK( + dictionaryEncoding_ == Encoding::PLAIN_DICTIONARY || + dictionaryEncoding_ == 
Encoding::PLAIN); + + if (codec_ != thrift::CompressionCodec::UNCOMPRESSED) { + if (job_success) { + pageData_ = uncompressedDictData_->as(); + } else { + if (!preDecompressDict_) { + dictPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); + } + pageData_ = decompressData( + dictPageData_, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size); + } + } + + auto parquetType = type_->parquetType_.value(); + switch (parquetType) { + case thrift::Type::INT32: + case thrift::Type::INT64: + case thrift::Type::FLOAT: + case thrift::Type::DOUBLE: { + int32_t typeSize = (parquetType == thrift::Type::INT32 || + parquetType == thrift::Type::FLOAT) + ? sizeof(float) + : sizeof(double); + auto numBytes = dictionary_.numValues * typeSize; + if (type_->type()->isShortDecimal() && + parquetType == thrift::Type::INT32) { + auto veloxTypeLength = type_->type()->cppSizeInBytes(); + auto numVeloxBytes = dictionary_.numValues * veloxTypeLength; + dictionary_.values = + AlignedBuffer::allocate(numVeloxBytes, &pool_); + } else { + dictionary_.values = AlignedBuffer::allocate(numBytes, &pool_); + } + if (pageData_) { + memcpy(dictionary_.values->asMutable(), pageData_, numBytes); + } else { + dwio::common::readBytes( + numBytes, + inputStream_.get(), + dictionary_.values->asMutable(), + bufferStart_, + bufferEnd_); + } + if (type_->type()->isShortDecimal() && + parquetType == thrift::Type::INT32) { + auto values = dictionary_.values->asMutable(); + auto parquetValues = dictionary_.values->asMutable(); + for (auto i = dictionary_.numValues - 1; i >= 0; --i) { + // Expand the Parquet type length values to Velox type length. + // We start from the end to allow in-place expansion. + values[i] = parquetValues[i]; + } + } + break; + } + case thrift::Type::BYTE_ARRAY: { + dictionary_.values = + AlignedBuffer::allocate(dictionary_.numValues, &pool_); + auto numBytes = pageHeader.uncompressed_page_size; + auto values = dictionary_.values->asMutable(); + dictionary_.strings = AlignedBuffer::allocate(numBytes, &pool_); + auto strings = dictionary_.strings->asMutable(); + if (pageData_) { + memcpy(strings, pageData_, numBytes); + } else { + dwio::common::readBytes( + numBytes, inputStream_.get(), strings, bufferStart_, bufferEnd_); + } + auto header = strings; + for (auto i = 0; i < dictionary_.numValues; ++i) { + auto length = *reinterpret_cast(header); + values[i] = StringView(header + sizeof(int32_t), length); + header += length + sizeof(int32_t); + } + VELOX_CHECK_EQ(header, strings + numBytes); + break; + } + case thrift::Type::FIXED_LEN_BYTE_ARRAY: { + auto parquetTypeLength = type_->typeLength_; + auto numParquetBytes = dictionary_.numValues * parquetTypeLength; + auto veloxTypeLength = type_->type()->cppSizeInBytes(); + auto numVeloxBytes = dictionary_.numValues * veloxTypeLength; + dictionary_.values = AlignedBuffer::allocate(numVeloxBytes, &pool_); + auto data = dictionary_.values->asMutable(); + // Read the data bytes. + if (pageData_) { + memcpy(data, pageData_, numParquetBytes); + } else { + dwio::common::readBytes( + numParquetBytes, + inputStream_.get(), + data, + bufferStart_, + bufferEnd_); + } + if (type_->type()->isShortDecimal()) { + // Parquet decimal values have a fixed typeLength_ and are in big-endian + // layout. + if (numParquetBytes < numVeloxBytes) { + auto values = dictionary_.values->asMutable(); + for (auto i = dictionary_.numValues - 1; i >= 0; --i) { + // Expand the Parquet type length values to Velox type length. 
+ // We start from the end to allow in-place expansion. + auto sourceValue = data + (i * parquetTypeLength); + int64_t value = *sourceValue >= 0 ? 0 : -1; + memcpy( + reinterpret_cast(&value) + veloxTypeLength - + parquetTypeLength, + sourceValue, + parquetTypeLength); + values[i] = value; + } + } + auto values = dictionary_.values->asMutable(); + for (auto i = 0; i < dictionary_.numValues; ++i) { + values[i] = __builtin_bswap64(values[i]); + } + break; + } else if (type_->type()->isLongDecimal()) { + // Parquet decimal values have a fixed typeLength_ and are in big-endian + // layout. + if (numParquetBytes < numVeloxBytes) { + auto values = dictionary_.values->asMutable(); + for (auto i = dictionary_.numValues - 1; i >= 0; --i) { + // Expand the Parquet type length values to Velox type length. + // We start from the end to allow in-place expansion. + auto sourceValue = data + (i * parquetTypeLength); + int128_t value = *sourceValue >= 0 ? 0 : -1; + memcpy( + reinterpret_cast(&value) + veloxTypeLength - + parquetTypeLength, + sourceValue, + parquetTypeLength); + values[i] = value; + } + } + auto values = dictionary_.values->asMutable(); + for (auto i = 0; i < dictionary_.numValues; ++i) { + values[i] = bits::builtin_bswap128(values[i]); + } + break; + } + VELOX_UNSUPPORTED( + "Parquet type {} not supported for dictionary", parquetType); + } + case thrift::Type::INT96: + default: + VELOX_UNSUPPORTED( + "Parquet type {} not supported for dictionary", parquetType); + } +} + +IAAPageReader::~IAAPageReader() { + if (dataDecompFuture.valid()) { + std::move(dataDecompFuture).get(); + } + + if (dictDecompFuture.valid()) { + std::move(dictDecompFuture).get(); + } +} +} // namespace facebook::velox::parquet \ No newline at end of file diff --git a/velox/dwio/parquet/reader/IAAPageReader.h b/velox/dwio/parquet/reader/IAAPageReader.h new file mode 100644 index 0000000000000..5d06492cc2da2 --- /dev/null +++ b/velox/dwio/parquet/reader/IAAPageReader.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +#include "velox/dwio/common/compression/AsyncCompression.h" +#include "velox/dwio/parquet/reader/PageReader.h" + +namespace facebook::velox::parquet { + +using folly::SemiFuture; + +struct PreDecompPageInfo { + int64_t numValues; // Number of values in this row group + int64_t visitedRows; // rows already read + const char* FOLLY_NULLABLE dataPageData{nullptr}; + BufferPtr uncompressedData; +}; + +class IAAPageReader : public PageReader { + public: + IAAPageReader( + std::unique_ptr stream, + memory::MemoryPool& pool, + ParquetTypeWithIdPtr fileType, + thrift::CompressionCodec::type codec, + int64_t chunkSize) + : PageReader(std::move(stream), pool, fileType, codec, chunkSize) { + uncompressedDictData_ = nullptr; + uncompressedDataV1Data_ = nullptr; + } + ~IAAPageReader(); + + PageReaderType getType() { + return PageReaderType::IAA; + }; + + /** + * Submit decompression job to IAA, store the decompression future + * @param need_pre_decompress true if the codec and window bits are adaptable + * for IAA + * @param numValues number of values in row group. This value is stored in + * rowGroupPageInfo_ and used for determine whether there is need to + * pre-decompress by IAA + */ + void preDecompressPage(bool& need_pre_decompress, int64_t numValues); + + // Override method to call seekToPreDecompPage and + // prefetchNextPage in IAAPageReader + virtual void seekToPage(int64_t row); + + private: + void prefetchDataPageV1(const thrift::PageHeader& pageHeader); + void prefetchDataPageV2(const thrift::PageHeader& pageHeader); + void prefetchDictionary(const thrift::PageHeader& pageHeader); + + // Prefetch the next page if there are more than one page in the row group + // and then submit the page decompression job to IAA + void prefetchNextPage(); + + // Get decompressed page from IAA async decompressor. Then Reads and sets + // 'rowOfPage_' and 'numRowsInPage_' and initializes a decoder for the found + // page. 
+ bool seekToPreDecompPage(int64_t row); + + void prepareDataPageV1( + const thrift::PageHeader& pageHeader, + int64_t row, + bool job_success = false); + void prepareDictionary( + const thrift::PageHeader& pageHeader, + bool job_success = false); + const bool iaaDecompress( + const char* FOLLY_NONNULL pageData, + uint32_t compressedSize, + uint32_t uncompressedSize, + BufferPtr& uncompressedData, + SemiFuture& future); + + // Used for pre-decompress + BufferPtr uncompressedDictData_; + BufferPtr uncompressedDataV1Data_; + thrift::PageHeader dictPageHeader_; + const char* FOLLY_NULLABLE dictPageData_{nullptr}; + bool needUncompressDict; + + thrift::PageHeader dataPageHeader_; + const char* FOLLY_NULLABLE dataPageData_{nullptr}; + + SemiFuture dictDecompFuture = SemiFuture::makeEmpty(); + SemiFuture dataDecompFuture = SemiFuture::makeEmpty(); + + bool preDecompressDict_ = false; + bool preDecompressData_ = false; + bool isWinSizeFit_ = false; + PreDecompPageInfo rowGroupPageInfo_; +}; + +} // namespace facebook::velox::parquet diff --git a/velox/dwio/parquet/reader/PageReader.cpp b/velox/dwio/parquet/reader/PageReader.cpp index 162ad14006231..042150f37a035 100644 --- a/velox/dwio/parquet/reader/PageReader.cpp +++ b/velox/dwio/parquet/reader/PageReader.cpp @@ -28,116 +28,11 @@ namespace facebook::velox::parquet { using thrift::Encoding; using thrift::PageHeader; -void PageReader::preDecompressPage( - bool& need_pre_decompress, - int64_t numValues) { - if (codec_ != thrift::CompressionCodec::GZIP) { - need_pre_decompress = false; - return; - } - for (;;) { - auto dataStart = pageStart_; - if (chunkSize_ <= pageStart_) { - // This may happen if seeking to exactly end of row group. - numRepDefsInPage_ = 0; - numRowsInPage_ = 0; - break; - } - PageHeader pageHeader = readPageHeader(); - pageStart_ = pageDataStart_ + pageHeader.compressed_page_size; - switch (pageHeader.type) { - case thrift::PageType::DATA_PAGE: - prefetchDataPageV1(pageHeader); - break; - case thrift::PageType::DATA_PAGE_V2: - prefetchDataPageV2(pageHeader); - break; - case thrift::PageType::DICTIONARY_PAGE: - prefetchDictionary(pageHeader); - continue; - default: - break; // ignore INDEX page type and any other custom extensions - } - break; - } - need_pre_decompress = isWinSizeFit; - rowGroupPageInfo_.numValues = numValues; - rowGroupPageInfo_.visitedRows = 0; -} - -void PageReader::prefetchNextPage() { - if (rowGroupPageInfo_.visitedRows + numRowsInPage_ >= - rowGroupPageInfo_.numValues) { - return; - } - if (chunkSize_ <= pageStart_) { - return; - } - PageHeader pageHeader = readPageHeader(); - rowGroupPageInfo_.pageStart = - pageDataStart_ + pageHeader.compressed_page_size; - switch (pageHeader.type) { - case thrift::PageType::DATA_PAGE: { - dataPageHeader_ = pageHeader; - VELOX_CHECK( - pageHeader.type == thrift::PageType::DATA_PAGE && - pageHeader.__isset.data_page_header); - rowGroupPageInfo_.dataPageData = - readBytes(pageHeader.compressed_page_size, pageBuffer_); - pre_decompress_data = iaaDecompressGzip( - rowGroupPageInfo_.dataPageData, - pageHeader.compressed_page_size, - pageHeader.uncompressed_page_size, - rowGroupPageInfo_.uncompressedDataV1Data, - data_qpl_job_id); - break; - } - case thrift::PageType::DATA_PAGE_V2: - LOG(WARNING) << "Data Page V2 not support "; - break; - case thrift::PageType::DICTIONARY_PAGE: - LOG(WARNING) << "Wrong path "; - break; - default: - break; // ignore INDEX page type and any other custom extensions - } -} - -bool PageReader::seekToPreDecompPage(int64_t row) { - bool has_qpl = 
false; - if (dict_qpl_job_id != 0) { - bool job_success = getDecompRes(dict_qpl_job_id); - prepareDictionary(dictPageHeader_, job_success); - dict_qpl_job_id = 0; - has_qpl = true; - } - - if (this->data_qpl_job_id != 0) { - bool job_success = getDecompRes(data_qpl_job_id); - prepareDataPageV1(dataPageHeader_, row, job_success); - data_qpl_job_id = 0; - has_qpl = true; - } - - if (has_qpl) { - if (row == kRepDefOnly || row < rowOfPage_ + numRowsInPage_) { - return true; - } - updateRowInfoAfterPageSkipped(); - } - return false; -} - void PageReader::seekToPage(int64_t row) { defineDecoder_.reset(); repeatDecoder_.reset(); // 'rowOfPage_' is the row number of the first row of the next page. rowOfPage_ += numRowsInPage_; - - if (seekToPreDecompPage(row)) { - return; - } - for (;;) { auto dataStart = pageStart_; if (chunkSize_ <= pageStart_) { @@ -324,10 +219,7 @@ void PageReader::updateRowInfoAfterPageSkipped() { } } -void PageReader::prepareDataPageV1( - const PageHeader& pageHeader, - int64_t row, - bool job_success) { +void PageReader::prepareDataPageV1(const PageHeader& pageHeader, int64_t row) { VELOX_CHECK( pageHeader.type == thrift::PageType::DATA_PAGE && pageHeader.__isset.data_page_header); @@ -343,24 +235,11 @@ void PageReader::prepareDataPageV1( return; } - if (data_qpl_job_id != 0 && pre_decompress_data && job_success) { - if (rowGroupPageInfo_.visitedRows > 0) { - BufferPtr tmp = uncompressedDataV1Data_; - uncompressedDataV1Data_ = rowGroupPageInfo_.uncompressedDataV1Data; - rowGroupPageInfo_.uncompressedDataV1Data = tmp; - } - pageData_ = uncompressedDataV1Data_->as(); - } else { - if (data_qpl_job_id == 0) { - dataPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); - } else if (rowGroupPageInfo_.visitedRows > 0) { - dataPageData_ = rowGroupPageInfo_.dataPageData; - } - pageData_ = decompressData( - dataPageData_, - pageHeader.compressed_page_size, - pageHeader.uncompressed_page_size); - } + pageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); + pageData_ = decompressData( + pageData_, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size); auto pageEnd = pageData_ + pageHeader.uncompressed_page_size; if (maxRepeat_ > 0) { uint32_t repeatLength = readField(pageData_); @@ -458,9 +337,7 @@ void PageReader::prepareDataPageV2(const PageHeader& pageHeader, int64_t row) { } } -void PageReader::prepareDictionary( - const PageHeader& pageHeader, - bool job_success) { +void PageReader::prepareDictionary(const PageHeader& pageHeader) { dictionary_.numValues = pageHeader.dictionary_page_header.num_values; dictionaryEncoding_ = pageHeader.dictionary_page_header.encoding; dictionary_.sorted = pageHeader.dictionary_page_header.__isset.is_sorted && @@ -470,17 +347,11 @@ void PageReader::prepareDictionary( dictionaryEncoding_ == Encoding::PLAIN); if (codec_ != thrift::CompressionCodec::UNCOMPRESSED) { - if (dict_qpl_job_id != 0 && pre_decompress_dict && job_success) { - pageData_ = uncompressedDictData_->as(); - } else { - if (dict_qpl_job_id == 0) { - dictPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); - } - pageData_ = decompressData( - dictPageData_, - pageHeader.compressed_page_size, - pageHeader.uncompressed_page_size); - } + pageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); + pageData_ = decompressData( + pageData_, + pageHeader.compressed_page_size, + pageHeader.uncompressed_page_size); } auto parquetType = type_->parquetType_.value(); @@ -812,10 +683,6 @@ void PageReader::skip(int64_t numRows) { 
numLeafNullsConsumed_ = rowOfPage_; } toSkip -= rowOfPage_ - firstUnvisited_; - if (isWinSizeFit) { - prefetchNextPage(); - } - rowGroupPageInfo_.visitedRows += numRowsInPage_; } firstUnvisited_ += numRows; @@ -867,10 +734,6 @@ void PageReader::skipNullsOnly(int64_t numRows) { seekToPage(firstUnvisited_ + numRows); firstUnvisited_ += numRows; toSkip = firstUnvisited_ - rowOfPage_; - if (isWinSizeFit) { - prefetchNextPage(); - } - rowGroupPageInfo_.visitedRows += numRowsInPage_; } else { firstUnvisited_ += numRows; } @@ -891,10 +754,6 @@ void PageReader::readNullsOnly(int64_t numValues, BufferPtr& buffer) { if (!availableOnPage) { seekToPage(firstUnvisited_); availableOnPage = numRowsInPage_; - if (isWinSizeFit) { - prefetchNextPage(); - } - rowGroupPageInfo_.visitedRows += numRowsInPage_; } auto numRead = std::min(availableOnPage, toRead); auto nulls = readNulls(numRead, nullsInReadRange_); @@ -955,10 +814,6 @@ bool PageReader::rowsForPage( if (hasChunkRepDefs_) { numLeafNullsConsumed_ = rowOfPage_; } - if (isWinSizeFit) { - prefetchNextPage(); - } - rowGroupPageInfo_.visitedRows += numRowsInPage_; } auto& scanState = reader.scanState(); if (isDictionary()) { @@ -1054,102 +909,4 @@ const VectorPtr& PageReader::dictionaryValues(const TypePtr& type) { return dictionaryValues_; } -void PageReader::prefetchDataPageV1(const thrift::PageHeader& pageHeader) { - dataPageHeader_ = pageHeader; - VELOX_CHECK( - pageHeader.type == thrift::PageType::DATA_PAGE && - pageHeader.__isset.data_page_header); - - dataPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); - pre_decompress_data = iaaDecompressGzip( - dataPageData_, - pageHeader.compressed_page_size, - pageHeader.uncompressed_page_size, - uncompressedDataV1Data_, - data_qpl_job_id); - return; -} - -void PageReader::prefetchDataPageV2(const thrift::PageHeader& pageHeader) { - return; -} - -void PageReader::prefetchDictionary(const thrift::PageHeader& pageHeader) { - dictPageHeader_ = pageHeader; - dictionaryEncoding_ = pageHeader.dictionary_page_header.encoding; - VELOX_CHECK( - dictionaryEncoding_ == Encoding::PLAIN_DICTIONARY || - dictionaryEncoding_ == Encoding::PLAIN); - dictPageData_ = readBytes(pageHeader.compressed_page_size, pageBuffer_); - - pre_decompress_dict = iaaDecompressGzip( - dictPageData_, - pageHeader.compressed_page_size, - pageHeader.uncompressed_page_size, - uncompressedDictData_, - dict_qpl_job_id); - - return; -} - -const bool PageReader::iaaDecompressGzip( - const char* pageData, - uint32_t compressedSize, - uint32_t uncompressedSize, - BufferPtr& uncompressedData, - int& qpl_job_id) { - dwio::common::ensureCapacity( - uncompressedData, uncompressedSize, &pool_); - - if (!isWinSizeFit) { - // window size should be 4KB for IAA - if (dwio::common::compression::Compressor::PARQUET_ZLIB_WINDOW_BITS_4KB == - dwio::common::compression::getZlibWindowBits( - (const uint8_t*)pageData, uncompressedSize)) { - isWinSizeFit = true; - } else { - qpl_job_id = -1; - return false; - } - } - auto streamDebugInfo = - fmt::format("Page Reader: Stream {}", inputStream_->getName()); - std::unique_ptr decompressor = - dwio::common::compression::createAsyncDecompressor( - thriftCodecToCompressionKind(), uncompressedSize, streamDebugInfo); - if (decompressor == nullptr) { - return false; - } - qpl_job_id = decompressor->decompress( - (const char*)pageData, - compressedSize, - (char*)uncompressedData->asMutable(), - uncompressedSize); - if (qpl_job_id < 0) { - return false; - } - return true; -} - -const bool 
PageReader::getDecompRes(int job_id) { - auto streamDebugInfo = - fmt::format("Page Reader: Stream {}", inputStream_->getName()); - std::unique_ptr decompressor = - dwio::common::compression::createAsyncDecompressor( - thriftCodecToCompressionKind(), 0, streamDebugInfo); - return decompressor->waitResult(job_id); -} - -PageReader::~PageReader() { - if (data_qpl_job_id > 0 || dict_qpl_job_id > 0) { - auto streamDebugInfo = - fmt::format("Page Reader: Stream {}", inputStream_->getName()); - std::unique_ptr decompressor = - dwio::common::compression::createAsyncDecompressor( - thriftCodecToCompressionKind(), 0, streamDebugInfo); - decompressor->releaseJob(data_qpl_job_id); - decompressor->releaseJob(dict_qpl_job_id); - } -} - } // namespace facebook::velox::parquet diff --git a/velox/dwio/parquet/reader/PageReader.h b/velox/dwio/parquet/reader/PageReader.h index 788765d203528..8a2074064dd45 100644 --- a/velox/dwio/parquet/reader/PageReader.h +++ b/velox/dwio/parquet/reader/PageReader.h @@ -22,6 +22,7 @@ #include "velox/dwio/common/SelectiveColumnReader.h" #include "velox/dwio/common/compression/Compression.h" #include "velox/dwio/parquet/reader/BooleanDecoder.h" +#include "velox/dwio/parquet/reader/PageReaderBase.h" #include "velox/dwio/parquet/reader/ParquetTypeWithId.h" #include "velox/dwio/parquet/reader/RleBpDataDecoder.h" #include "velox/dwio/parquet/reader/StringDecoder.h" @@ -30,19 +31,10 @@ namespace facebook::velox::parquet { -struct PreDecompPageInfo { - int64_t numValues; // Number of values in this row group - int64_t visitedRows; // rows already read - uint64_t pageStart{0}; - thrift::PageHeader dataPageHeader; - const char* FOLLY_NULLABLE dataPageData{nullptr}; - BufferPtr uncompressedDataV1Data; -}; - /// Manages access to pages inside a ColumnChunk. Interprets page headers and /// encodings and presents the combination of pages and encoded values as a /// continuous stream accessible via readWithVisitor(). -class PageReader { +class PageReader : public PageReaderBase { public: PageReader( std::unique_ptr stream, @@ -60,10 +52,6 @@ class PageReader { chunkSize_(chunkSize), nullConcatenation_(pool_) { type_->makeLevelInfo(leafInfo_); - dict_qpl_job_id = 0; - data_qpl_job_id = 0; - uncompressedDictData_ = nullptr; - uncompressedDataV1Data_ = nullptr; } // This PageReader constructor is for unit test only. @@ -80,26 +68,14 @@ class PageReader { codec_(codec), chunkSize_(chunkSize), nullConcatenation_(pool_) {} - ~PageReader(); + + PageReaderType getType() { + return PageReaderType::Common; + }; /// Advances 'numRows' top level rows. void skip(int64_t numRows); - /// Pre-decompress GZIP page with IAA - void preDecompressPage(bool& need_pre_decompress, int64_t numValues); - void prefetchDataPageV1(const thrift::PageHeader& pageHeader); - void prefetchDataPageV2(const thrift::PageHeader& pageHeader); - void prefetchDictionary(const thrift::PageHeader& pageHeader); - const bool getDecompRes(int job_id); - void prefetchNextPage(); - bool seekToPreDecompPage(int64_t row); - const bool iaaDecompressGzip( - const char* FOLLY_NONNULL pageData, - uint32_t compressedSize, - uint32_t uncompressedSize, - BufferPtr& uncompressedData, - int& qpl_job_id); - /// Decodes repdefs for 'numTopLevelRows'. Use getLengthsAndNulls() /// to access the lengths and nulls for the different nesting /// levels. @@ -161,7 +137,7 @@ class PageReader { // bufferEnd_ to the corresponding positions. 
thrift::PageHeader readPageHeader(); - private: + protected: // Indicates that we only want the repdefs for the next page. Used when // prereading repdefs with seekToPage. static constexpr int64_t kRepDefOnly = -1; @@ -193,7 +169,7 @@ class PageReader { // is interpreted in terms of leaf rows, including leaf // nulls. Seeking ahead of pages covered by decodeRepDefs is not // allowed for non-top level columns. - void seekToPage(int64_t row); + virtual void seekToPage(int64_t row); // Preloads the repdefs for the column chunk. To avoid preloading, // would need a way too clone the input stream so that one stream @@ -212,14 +188,9 @@ class PageReader { // next page. void updateRowInfoAfterPageSkipped(); - void prepareDataPageV1( - const thrift::PageHeader& pageHeader, - int64_t row, - bool job_success = false); + void prepareDataPageV1(const thrift::PageHeader& pageHeader, int64_t row); void prepareDataPageV2(const thrift::PageHeader& pageHeader, int64_t row); - void prepareDictionary( - const thrift::PageHeader& pageHeader, - bool job_success = false); + void prepareDictionary(const thrift::PageHeader& pageHeader); void makeDecoder(); // For a non-top level leaf, reads the defs and sets 'leafNulls_' and @@ -235,12 +206,12 @@ class PageReader { common::CompressionKind thriftCodecToCompressionKind(); // Decompresses data starting at 'pageData_', consuming 'compressedsize' and - // producing up to 'decompressedSize' bytes. The The start of the decoding + // producing up to 'uncompressedSize' bytes. The start of the decoding // result is returned. an intermediate copy may be made in 'decompresseddata_' const char* FOLLY_NONNULL decompressData( const char* FOLLY_NONNULL pageData, uint32_t compressedSize, - uint32_t decompressedSize); + uint32_t uncompressedSize); template T readField(const char* FOLLY_NONNULL& ptr) { @@ -511,23 +482,6 @@ class PageReader { std::unique_ptr stringDecoder_; std::unique_ptr booleanDecoder_; // Add decoders for other encodings here. - // Used for pre-decompress - BufferPtr uncompressedDictData_; - BufferPtr uncompressedDataV1Data_; - thrift::PageHeader dictPageHeader_; - const char* FOLLY_NULLABLE dictPageData_{nullptr}; - bool needUncompressDict; - - thrift::PageHeader dataPageHeader_; - const char* FOLLY_NULLABLE dataPageData_{nullptr}; - - int dict_qpl_job_id; - int data_qpl_job_id; - - bool pre_decompress_dict = false; - bool pre_decompress_data = false; - bool isWinSizeFit = false; - PreDecompPageInfo rowGroupPageInfo_; }; FOLLY_ALWAYS_INLINE dwio::common::compression::CompressionOptions diff --git a/velox/dwio/parquet/reader/PageReaderBase.h b/velox/dwio/parquet/reader/PageReaderBase.h new file mode 100644 index 0000000000000..da1847e0a4439 --- /dev/null +++ b/velox/dwio/parquet/reader/PageReaderBase.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +namespace facebook::velox::parquet { + +enum PageReaderType { + Common = 0, + IAA = 1, +}; + +class PageReaderBase { + public: + // explicit PageReaderBase(){}; + + virtual ~PageReaderBase(){}; + + virtual PageReaderType getType() = 0; + + /** + * skips 'numValues' top level rows, touching null flags only. + * Non-null values are not prepared for reading. + * @param numValues + * @return void + */ + virtual void skipNullsOnly(int64_t numValues) = 0; + + /** + * Reads 'numValues' null flags into 'nulls' and advances the + * decoders by as much. The read may span several pages. If there + * are no nulls, buffer may be set to nullptr. + * @param numValues + * @param buffer + * @return void + */ + virtual void readNullsOnly(int64_t numValues, BufferPtr& buffer) = 0; + + /** + * Advances 'numRows' top level rows. + * @param numRows + * @return void + */ + virtual void skip(int64_t numRows) = 0; + + /* Returns the current string dictionary as a FlatVector. + * @param type + * @return VectorPtr + */ + virtual const VectorPtr& dictionaryValues(const TypePtr& type) = 0; + + virtual bool isDictionary() const = 0; + + virtual void clearDictionary() = 0; +}; + +} // namespace facebook::velox::parquet \ No newline at end of file diff --git a/velox/dwio/parquet/reader/ParquetData.cpp b/velox/dwio/parquet/reader/ParquetData.cpp index ec67ab06ff8cf..69d3c979f1a28 100644 --- a/velox/dwio/parquet/reader/ParquetData.cpp +++ b/velox/dwio/parquet/reader/ParquetData.cpp @@ -18,9 +18,6 @@ #include "velox/dwio/common/BufferedInput.h" #include "velox/dwio/parquet/reader/Statistics.h" -#ifdef VELOX_ENABLE_INTEL_IAA -#include "velox/dwio/common/QplJobPool.h" -#endif namespace facebook::velox::parquet { @@ -88,11 +85,7 @@ bool ParquetData::rowGroupMatches( } bool ParquetData::preDecompRowGroup(uint32_t index) { -#ifdef VELOX_ENABLE_INTEL_IAA - if (!dwio::common::QplJobHWPool::getInstance().job_ready()) { - return false; - } -#else +#ifndef VELOX_ENABLE_INTEL_IAA return false; #endif auto& metaData = rowGroups_[index].columns[type_->column()].meta_data; @@ -103,7 +96,7 @@ bool ParquetData::preDecompRowGroup(uint32_t index) { } pageReaders_.resize(rowGroups_.size()); - auto iaaPageReader = std::make_unique( + auto iaaPageReader = std::make_unique( std::move(streams_[index]), pool_, type_, diff --git a/velox/dwio/parquet/reader/ParquetData.h b/velox/dwio/parquet/reader/ParquetData.h index 328013a342c08..6b67ee128e57d 100644 --- a/velox/dwio/parquet/reader/ParquetData.h +++ b/velox/dwio/parquet/reader/ParquetData.h @@ -17,6 +17,7 @@ #pragma once #include "velox/dwio/common/BufferUtil.h" +#include "velox/dwio/parquet/reader/IAAPageReader.h" #include "velox/dwio/parquet/reader/PageReader.h" #include "velox/dwio/parquet/thrift/ParquetThriftTypes.h" @@ -77,7 +78,7 @@ class ParquetData : public dwio::common::FormatData { FilterRowGroupsResult&) override; PageReader* FOLLY_NONNULL reader() const { - return reader_.get(); + return dynamic_cast(reader_.get()); } // Reads null flags for 'numValues' next top level rows. The first 'numValues' @@ -163,7 +164,11 @@ class ParquetData : public dwio::common::FormatData { /// PageReader::readWithVisitor(). 
template <typename Visitor> void readWithVisitor(Visitor visitor) { - reader_->readWithVisitor(visitor); + if (reader_->getType() == PageReaderType::IAA) { + dynamic_cast<IAAPageReader*>(reader_.get())->readWithVisitor(visitor); + } else { + dynamic_cast<PageReader*>(reader_.get())->readWithVisitor(visitor); + } } const VectorPtr& dictionaryValues(const TypePtr& type) { @@ -201,8 +206,8 @@ class ParquetData : public dwio::common::FormatData { const uint32_t maxDefine_; const uint32_t maxRepeat_; int64_t rowsInRowGroup_; - std::unique_ptr<PageReader> reader_; - std::vector<std::unique_ptr<PageReader>> pageReaders_; + std::unique_ptr<PageReaderBase> reader_; + std::vector<std::unique_ptr<PageReaderBase>> pageReaders_; bool needPreDecomp = true; // Nulls derived from leaf repdefs for non-leaf readers. BufferPtr presetNulls_;
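For illustration, a minimal usage sketch of the asynchronous decompression interface added in velox/dwio/common/compression/AsyncCompression.h. It assumes folly/futures/Future.h provides SemiFuture and that decompressAsync returns folly::SemiFuture<uint64_t>, as the IAA implementation in this patch suggests; the helper decompressPageAsync and its arguments are hypothetical, not part of the patch.

#include <folly/futures/Future.h>
#include "velox/dwio/common/compression/AsyncCompression.h"

namespace compression = facebook::velox::dwio::common::compression;
using facebook::velox::common::CompressionKind;

// Hypothetical helper: starts asynchronous decompression of one GZIP page and
// returns the future produced by decompressAsync(). A nullptr decompressor
// means no asynchronous codec is available (e.g. VELOX_ENABLE_INTEL_IAA is
// off), in which case the caller falls back to the synchronous
// decompressData() path, as IAAPageReader::iaaDecompress does.
folly::SemiFuture<uint64_t> decompressPageAsync(
    const char* compressed,
    uint64_t compressedSize,
    char* output,
    uint64_t uncompressedSize) {
  auto decompressor = compression::createAsyncDecompressor(
      CompressionKind::CompressionKind_GZIP);
  if (decompressor == nullptr) {
    return folly::makeSemiFuture(static_cast<uint64_t>(0));
  }
  // The future completes once the submitted IAA job has been waited on; it
  // carries an exception if a qpl job could not be acquired or submitted.
  return decompressor->decompressAsync(
      compressed, compressedSize, output, uncompressedSize);
}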
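A worked example of the RFC 1950 header check that getZlibWindowBits() performs and that iaaDecompress() uses to gate IAA offload (the window bits must equal 12, i.e. a 4 KiB history window). The function below restates the check in a self-contained form for illustration only; the sample header bytes are hypothetical and the second (FLG) byte is not examined by the check.

#include <cassert>
#include <cstdint>

// CMF is the first zlib header byte: CM = low nibble (must be 8 for deflate),
// CINFO = high nibble, window size = 2^(CINFO + 8), so windowBits = CINFO + 8.
int zlibWindowBits(const uint8_t* header, uint32_t size) {
  if (size < 2) {
    return -1;
  }
  const uint8_t cm = header[0] & 0xf;
  const uint8_t cinfo = header[0] >> 4;
  if (cm != 8 || cinfo > 7) {
    return -1;
  }
  return 8 + cinfo;
}

int main() {
  // CINFO = 4 -> 4 KiB window -> windowBits 12: eligible for IAA offload.
  const uint8_t smallWindow[2] = {0x48, 0x89};
  // CINFO = 7 -> 32 KiB window -> windowBits 15: stays on the software path.
  const uint8_t defaultWindow[2] = {0x78, 0x9c};
  assert(zlibWindowBits(smallWindow, 2) == 12);  // PARQUET_ZLIB_WINDOW_BITS_4KB
  assert(zlibWindowBits(defaultWindow, 2) == 15);
  return 0;
}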