diff --git a/cpp/src/arrow/csv/writer.cc b/cpp/src/arrow/csv/writer.cc index 4b5252076af53..5513007aff627 100644 --- a/cpp/src/arrow/csv/writer.cc +++ b/cpp/src/arrow/csv/writer.cc @@ -22,7 +22,6 @@ #include "arrow/ipc/writer.h" #include "arrow/record_batch.h" #include "arrow/result.h" -#include "arrow/result_internal.h" #include "arrow/stl_allocator.h" #include "arrow/util/iterator.h" #include "arrow/util/logging.h" @@ -129,15 +128,15 @@ class ColumnPopulator { // threading overhead would not be justified. ctx.set_use_threads(false); if (data.type() && is_large_binary_like(data.type()->id())) { - ASSIGN_OR_RAISE(array_, compute::Cast(data, /*to_type=*/large_utf8(), - compute::CastOptions(), &ctx)); + ARROW_ASSIGN_OR_RAISE(array_, compute::Cast(data, /*to_type=*/large_utf8(), + compute::CastOptions(), &ctx)); } else { auto casted = compute::Cast(data, /*to_type=*/utf8(), compute::CastOptions(), &ctx); if (casted.ok()) { array_ = std::move(casted).ValueOrDie(); } else if (casted.status().IsCapacityError()) { - ASSIGN_OR_RAISE(array_, compute::Cast(data, /*to_type=*/large_utf8(), - compute::CastOptions(), &ctx)); + ARROW_ASSIGN_OR_RAISE(array_, compute::Cast(data, /*to_type=*/large_utf8(), + compute::CastOptions(), &ctx)); } else { return casted.status(); } @@ -501,8 +500,8 @@ class CSVWriterImpl : public ipc::RecordBatchWriter { return Status::Invalid("Null string cannot contain quotes."); } - ASSIGN_OR_RAISE(std::shared_ptr<Buffer> null_string, - arrow::AllocateBuffer(options.null_string.length())); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> null_string, + arrow::AllocateBuffer(options.null_string.length())); memcpy(null_string->mutable_data(), options.null_string.data(), options.null_string.length()); @@ -511,7 +510,7 @@ class CSVWriterImpl : public ipc::RecordBatchWriter { for (int col = 0; col < schema->num_fields(); col++) { const std::string& end_chars = col < schema->num_fields() - 1 ? 
delimiter : options.eol; - ASSIGN_OR_RAISE( + ARROW_ASSIGN_OR_RAISE( populators[col], MakePopulator(*schema->field(col), end_chars, options.delimiter, null_string, options.quoting_style, options.io_context.pool())); @@ -528,7 +527,7 @@ class CSVWriterImpl : public ipc::RecordBatchWriter { Status WriteRecordBatch(const RecordBatch& batch) override { RecordBatchIterator iterator = RecordBatchSliceIterator(batch, options_.batch_size); for (auto maybe_slice : iterator) { - ASSIGN_OR_RAISE(std::shared_ptr<RecordBatch> slice, maybe_slice); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<RecordBatch> slice, maybe_slice); RETURN_NOT_OK(TranslateMinimalBatch(*slice)); RETURN_NOT_OK(sink_->Write(data_buffer_)); stats_.num_record_batches++; @@ -570,10 +569,11 @@ class CSVWriterImpl : public ipc::RecordBatchWriter { Status PrepareForContentsWrite() { // Only called once, as part of initialization if (data_buffer_ == nullptr) { - ASSIGN_OR_RAISE(data_buffer_, - AllocateResizableBuffer( - options_.batch_size * schema_->num_fields() * kColumnSizeGuess, - options_.io_context.pool())); + ARROW_ASSIGN_OR_RAISE( + data_buffer_, + AllocateResizableBuffer( + options_.batch_size * schema_->num_fields() * kColumnSizeGuess, + options_.io_context.pool())); } return Status::OK(); } @@ -665,24 +665,24 @@ class CSVWriterImpl : public ipc::RecordBatchWriter { Status WriteCSV(const Table& table, const WriteOptions& options, arrow::io::OutputStream* output) { - ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(output, table.schema(), options)); + ARROW_ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(output, table.schema(), options)); RETURN_NOT_OK(writer->WriteTable(table)); return writer->Close(); } Status WriteCSV(const RecordBatch& batch, const WriteOptions& options, arrow::io::OutputStream* output) { - ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(output, batch.schema(), options)); + ARROW_ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(output, batch.schema(), options)); RETURN_NOT_OK(writer->WriteRecordBatch(batch)); return writer->Close(); } 
Status WriteCSV(const std::shared_ptr<RecordBatchReader>& reader, const WriteOptions& options, arrow::io::OutputStream* output) { - ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(output, reader->schema(), options)); + ARROW_ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(output, reader->schema(), options)); std::shared_ptr<RecordBatch> batch; while (true) { - ASSIGN_OR_RAISE(batch, reader->Next()); + ARROW_ASSIGN_OR_RAISE(batch, reader->Next()); if (batch == nullptr) break; RETURN_NOT_OK(writer->WriteRecordBatch(*batch)); } diff --git a/cpp/src/arrow/csv/writer_test.cc b/cpp/src/arrow/csv/writer_test.cc index 703179da94093..4fccf4ddbbb48 100644 --- a/cpp/src/arrow/csv/writer_test.cc +++ b/cpp/src/arrow/csv/writer_test.cc @@ -27,7 +27,7 @@ #include "arrow/io/memory.h" #include "arrow/ipc/writer.h" #include "arrow/record_batch.h" -#include "arrow/result_internal.h" +#include "arrow/result.h" #include "arrow/testing/gtest_util.h" #include "arrow/testing/matchers.h" #include "arrow/type.h" @@ -287,19 +287,19 @@ class TestWriteCSV : public ::testing::TestWithParam<WriteOptions> { template <typename Data> Result<std::string> ToCsvString(const Data& data, const WriteOptions& options) { std::shared_ptr<io::BufferOutputStream> out; - ASSIGN_OR_RAISE(out, io::BufferOutputStream::Create()); + ARROW_ASSIGN_OR_RAISE(out, io::BufferOutputStream::Create()); RETURN_NOT_OK(WriteCSV(data, options, out.get())); - ASSIGN_OR_RAISE(std::shared_ptr<Buffer> buffer, out->Finish()); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> buffer, out->Finish()); return std::string(reinterpret_cast<const char*>(buffer->data()), buffer->size()); } Result<std::string> ToCsvStringUsingWriter(const Table& data, const WriteOptions& options) { std::shared_ptr<io::BufferOutputStream> out; - ASSIGN_OR_RAISE(out, io::BufferOutputStream::Create()); + ARROW_ASSIGN_OR_RAISE(out, io::BufferOutputStream::Create()); // Write row-by-row - ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(out, data.schema(), options)); + ARROW_ASSIGN_OR_RAISE(auto writer, MakeCSVWriter(out, data.schema(), options)); TableBatchReader reader(data); reader.set_chunksize(1); std::shared_ptr<RecordBatch> batch; @@ -310,7 +310,7 
@@ class TestWriteCSV : public ::testing::TestWithParam<WriteOptions> { } RETURN_NOT_OK(writer->Close()); EXPECT_EQ(data.num_rows(), writer->stats().num_record_batches); - ASSIGN_OR_RAISE(std::shared_ptr<Buffer> buffer, out->Finish()); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> buffer, out->Finish()); return std::string(reinterpret_cast<const char*>(buffer->data()), buffer->size()); } }; diff --git a/cpp/src/arrow/ipc/writer.cc b/cpp/src/arrow/ipc/writer.cc index 88aa3f3f8a47a..8cb0f5625760f 100644 --- a/cpp/src/arrow/ipc/writer.cc +++ b/cpp/src/arrow/ipc/writer.cc @@ -41,7 +41,7 @@ #include "arrow/ipc/metadata_internal.h" #include "arrow/ipc/util.h" #include "arrow/record_batch.h" -#include "arrow/result_internal.h" +#include "arrow/result.h" #include "arrow/sparse_tensor.h" #include "arrow/status.h" #include "arrow/table.h" @@ -840,8 +840,8 @@ Status WriteRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset, Status WriteRecordBatchStream(const std::vector<std::shared_ptr<RecordBatch>>& batches, const IpcWriteOptions& options, io::OutputStream* dst) { - ASSIGN_OR_RAISE(std::shared_ptr<RecordBatchWriter> writer, - MakeStreamWriter(dst, batches[0]->schema(), options)); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<RecordBatchWriter> writer, + MakeStreamWriter(dst, batches[0]->schema(), options)); for (const auto& batch : batches) { DCHECK(batch->schema()->Equals(*batches[0]->schema())) << "Schemas unequal"; RETURN_NOT_OK(writer->WriteRecordBatch(*batch)); diff --git a/cpp/src/arrow/result_internal.h b/cpp/src/arrow/result_internal.h deleted file mode 100644 index 134902e1b75ad..0000000000000 --- a/cpp/src/arrow/result_internal.h +++ /dev/null @@ -1,22 +0,0 @@ -// -// Copyright 2017 Asylo authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -#pragma once - -#include "arrow/result.h" - -#ifndef ASSIGN_OR_RAISE -# define ASSIGN_OR_RAISE(lhs, rhs) ARROW_ASSIGN_OR_RAISE(lhs, rhs) -#endif diff --git a/cpp/src/parquet/arrow/schema.cc b/cpp/src/parquet/arrow/schema.cc index 0ee595508fec4..c19e2b9e48bb3 100644 --- a/cpp/src/parquet/arrow/schema.cc +++ b/cpp/src/parquet/arrow/schema.cc @@ -25,7 +25,7 @@ #include "arrow/extension_type.h" #include "arrow/io/memory.h" #include "arrow/ipc/api.h" -#include "arrow/result_internal.h" +#include "arrow/result.h" #include "arrow/type.h" #include "arrow/util/base64.h" #include "arrow/util/checked_cast.h" @@ -484,8 +484,8 @@ bool IsDictionaryReadSupported(const ArrowType& type) { ::arrow::Result<std::shared_ptr<ArrowType>> GetTypeForNode( int column_index, const schema::PrimitiveNode& primitive_node, SchemaTreeContext* ctx) { - ASSIGN_OR_RAISE(std::shared_ptr<ArrowType> storage_type, - GetArrowType(primitive_node, ctx->properties)); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<ArrowType> storage_type, + GetArrowType(primitive_node, ctx->properties)); if (ctx->properties.read_dictionary(column_index) && IsDictionaryReadSupported(*storage_type)) { return ::arrow::dictionary(::arrow::int32(), storage_type); @@ -723,8 +723,8 @@ Status ListToSchemaField(const GroupNode& group, LevelInfo current_levels, // yields list ?nullable const auto& primitive_node = static_cast<const PrimitiveNode&>(list_node); int column_index = ctx->schema->GetColumnIndex(primitive_node); - ASSIGN_OR_RAISE(std::shared_ptr<ArrowType> type, - GetTypeForNode(column_index, primitive_node, ctx)); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<ArrowType> type, + GetTypeForNode(column_index, 
primitive_node, ctx)); auto item_field = ::arrow::field(list_node.name(), type, /*nullable=*/false, FieldIdMetadata(list_node.field_id())); RETURN_NOT_OK( @@ -799,8 +799,8 @@ Status NodeToSchemaField(const Node& node, LevelInfo current_levels, // repeated $TYPE $FIELD_NAME const auto& primitive_node = static_cast<const PrimitiveNode&>(node); int column_index = ctx->schema->GetColumnIndex(primitive_node); - ASSIGN_OR_RAISE(std::shared_ptr<ArrowType> type, - GetTypeForNode(column_index, primitive_node, ctx)); + ARROW_ASSIGN_OR_RAISE(std::shared_ptr<ArrowType> type, + GetTypeForNode(column_index, primitive_node, ctx)); if (node.is_repeated()) { // One-level list encoding, e.g. // a: repeated int32;