From ce005423739dac347df529a838ad83ddb9c7b917 Mon Sep 17 00:00:00 2001 From: Piotr Findeisen Date: Wed, 10 Jan 2024 15:54:25 +0100 Subject: [PATCH 1/2] Reformat array initializers Prepare for upcoming airbase change requiring exactly one space in array initializers. This is to help ensure that the code that passes CI does not change upon automatic formatting. --- .../main/java/io/trino/operator/FlatHash.java | 2 +- .../main/java/io/trino/operator/FlatSet.java | 2 +- .../AbstractMapAggregationState.java | 2 +- .../aggregation/histogram/TypedHistogram.java | 2 +- .../AbstractMultimapAggregationState.java | 2 +- .../sql/rewrite/DescribeInputRewrite.java | 2 +- .../sql/rewrite/DescribeOutputRewrite.java | 2 +- .../java/io/trino/block/TestRowBlock.java | 8 ++-- .../operator/TestSimplePagesHashStrategy.java | 40 +++++++++---------- .../operator/exchange/TestLocalExchange.java | 2 +- .../TestPositionsAppenderPageBuilder.java | 10 ++--- .../TestDeleteAndInsertMergeProcessor.java | 14 +++---- .../spi/block/VariableWidthBlockBuilder.java | 2 +- .../java/io/trino/spi/type/BooleanType.java | 2 +- .../spi/block/BenchmarkCopyPositions.java | 2 +- .../io/trino/spi/block/TestColumnarMap.java | 2 +- .../io/trino/spi/block/TestLazyBlock.java | 2 +- .../trino/hive/formats/avro/TestAvroBase.java | 10 ++--- .../TestTupleDomainParquetPredicate.java | 4 +- .../plugin/deltalake/DeltaLakeWriter.java | 2 +- .../checkpoint/TestCheckpointWriter.java | 8 ++-- .../plugin/hive/coercions/CoercionUtils.java | 2 +- .../iceberg/procedure/MigrateProcedure.java | 2 +- .../BaseIcebergConnectorSmokeTest.java | 2 +- .../trino/plugin/iceberg/TestIcebergV2.java | 20 +++++----- .../kafka/protobuf/TestProtobufEncoder.java | 6 +-- .../plugin/postgresql/PostgreSqlClient.java | 2 +- .../TestHiveRequireQueryPartitionsFilter.java | 2 +- 28 files changed, 80 insertions(+), 78 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/operator/FlatHash.java b/core/trino-main/src/main/java/io/trino/operator/FlatHash.java index 3086d9594032..36119075a682 100644 --- a/core/trino-main/src/main/java/io/trino/operator/FlatHash.java +++ b/core/trino-main/src/main/java/io/trino/operator/FlatHash.java @@ -342,7 +342,7 @@ private void rehash(int minimumRequiredCapacity) // we incrementally allocate the record groups to smooth out memory allocation if (capacity <= RECORDS_PER_GROUP) { - recordGroups = new byte[][]{new byte[multiplyExact(capacity, recordSize)]}; + recordGroups = new byte[][] {new byte[multiplyExact(capacity, recordSize)]}; } else { recordGroups = new byte[(capacity + 1) >> RECORDS_PER_GROUP_SHIFT][]; diff --git a/core/trino-main/src/main/java/io/trino/operator/FlatSet.java b/core/trino-main/src/main/java/io/trino/operator/FlatSet.java index 5b5c298fdd28..fc6b968f4481 100644 --- a/core/trino-main/src/main/java/io/trino/operator/FlatSet.java +++ b/core/trino-main/src/main/java/io/trino/operator/FlatSet.java @@ -99,7 +99,7 @@ public FlatSet( private static byte[][] createRecordGroups(int capacity, int recordSize) { if (capacity < RECORDS_PER_GROUP) { - return new byte[][]{new byte[multiplyExact(capacity, recordSize)]}; + return new byte[][] {new byte[multiplyExact(capacity, recordSize)]}; } byte[][] groups = new byte[(capacity + 1) >> RECORDS_PER_GROUP_SHIFT][]; diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/AbstractMapAggregationState.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/AbstractMapAggregationState.java index 4d4bfc76934f..a18380534ad5 100644 --- 
a/core/trino-main/src/main/java/io/trino/operator/aggregation/AbstractMapAggregationState.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/AbstractMapAggregationState.java @@ -186,7 +186,7 @@ public AbstractMapAggregationState(AbstractMapAggregationState state) private static byte[][] createRecordGroups(int capacity, int recordSize) { if (capacity < RECORDS_PER_GROUP) { - return new byte[][]{new byte[multiplyExact(capacity, recordSize)]}; + return new byte[][] {new byte[multiplyExact(capacity, recordSize)]}; } byte[][] groups = new byte[(capacity + 1) >> RECORDS_PER_GROUP_SHIFT][]; diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/histogram/TypedHistogram.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/histogram/TypedHistogram.java index e40f503047a0..f4763b6c0037 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/histogram/TypedHistogram.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/histogram/TypedHistogram.java @@ -139,7 +139,7 @@ public TypedHistogram( private static byte[][] createRecordGroups(int capacity, int recordSize) { if (capacity < RECORDS_PER_GROUP) { - return new byte[][]{new byte[multiplyExact(capacity, recordSize)]}; + return new byte[][] {new byte[multiplyExact(capacity, recordSize)]}; } byte[][] groups = new byte[(capacity + 1) >> RECORDS_PER_GROUP_SHIFT][]; diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/multimapagg/AbstractMultimapAggregationState.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/multimapagg/AbstractMultimapAggregationState.java index 5a69677e9168..0d2d57fffd9a 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/multimapagg/AbstractMultimapAggregationState.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/multimapagg/AbstractMultimapAggregationState.java @@ -191,7 +191,7 @@ public AbstractMultimapAggregationState(AbstractMultimapAggregationState state) private static byte[][] createRecordGroups(int capacity, int recordSize) { if (capacity < RECORDS_PER_GROUP) { - return new byte[][]{new byte[multiplyExact(capacity, recordSize)]}; + return new byte[][] {new byte[multiplyExact(capacity, recordSize)]}; } byte[][] groups = new byte[(capacity + 1) >> RECORDS_PER_GROUP_SHIFT][]; diff --git a/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeInputRewrite.java b/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeInputRewrite.java index 9f917d2e6d44..fe524be5f298 100644 --- a/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeInputRewrite.java +++ b/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeInputRewrite.java @@ -88,7 +88,7 @@ private static final class Visitor extends AstVisitor<Node, Void> { private static final Query EMPTY_INPUT = createDesctibeInputQuery( - new Row[]{row( + new Row[] {row( new Cast(new NullLiteral(), toSqlType(BIGINT)), new Cast(new NullLiteral(), toSqlType(VARCHAR)))}, Optional.of(new Limit(new LongLiteral("0")))); diff --git a/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeOutputRewrite.java b/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeOutputRewrite.java index b8a78d502e29..9acab16abe27 100644 --- a/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeOutputRewrite.java +++ b/core/trino-main/src/main/java/io/trino/sql/rewrite/DescribeOutputRewrite.java @@ -88,7 +88,7 @@ private static final class Visitor extends AstVisitor<Node, Void> { private static final Query EMPTY_OUTPUT =
createDesctibeOutputQuery( - new Row[]{row( + new Row[] {row( new Cast(new NullLiteral(), toSqlType(VARCHAR)), new Cast(new NullLiteral(), toSqlType(VARCHAR)), new Cast(new NullLiteral(), toSqlType(VARCHAR)), diff --git a/core/trino-main/src/test/java/io/trino/block/TestRowBlock.java b/core/trino-main/src/test/java/io/trino/block/TestRowBlock.java index 399d57adc485..a980da71f20c 100644 --- a/core/trino-main/src/test/java/io/trino/block/TestRowBlock.java +++ b/core/trino-main/src/test/java/io/trino/block/TestRowBlock.java @@ -69,9 +69,11 @@ public void testFromFieldBlocksNoNullsDetection() { // Blocks does not discard the null mask during creation if no values are null boolean[] rowIsNull = new boolean[5]; - assertThat(fromNotNullSuppressedFieldBlocks(5, Optional.of(rowIsNull), new Block[] {new ByteArrayBlock(5, Optional.empty(), createExpectedValue(5).getBytes())}).mayHaveNull()).isTrue(); + assertThat(fromNotNullSuppressedFieldBlocks(5, Optional.of(rowIsNull), new Block[] { + new ByteArrayBlock(5, Optional.empty(), createExpectedValue(5).getBytes())}).mayHaveNull()).isTrue(); rowIsNull[rowIsNull.length - 1] = true; - assertThat(fromNotNullSuppressedFieldBlocks(5, Optional.of(rowIsNull), new Block[] {new ByteArrayBlock(5, Optional.of(rowIsNull), createExpectedValue(5).getBytes())}).mayHaveNull()).isTrue(); + assertThat(fromNotNullSuppressedFieldBlocks(5, Optional.of(rowIsNull), new Block[] { + new ByteArrayBlock(5, Optional.of(rowIsNull), createExpectedValue(5).getBytes())}).mayHaveNull()).isTrue(); // Empty blocks have no nulls and can also discard their null mask assertThat(fromNotNullSuppressedFieldBlocks(0, Optional.of(new boolean[0]), new Block[] {new ByteArrayBlock(0, Optional.empty(), new byte[0])}).mayHaveNull()).isFalse(); @@ -101,7 +103,7 @@ public void testCompactBlock() // NOTE: nested row blocks are required to have the exact same size so they are always compact assertCompact(fromFieldBlocks(0, new Block[] {emptyBlock, emptyBlock})); - assertCompact(fromNotNullSuppressedFieldBlocks(rowIsNull.length, Optional.of(rowIsNull), new Block[]{ + assertCompact(fromNotNullSuppressedFieldBlocks(rowIsNull.length, Optional.of(rowIsNull), new Block[] { new ByteArrayBlock(6, Optional.of(rowIsNull), createExpectedValue(6).getBytes()), new ByteArrayBlock(6, Optional.of(rowIsNull), createExpectedValue(6).getBytes())})); } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestSimplePagesHashStrategy.java b/core/trino-main/src/test/java/io/trino/operator/TestSimplePagesHashStrategy.java index 379dcd0a1fcd..bb6957e05a7e 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestSimplePagesHashStrategy.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestSimplePagesHashStrategy.java @@ -37,7 +37,7 @@ public class TestSimplePagesHashStrategy @Test public void testHashRowWithIntegerType() { - Block block = new IntArrayBlock(1, Optional.empty(), new int[]{1234}); + Block block = new IntArrayBlock(1, Optional.empty(), new int[] {1234}); SimplePagesHashStrategy strategy = createSimplePagesHashStrategy(INTEGER, ImmutableList.of(block)); Page page = new Page(block); @@ -51,9 +51,9 @@ public void testHashRowWithMapType() MapType mapType = new MapType(INTEGER, INTEGER, new TypeOperators()); Block block = mapType.createBlockFromKeyValue( Optional.empty(), - new int[]{0, 1}, - new IntArrayBlock(1, Optional.empty(), new int[]{1234}), - new IntArrayBlock(1, Optional.empty(), new int[]{5678})); + new int[] {0, 1}, + new IntArrayBlock(1, Optional.empty(), new int[] {1234}), + new 
IntArrayBlock(1, Optional.empty(), new int[] {5678})); SimplePagesHashStrategy strategy = createSimplePagesHashStrategy(mapType, ImmutableList.of(block)); Page page = new Page(block); @@ -67,9 +67,9 @@ public void testRowEqualsRowWithIntegerType() { SimplePagesHashStrategy strategy = createSimplePagesHashStrategy(INTEGER, ImmutableList.of()); - Page leftPage = new Page(new IntArrayBlock(1, Optional.empty(), new int[]{1234})); - Page rightPage1 = new Page(new IntArrayBlock(1, Optional.empty(), new int[]{1234})); - Page rightPage2 = new Page(new IntArrayBlock(1, Optional.empty(), new int[]{5678})); + Page leftPage = new Page(new IntArrayBlock(1, Optional.empty(), new int[] {1234})); + Page rightPage1 = new Page(new IntArrayBlock(1, Optional.empty(), new int[] {1234})); + Page rightPage2 = new Page(new IntArrayBlock(1, Optional.empty(), new int[] {5678})); // This works because IntegerType is comparable. assertThat(strategy.rowEqualsRow(0, leftPage, 0, rightPage1)).isTrue(); @@ -84,21 +84,21 @@ public void testRowEqualsRowWithMapType() Page leftPage = new Page(mapType.createBlockFromKeyValue( Optional.empty(), - new int[]{0, 1}, - new IntArrayBlock(1, Optional.empty(), new int[]{1234}), - new IntArrayBlock(1, Optional.empty(), new int[]{5678}))); + new int[] {0, 1}, + new IntArrayBlock(1, Optional.empty(), new int[] {1234}), + new IntArrayBlock(1, Optional.empty(), new int[] {5678}))); Page rightPage1 = new Page(mapType.createBlockFromKeyValue( Optional.empty(), - new int[]{0, 1}, - new IntArrayBlock(1, Optional.empty(), new int[]{1234}), - new IntArrayBlock(1, Optional.empty(), new int[]{5678}))); + new int[] {0, 1}, + new IntArrayBlock(1, Optional.empty(), new int[] {1234}), + new IntArrayBlock(1, Optional.empty(), new int[] {5678}))); Page rightPage2 = new Page(mapType.createBlockFromKeyValue( Optional.empty(), - new int[]{0, 1}, - new IntArrayBlock(1, Optional.empty(), new int[]{1234}), - new IntArrayBlock(1, Optional.empty(), new int[]{1234}))); + new int[] {0, 1}, + new IntArrayBlock(1, Optional.empty(), new int[] {1234}), + new IntArrayBlock(1, Optional.empty(), new int[] {1234}))); // This works because MapType is comparable. assertThat(strategy.rowEqualsRow(0, leftPage, 0, rightPage1)).isTrue(); @@ -108,7 +108,7 @@ public void testRowEqualsRowWithMapType() @Test public void testCompareSortChannelPositionsWithIntegerType() { - Block block = new IntArrayBlock(3, Optional.empty(), new int[]{1234, 5678, 1234}); + Block block = new IntArrayBlock(3, Optional.empty(), new int[] {1234, 5678, 1234}); SimplePagesHashStrategy strategy = createSimplePagesHashStrategy(INTEGER, ImmutableList.of(block)); // This works because IntegerType is orderable. 
@@ -123,9 +123,9 @@ public void testCompareSortChannelPositionsWithMapType() MapType mapType = new MapType(INTEGER, INTEGER, new TypeOperators()); Block block = mapType.createBlockFromKeyValue( Optional.empty(), - new int[]{0, 1}, - new IntArrayBlock(1, Optional.empty(), new int[]{1234}), - new IntArrayBlock(1, Optional.empty(), new int[]{5678})); + new int[] {0, 1}, + new IntArrayBlock(1, Optional.empty(), new int[] {1234}), + new IntArrayBlock(1, Optional.empty(), new int[] {5678})); SimplePagesHashStrategy strategy = createSimplePagesHashStrategy(mapType, ImmutableList.of(block)); diff --git a/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java b/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java index 9f8cdedef0c0..fa35534e9ae4 100644 --- a/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java +++ b/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java @@ -1448,7 +1448,7 @@ private static void assertPartitionedRemovePage(LocalExchangeSource source, int Page page = source.removePage(); assertThat(page).isNotNull(); - LocalPartitionGenerator partitionGenerator = new LocalPartitionGenerator(createChannelsHashGenerator(TYPES, new int[]{0}, TYPE_OPERATORS), partitionCount); + LocalPartitionGenerator partitionGenerator = new LocalPartitionGenerator(createChannelsHashGenerator(TYPES, new int[] {0}, TYPE_OPERATORS), partitionCount); for (int position = 0; position < page.getPositionCount(); position++) { assertThat(partitionGenerator.getPartition(page, position)).isEqualTo(partition); } diff --git a/core/trino-main/src/test/java/io/trino/operator/output/TestPositionsAppenderPageBuilder.java b/core/trino-main/src/test/java/io/trino/operator/output/TestPositionsAppenderPageBuilder.java index 542cea8944be..197e59f951a1 100644 --- a/core/trino-main/src/test/java/io/trino/operator/output/TestPositionsAppenderPageBuilder.java +++ b/core/trino-main/src/test/java/io/trino/operator/output/TestPositionsAppenderPageBuilder.java @@ -50,7 +50,7 @@ public void testFullOnPositionCountLimit() Block rleBlock = RunLengthEncodedBlock.create(VARCHAR, Slices.utf8Slice("test"), 10); Page inputPage = new Page(rleBlock); - IntArrayList positions = IntArrayList.wrap(new int[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + IntArrayList positions = IntArrayList.wrap(new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); // Append 32760 positions, just less than MAX_POSITION_COUNT assertEquals(32768, PositionsAppenderPageBuilder.MAX_POSITION_COUNT, "expected MAX_POSITION_COUNT to be 32768"); for (int i = 0; i < 3276; i++) { @@ -85,7 +85,7 @@ public void testFullOnDirectSizeInBytes() Block rleBlock = RunLengthEncodedBlock.create(VARCHAR, Slices.utf8Slice("test"), 10); Page inputPage = new Page(rleBlock); - IntArrayList positions = IntArrayList.wrap(new int[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + IntArrayList positions = IntArrayList.wrap(new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); pageBuilder.appendToOutputPartition(inputPage, positions); // 10 positions inserted, size in bytes is still the same since we're in RLE mode but direct size is 10x sizeAccumulator = pageBuilder.computeAppenderSizes(); @@ -124,7 +124,7 @@ public void testFlushUsefulDictionariesOnRelease() Block dictionaryBlock = DictionaryBlock.create(10, valueBlock, new int[10]); Page inputPage = new Page(dictionaryBlock); - pageBuilder.appendToOutputPartition(inputPage, IntArrayList.wrap(new int[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})); + pageBuilder.appendToOutputPartition(inputPage, 
IntArrayList.wrap(new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})); // Dictionary mode appender should report the size of the ID's, but doesn't currently track // the per-position size at all because it would be inefficient assertEquals(Integer.BYTES * 10, pageBuilder.getSizeInBytes()); @@ -140,7 +140,7 @@ public void testFlattenUnhelpfulDictionariesOnRelease() { // Create unhelpful dictionary wrapping Block valueBlock = createRandomBlockForType(VARCHAR, 10, 0.25f); - Block dictionaryBlock = DictionaryBlock.create(10, valueBlock, new int[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Block dictionaryBlock = DictionaryBlock.create(10, valueBlock, new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); Page inputPage = new Page(dictionaryBlock); // Ensure the builder allows the entire value block to be inserted without being full @@ -152,7 +152,7 @@ public void testFlattenUnhelpfulDictionariesOnRelease() List.of(VARCHAR), new PositionsAppenderFactory(new BlockTypeOperators())); - pageBuilder.appendToOutputPartition(inputPage, IntArrayList.wrap(new int[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})); + pageBuilder.appendToOutputPartition(inputPage, IntArrayList.wrap(new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})); assertEquals(Integer.BYTES * 10, pageBuilder.getSizeInBytes()); assertFalse(pageBuilder.isFull()); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/TestDeleteAndInsertMergeProcessor.java b/core/trino-main/src/test/java/io/trino/sql/planner/TestDeleteAndInsertMergeProcessor.java index 3a144d52b8b7..ad0ac8eed07d 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/TestDeleteAndInsertMergeProcessor.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/TestDeleteAndInsertMergeProcessor.java @@ -104,10 +104,10 @@ public void testUpdateAndDeletedMerge() Page inputPage = makePageFromBlocks( 5, Optional.of(rowIdNulls), - new Block[]{ - new LongArrayBlock(5, Optional.of(rowIdNulls), new long[]{2, 0, 1, 2, 2}), // TransactionId - new LongArrayBlock(5, Optional.of(rowIdNulls), new long[]{0, 0, 3, 1, 2}), // rowId - new IntArrayBlock(5, Optional.of(rowIdNulls), new int[]{536870912, 0, 536870912, 536870912, 536870912})}, // bucket + new Block[] { + new LongArrayBlock(5, Optional.of(rowIdNulls), new long[] {2, 0, 1, 2, 2}), // TransactionId + new LongArrayBlock(5, Optional.of(rowIdNulls), new long[] {0, 0, 3, 1, 2}), // rowId + new IntArrayBlock(5, Optional.of(rowIdNulls), new int[] {536870912, 0, 536870912, 536870912, 536870912})}, // bucket new Block[] { // customer makeVarcharArrayBlock("Aaron", "Carol", "Dave", "Dave", "Ed"), @@ -145,9 +145,9 @@ public void testAnotherMergeCase() 5, Optional.of(rowIdNulls), new Block[] { - new LongArrayBlock(5, Optional.of(rowIdNulls), new long[]{2, 0, 1, 2, 2}), // TransactionId - new LongArrayBlock(5, Optional.of(rowIdNulls), new long[]{0, 0, 3, 1, 2}), // rowId - new IntArrayBlock(5, Optional.of(rowIdNulls), new int[]{536870912, 0, 536870912, 536870912, 536870912})}, // bucket + new LongArrayBlock(5, Optional.of(rowIdNulls), new long[] {2, 0, 1, 2, 2}), // TransactionId + new LongArrayBlock(5, Optional.of(rowIdNulls), new long[] {0, 0, 3, 1, 2}), // rowId + new IntArrayBlock(5, Optional.of(rowIdNulls), new int[] {536870912, 0, 536870912, 536870912, 536870912})}, // bucket new Block[] { // customer makeVarcharArrayBlock("Aaron", "Carol", "Dave", "Dave", "Ed"), diff --git a/core/trino-spi/src/main/java/io/trino/spi/block/VariableWidthBlockBuilder.java b/core/trino-spi/src/main/java/io/trino/spi/block/VariableWidthBlockBuilder.java index 59aca4f3b550..58d819995e58 100644 
--- a/core/trino-spi/src/main/java/io/trino/spi/block/VariableWidthBlockBuilder.java +++ b/core/trino-spi/src/main/java/io/trino/spi/block/VariableWidthBlockBuilder.java @@ -32,7 +32,7 @@ public class VariableWidthBlockBuilder implements BlockBuilder { private static final int INSTANCE_SIZE = instanceSize(VariableWidthBlockBuilder.class); - private static final Block NULL_VALUE_BLOCK = new VariableWidthBlock(0, 1, EMPTY_SLICE, new int[]{0, 0}, new boolean[]{true}); + private static final Block NULL_VALUE_BLOCK = new VariableWidthBlock(0, 1, EMPTY_SLICE, new int[] {0, 0}, new boolean[] {true}); private static final int SIZE_IN_BYTES_PER_POSITION = Integer.BYTES + Byte.BYTES; private final BlockBuilderStatus blockBuilderStatus; diff --git a/core/trino-spi/src/main/java/io/trino/spi/type/BooleanType.java b/core/trino-spi/src/main/java/io/trino/spi/type/BooleanType.java index d2195f2c1619..2c8442f26ec1 100644 --- a/core/trino-spi/src/main/java/io/trino/spi/type/BooleanType.java +++ b/core/trino-spi/src/main/java/io/trino/spi/type/BooleanType.java @@ -64,7 +64,7 @@ public static Block wrapByteArrayAsBooleanBlockWithoutNulls(byte[] booleansAsByt public static Block createBlockForSingleNonNullValue(boolean value) { byte byteValue = value ? (byte) 1 : 0; - return new ByteArrayBlock(1, Optional.empty(), new byte[]{byteValue}); + return new ByteArrayBlock(1, Optional.empty(), new byte[] {byteValue}); } private BooleanType() diff --git a/core/trino-spi/src/test/java/io/trino/spi/block/BenchmarkCopyPositions.java b/core/trino-spi/src/test/java/io/trino/spi/block/BenchmarkCopyPositions.java index c66eade9807b..10166979629f 100644 --- a/core/trino-spi/src/test/java/io/trino/spi/block/BenchmarkCopyPositions.java +++ b/core/trino-spi/src/test/java/io/trino/spi/block/BenchmarkCopyPositions.java @@ -102,7 +102,7 @@ public void setup() else if (type.equals("ROW(BIGINT)")) { Optional<boolean[]> rowIsNull = nullsAllowed ?
Optional.of(generateIsNull(POSITIONS)) : Optional.empty(); LongArrayBlock randomLongArrayBlock = new LongArrayBlock(POSITIONS, rowIsNull, new Random(SEED).longs().limit(POSITIONS).toArray()); - block = RowBlock.fromNotNullSuppressedFieldBlocks(POSITIONS, rowIsNull, new Block[]{randomLongArrayBlock}); + block = RowBlock.fromNotNullSuppressedFieldBlocks(POSITIONS, rowIsNull, new Block[] {randomLongArrayBlock}); } } diff --git a/core/trino-spi/src/test/java/io/trino/spi/block/TestColumnarMap.java b/core/trino-spi/src/test/java/io/trino/spi/block/TestColumnarMap.java index 7a625a7f2c31..2b4db2558b21 100644 --- a/core/trino-spi/src/test/java/io/trino/spi/block/TestColumnarMap.java +++ b/core/trino-spi/src/test/java/io/trino/spi/block/TestColumnarMap.java @@ -37,7 +37,7 @@ public class TestColumnarMap { private static final TypeOperators TYPE_OPERATORS = new TypeOperators(); private static final MapType MAP_TYPE = new MapType(VARCHAR, VARCHAR, TYPE_OPERATORS); - private static final int[] MAP_SIZES = new int[]{16, 0, 13, 1, 2, 11, 4, 7}; + private static final int[] MAP_SIZES = new int[] {16, 0, 13, 1, 2, 11, 4, 7}; @Test public void test() diff --git a/core/trino-spi/src/test/java/io/trino/spi/block/TestLazyBlock.java b/core/trino-spi/src/test/java/io/trino/spi/block/TestLazyBlock.java index 30d27f8a34ee..f0d7293a01ea 100644 --- a/core/trino-spi/src/test/java/io/trino/spi/block/TestLazyBlock.java +++ b/core/trino-spi/src/test/java/io/trino/spi/block/TestLazyBlock.java @@ -66,7 +66,7 @@ public void testNestedGetLoadedBlock() List<Block> actualNotifications = new ArrayList<>(); Block arrayBlock = new IntArrayBlock(2, Optional.empty(), new int[] {0, 1}); LazyBlock lazyArrayBlock = new LazyBlock(2, () -> arrayBlock); - Block rowBlock = RowBlock.fromFieldBlocks(2, new Block[]{lazyArrayBlock}); + Block rowBlock = RowBlock.fromFieldBlocks(2, new Block[] {lazyArrayBlock}); LazyBlock lazyBlock = new LazyBlock(2, () -> rowBlock); LazyBlock.listenForLoads(lazyBlock, actualNotifications::add); diff --git a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java index 81426283e012..988dfdff7c59 100644 --- a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java +++ b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java @@ -160,15 +160,15 @@ public abstract class TestAvroBase ALL_TYPES_GENERIC_RECORD = new GenericData.Record(ALL_TYPES_RECORD_SCHEMA); ALL_TYPES_GENERIC_RECORD.put("aBoolean", true); - allTypeBlocks.add(new ByteArrayBlock(1, Optional.empty(), new byte[]{1})); + allTypeBlocks.add(new ByteArrayBlock(1, Optional.empty(), new byte[] {1})); ALL_TYPES_GENERIC_RECORD.put("aInt", 42); - allTypeBlocks.add(new IntArrayBlock(1, Optional.empty(), new int[]{42})); + allTypeBlocks.add(new IntArrayBlock(1, Optional.empty(), new int[] {42})); ALL_TYPES_GENERIC_RECORD.put("aLong", 3400L); - allTypeBlocks.add(new LongArrayBlock(1, Optional.empty(), new long[]{3400L})); + allTypeBlocks.add(new LongArrayBlock(1, Optional.empty(), new long[] {3400L})); ALL_TYPES_GENERIC_RECORD.put("aFloat", 3.14f); - allTypeBlocks.add(new IntArrayBlock(1, Optional.empty(), new int[]{floatToIntBits(3.14f)})); + allTypeBlocks.add(new IntArrayBlock(1, Optional.empty(), new int[] {floatToIntBits(3.14f)})); ALL_TYPES_GENERIC_RECORD.put("aDouble", 9.81); - allTypeBlocks.add(new LongArrayBlock(1, Optional.empty(), new long[]{doubleToLongBits(9.81)})); + allTypeBlocks.add(new
LongArrayBlock(1, Optional.empty(), new long[] {doubleToLongBits(9.81)})); ALL_TYPES_GENERIC_RECORD.put("aString", A_STRING_VALUE); allTypeBlocks.add(new VariableWidthBlock(1, Slices.utf8Slice(A_STRING_VALUE), new int[] {0, Slices.utf8Slice(A_STRING_VALUE).length()}, Optional.empty())); ALL_TYPES_GENERIC_RECORD.put("aBytes", A_BYTES_VALUE); diff --git a/lib/trino-parquet/src/test/java/io/trino/parquet/TestTupleDomainParquetPredicate.java b/lib/trino-parquet/src/test/java/io/trino/parquet/TestTupleDomainParquetPredicate.java index 04cefc95771c..0e6e7ada8017 100644 --- a/lib/trino-parquet/src/test/java/io/trino/parquet/TestTupleDomainParquetPredicate.java +++ b/lib/trino-parquet/src/test/java/io/trino/parquet/TestTupleDomainParquetPredicate.java @@ -474,7 +474,7 @@ private void testTimestampInt64(TimeUnit timeUnit, int precision, LocalDateTime .as(LogicalTypeAnnotation.timestampType(false, timeUnit)) .named("TimestampColumn"); - ColumnDescriptor columnDescriptor = new ColumnDescriptor(new String[]{}, type, 0, 0); + ColumnDescriptor columnDescriptor = new ColumnDescriptor(new String[] {}, type, 0, 0); TimestampType timestampType = createTimestampType(precision); assertThat(getDomain(columnDescriptor, timestampType, 0, null, ID, UTC)).isEqualTo(all(timestampType)); LocalDateTime maxTime = baseTime.plus(Duration.ofMillis(50)); @@ -719,7 +719,7 @@ public void testColumnIndexWithNoNullsCount() private ColumnDescriptor createColumnDescriptor(PrimitiveTypeName typeName, String columnName) { - return new ColumnDescriptor(new String[]{}, new PrimitiveType(REQUIRED, typeName, columnName), 0, 0); + return new ColumnDescriptor(new String[] {}, new PrimitiveType(REQUIRED, typeName, columnName), 0, 0); } private TupleDomain<ColumnDescriptor> getEffectivePredicate(ColumnDescriptor column, VarcharType type, Slice value) diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeWriter.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeWriter.java index 7c27b8d151cd..9fa930f2297f 100644 --- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeWriter.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeWriter.java @@ -350,7 +350,7 @@ public Block apply(Block block) RowBlock rowBlock = (RowBlock) runLengthEncodedBlock.getValue(); RowBlock newRowBlock = RowBlock.fromNotNullSuppressedFieldBlocks( 1, - rowBlock.isNull(0) ? Optional.of(new boolean[]{true}) : Optional.empty(), + rowBlock.isNull(0) ?
Optional.of(new boolean[] {true}) : Optional.empty(), coerceFields(rowBlock.getFieldBlocks())); return RunLengthEncodedBlock.create(newRowBlock, runLengthEncodedBlock.getPositionCount()); } diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestCheckpointWriter.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestCheckpointWriter.java index b928ae46be54..0ad6e8b06f35 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestCheckpointWriter.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestCheckpointWriter.java @@ -242,11 +242,11 @@ public void testCheckpointWriteReadParquetStatisticsRoundtrip() ProtocolEntry protocolEntry = new ProtocolEntry(10, 20, Optional.empty(), Optional.empty()); TransactionEntry transactionEntry = new TransactionEntry("appId", 1, 1001); - Block[] minMaxRowFieldBlocks = new Block[]{ + Block[] minMaxRowFieldBlocks = new Block[] { nativeValueToBlock(IntegerType.INTEGER, 1L), nativeValueToBlock(createUnboundedVarcharType(), utf8Slice("a")) }; - Block[] nullCountRowFieldBlocks = new Block[]{ + Block[] nullCountRowFieldBlocks = new Block[] { nativeValueToBlock(BigintType.BIGINT, 0L), nativeValueToBlock(BigintType.BIGINT, 15L) }; @@ -359,11 +359,11 @@ public void testDisablingRowStatistics() ImmutableMap.of(), 1000); ProtocolEntry protocolEntry = new ProtocolEntry(10, 20, Optional.empty(), Optional.empty()); - Block[] minMaxRowFieldBlocks = new Block[]{ + Block[] minMaxRowFieldBlocks = new Block[] { nativeValueToBlock(IntegerType.INTEGER, 1L), nativeValueToBlock(createUnboundedVarcharType(), utf8Slice("a")) }; - Block[] nullCountRowFieldBlocks = new Block[]{ + Block[] nullCountRowFieldBlocks = new Block[] { nativeValueToBlock(BigintType.BIGINT, 0L), nativeValueToBlock(BigintType.BIGINT, 15L) }; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/coercions/CoercionUtils.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/coercions/CoercionUtils.java index fcbe2aa6fc25..24c39905dcb2 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/coercions/CoercionUtils.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/coercions/CoercionUtils.java @@ -440,7 +440,7 @@ public Block apply(Block block) RowBlock rowBlock = (RowBlock) runLengthEncodedBlock.getValue(); RowBlock newRowBlock = RowBlock.fromNotNullSuppressedFieldBlocks( 1, - rowBlock.isNull(0) ? Optional.of(new boolean[]{true}) : Optional.empty(), + rowBlock.isNull(0) ? 
Optional.of(new boolean[] {true}) : Optional.empty(), coerceFields(rowBlock.getFieldBlocks())); return RunLengthEncodedBlock.create(newRowBlock, runLengthEncodedBlock.getPositionCount()); } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/MigrateProcedure.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/MigrateProcedure.java index 91283376cbb6..7ff3a65bf742 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/MigrateProcedure.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/MigrateProcedure.java @@ -232,7 +232,7 @@ public void doMigrate(ConnectorSession session, String schemaName, String tableN ImmutableList.Builder<DataFile> dataFilesBuilder = ImmutableList.builder(); if (hiveTable.getPartitionColumns().isEmpty()) { log.debug("Building data files from %s", location); - dataFilesBuilder.addAll(buildDataFiles(session, recursive, storageFormat, location, partitionSpec, new PartitionData(new Object[]{}), schema)); + dataFilesBuilder.addAll(buildDataFiles(session, recursive, storageFormat, location, partitionSpec, new PartitionData(new Object[0]), schema)); } else { Map<String, Optional<Partition>> partitions = listAllPartitions(metastore, hiveTable); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorSmokeTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorSmokeTest.java index df10f40055a1..d196f209612e 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorSmokeTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorSmokeTest.java @@ -132,7 +132,7 @@ public void testDeleteRowsConcurrently() ExecutorService executor = newFixedThreadPool(threads); List<String> rows = ImmutableList.of("(1, 0, 0, 0)", "(0, 1, 0, 0)", "(0, 0, 1, 0)", "(0, 0, 0, 1)"); - String[] expectedErrors = new String[]{"Failed to commit Iceberg update to table:", "Failed to replace table due to concurrent updates:"}; + String[] expectedErrors = new String[] {"Failed to commit Iceberg update to table:", "Failed to replace table due to concurrent updates:"}; try (TestTable table = new TestTable( getQueryRunner()::execute, "test_concurrent_delete", diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java index a7e9b8f42650..b75eef99f21d 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java @@ -206,7 +206,7 @@ public void testV2TableWithEqualityDelete() String tableName = "test_v2_equality_delete" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM tpch.tiny.nation", 25); Table icebergTable = loadTable(tableName); - writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation WHERE regionkey != 1"); // nationkey is before the equality delete column in the table schema, comment is after assertQuery("SELECT nationkey, comment FROM " + tableName, "SELECT nationkey, comment FROM nation WHERE regionkey != 1"); @@ -235,7 +235,7 @@ public void
testV2TableWithEqualityDeleteWhenColumnIsNested() "SELECT regionkey, ARRAY[1,2] array_column, MAP(ARRAY[1], ARRAY[2]) map_column, " + "CAST(ROW(1, 2e0) AS ROW(x BIGINT, y DOUBLE)) row_column FROM tpch.tiny.nation", 25); Table icebergTable = loadTable(tableName); - writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); assertQuery("SELECT array_column[1], map_column[1], row_column.x FROM " + tableName, "SELECT 1, 2, 1 FROM nation WHERE regionkey != 1"); } @@ -247,7 +247,7 @@ public void testOptimizingV2TableRemovesEqualityDeletesWhenWholeTableIsScanned() assertUpdate("CREATE TABLE " + tableName + " WITH (partitioning = ARRAY['regionkey']) AS SELECT * FROM tpch.tiny.nation", 25); Table icebergTable = loadTable(tableName); assertThat(icebergTable.currentSnapshot().summary()).containsEntry("total-equality-deletes", "0"); - writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); List<String> initialActiveFiles = getActiveFiles(tableName); query("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation WHERE regionkey != 1"); @@ -267,7 +267,7 @@ public void testOptimizingV2TableDoesntRemoveEqualityDeletesWhenOnlyPartOfTheTab Table icebergTable = loadTable(tableName); assertThat(icebergTable.currentSnapshot().summary()).containsEntry("total-equality-deletes", "0"); List<String> initialActiveFiles = getActiveFiles(tableName); - writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); query("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE WHERE regionkey != 1"); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation WHERE regionkey != 1"); // nationkey is before the equality delete column in the table schema, comment is after @@ -284,7 +284,7 @@ public void testSelectivelyOptimizingLeavesEqualityDeletes() String tableName = "test_selectively_optimizing_leaves_eq_deletes_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " WITH (partitioning = ARRAY['nationkey']) AS SELECT * FROM tpch.tiny.nation", 25); Table icebergTable = loadTable(tableName); - writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); query("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE WHERE nationkey < 5"); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation WHERE regionkey != 1 OR nationkey != 1"); assertThat(loadTable(tableName).currentSnapshot().summary()).containsEntry("total-equality-deletes", "1"); @@ -420,7 +420,7 @@ public void testOptimizingWholeTableRemovesEqualityDeletes() String tableName = "test_optimizing_whole_table_removes_eq_deletes_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " WITH (partitioning = ARRAY['nationkey']) AS SELECT * FROM tpch.tiny.nation", 25); Table icebergTable = loadTable(tableName); -
writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); query("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation WHERE regionkey != 1 OR nationkey != 1"); assertThat(loadTable(tableName).currentSnapshot().summary()).containsEntry("total-equality-deletes", "0"); @@ -453,7 +453,7 @@ public void testOptimizingPartitionsOfV2TableWithGlobalEqualityDeleteFile() assertUpdate("CREATE TABLE " + tableName + " WITH (partitioning = ARRAY['regionkey']) AS SELECT * FROM tpch.tiny.nation", 25); Table icebergTable = loadTable(tableName); assertThat(icebergTable.currentSnapshot().summary()).containsEntry("total-equality-deletes", "0"); - writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[]{1L}))); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); List<String> initialActiveFiles = getActiveFiles(tableName); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation WHERE regionkey != 1"); query("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE WHERE regionkey != 1"); @@ -464,8 +464,8 @@ public void testOptimizingPartitionsOfV2TableWithGlobalEqualityDeleteFile() List<String> updatedFiles = getActiveFiles(tableName); assertThat(updatedFiles) .doesNotContain(initialActiveFiles.stream() - .filter(path -> !path.contains("regionkey=1")) - .toArray(String[]::new)); + .filter(path -> !path.contains("regionkey=1")) + .toArray(String[]::new)); } @Test @@ -955,7 +955,7 @@ private void writeEqualityDeleteToNationTableWithDeleteColumns( List<Integer> equalityDeleteFieldIds = deleteColumns.stream() .map(name -> deleteRowSchema.findField(name).fieldId()) .collect(toImmutableList()); - writeEqualityDeleteToNationTableWithDeleteColumns(icebergTable, partitionSpec, partitionData, overwriteValues, deleteRowSchema, equalityDeleteFieldIds); + writeEqualityDeleteToNationTableWithDeleteColumns(icebergTable, partitionSpec, partitionData, overwriteValues, deleteRowSchema, equalityDeleteFieldIds); } private void writeEqualityDeleteToNationTableWithDeleteColumns( diff --git a/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/protobuf/TestProtobufEncoder.java b/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/protobuf/TestProtobufEncoder.java index 41dc2fae1c8e..da9b8bae14ca 100644 --- a/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/protobuf/TestProtobufEncoder.java +++ b/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/protobuf/TestProtobufEncoder.java @@ -396,7 +396,7 @@ private void testNestedStructuralDataTypes(String stringData, Integer integerDat BlockBuilder mapBlockBuilder = mapType.createBlockBuilder(null, 1); Block mapBlock = mapType.createBlockFromKeyValue( Optional.empty(), - new int[]{0, 1}, + new int[] {0, 1}, nativeValueToBlock(VARCHAR, utf8Slice("Key")), rowBlockBuilder.build()); mapType.appendTo( mapBlock, 0, mapBlockBuilder); @@ -409,12 +409,12 @@ private void testNestedStructuralDataTypes(String stringData, Integer integerDat Block arrayBlock = fromElementBlock( 1, Optional.empty(), - new int[]{0, rowBlockBuilder.getPositionCount()}, + new int[] {0, rowBlockBuilder.getPositionCount()}, rowBlockBuilder.build()); listType.appendTo(arrayBlock, 0, listBlockBuilder); BlockBuilder nestedBlockBuilder =
nestedRowType.createBlockBuilder(null, 1); - Block rowBlock = fromFieldBlocks(1, new Block[]{listBlockBuilder.build(), mapBlockBuilder.build(), rowBlockBuilder.build()}); + Block rowBlock = fromFieldBlocks(1, new Block[] {listBlockBuilder.build(), mapBlockBuilder.build(), rowBlockBuilder.build()}); nestedRowType.appendTo(rowBlock, 0, nestedBlockBuilder); rowEncoder.appendColumnValue(nestedBlockBuilder.build(), 0); diff --git a/plugin/trino-postgresql/src/main/java/io/trino/plugin/postgresql/PostgreSqlClient.java b/plugin/trino-postgresql/src/main/java/io/trino/plugin/postgresql/PostgreSqlClient.java index 28ddf9fd6c56..048ea750fcd7 100644 --- a/plugin/trino-postgresql/src/main/java/io/trino/plugin/postgresql/PostgreSqlClient.java +++ b/plugin/trino-postgresql/src/main/java/io/trino/plugin/postgresql/PostgreSqlClient.java @@ -1354,7 +1354,7 @@ private ObjectReadFunction varcharMapReadFunction() varcharMapType.getValueType().writeSlice(valueBlockBuilder, utf8Slice(entry.getValue())); } } - MapBlock mapBlock = varcharMapType.createBlockFromKeyValue(Optional.empty(), new int[]{0, map.size()}, keyBlockBuilder.build(), valueBlockBuilder.build()); + MapBlock mapBlock = varcharMapType.createBlockFromKeyValue(Optional.empty(), new int[] {0, map.size()}, keyBlockBuilder.build(), valueBlockBuilder.build()); return varcharMapType.getObject(mapBlock, 0); }); } diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/hive/TestHiveRequireQueryPartitionsFilter.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/hive/TestHiveRequireQueryPartitionsFilter.java index 31e6ebefa986..8d996b190079 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/hive/TestHiveRequireQueryPartitionsFilter.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/hive/TestHiveRequireQueryPartitionsFilter.java @@ -71,7 +71,7 @@ public void testRequiresQueryPartitionFilterOnSpecificSchema(String queryPartiti @DataProvider public Object[][] queryPartitionFilterRequiredSchemasDataProvider() { - return new Object[][]{ + return new Object[][] { {"ARRAY['default']"}, {"ARRAY['DEFAULT']"}, {"ARRAY['deFAUlt']"} From 3b77bc45084a92a862c63e5d128f90dc5901d4b7 Mon Sep 17 00:00:00 2001 From: Piotr Findeisen Date: Wed, 10 Jan 2024 15:55:53 +0100 Subject: [PATCH 2/2] Remove redundant lambda braces --- .../trino/spiller/TestFileSingleStreamSpillerFactory.java | 2 +- .../trino/sql/planner/assertions/BasePushdownPlanTest.java | 2 +- .../java/io/trino/plugin/jdbc/TestJdbcTableProperties.java | 4 ++-- .../plugin/iceberg/TestIcebergOrcMetricsCollection.java | 6 ++---- 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java b/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java index c53d8bfcf75d..1d4d283bf0e2 100644 --- a/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java +++ b/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java @@ -218,7 +218,7 @@ public void testCacheInvalidatedOnBadDisk() // Set second spiller path to read-only after initialization to emulate a disk failing during runtime setPosixFilePermissions(spillPath2.toPath(), ImmutableSet.of(PosixFilePermission.OWNER_READ)); - assertThatThrownBy(() -> { getUnchecked(singleStreamSpiller2.spill(page)); }) + assertThatThrownBy(() -> getUnchecked(singleStreamSpiller2.spill(page))) 
.isInstanceOf(com.google.common.util.concurrent.UncheckedExecutionException.class) .hasMessageContaining("Failed to spill pages"); spillers.add(singleStreamSpiller2); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/BasePushdownPlanTest.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/BasePushdownPlanTest.java index 325b4a5d2631..9ba223af6a66 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/BasePushdownPlanTest.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/BasePushdownPlanTest.java @@ -29,7 +29,7 @@ public abstract class BasePushdownPlanTest { protected Optional<TableHandle> getTableHandle(Session session, QualifiedObjectName objectName) { - return getQueryRunner().inTransaction(session, transactionSession -> { return getQueryRunner().getMetadata().getTableHandle(transactionSession, objectName); }); + return getQueryRunner().inTransaction(session, transactionSession -> getQueryRunner().getMetadata().getTableHandle(transactionSession, objectName)); } protected Map<String, ColumnHandle> getColumnHandles(Session session, QualifiedObjectName tableName) diff --git a/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcTableProperties.java b/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcTableProperties.java index 8c69032467c7..8dc5ab420eaf 100644 --- a/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcTableProperties.java +++ b/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcTableProperties.java @@ -56,7 +56,7 @@ public Map<String, Object> getTableProperties(ConnectorSession session, JdbcTabl @Test public void testGetTablePropertiesIsNotCalledForSelect() { - onGetTableProperties = () -> { fail("Unexpected call of: getTableProperties"); }; + onGetTableProperties = () -> fail("Unexpected call of: getTableProperties"); assertUpdate("CREATE TABLE copy_of_nation AS SELECT * FROM nation", 25); assertQuerySucceeds("SELECT * FROM copy_of_nation"); assertQuerySucceeds("SELECT nationkey FROM copy_of_nation"); @@ -66,7 +66,7 @@ public void testGetTablePropertiesIsCalled() { AtomicInteger counter = new AtomicInteger(); - onGetTableProperties = () -> { counter.incrementAndGet(); }; + onGetTableProperties = () -> counter.incrementAndGet(); assertQuerySucceeds("SHOW CREATE TABLE nation"); assertThat(counter.get()).isOne(); } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java index fd003829577a..03f440472ac5 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java @@ -180,10 +180,8 @@ public void testMetrics() assertThat(datafile.getRecordCount()).isEqualTo(1); assertThat(datafile.getValueCounts().size()).isEqualTo(1); assertThat(datafile.getNullValueCounts().size()).isEqualTo(1); - datafile.getUpperBounds().forEach((k, v) -> { assertThat(v.length()).isEqualTo(10); }); - datafile.getLowerBounds().forEach((k, v) -> { assertThat(v.length()).isEqualTo(10); }); + datafile.getUpperBounds().forEach((k, v) -> assertThat(v.length()).isEqualTo(10)); + datafile.getLowerBounds().forEach((k, v) -> assertThat(v.length()).isEqualTo(10)); // keep both c1 and c2 metrics assertUpdate("create table c_metrics (c1 varchar, c2 varchar)");
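Illustration (not part of the patches above; the variable names are made up for the example): patch 1 normalizes array-initializer spacing to exactly one space between the array type and the opening brace, the form the upcoming airbase formatting rule is expected to enforce, so code that passes CI is already stable under automatic formatting:

    // before: no space between the array type and the initializer brace
    int[] sizes = new int[]{16, 0, 13, 1};
    byte[][] groups = new byte[][]{new byte[8]};

    // after: exactly one space before the opening brace
    int[] sizes = new int[] {16, 0, 13, 1};
    byte[][] groups = new byte[][] {new byte[8]};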
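Likewise for patch 2, a sketch of the lambda cleanup (illustrative only, reusing identifiers from the diff): a single-expression lambda needs neither braces nor a trailing semicolon inside the body, so the statement form collapses to an equivalent expression lambda:

    // before: redundant braces around a single statement
    onGetTableProperties = () -> { counter.incrementAndGet(); };

    // after: expression lambda with identical behavior
    onGetTableProperties = () -> counter.incrementAndGet();

When the body is a single return statement, the explicit return disappears as well, as in the BasePushdownPlanTest change above.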