[SPARK-50463][SQL][3.5] Fix ConstantColumnVector with Columnar to Row conversion #49131

Closed
ColumnVector.java
@@ -68,14 +68,14 @@ public abstract class ColumnVector implements AutoCloseable {
   public abstract void close();
 
   /**
-   * Cleans up memory for this column vector if it's not writable. The column vector is not usable
-   * after this.
+   * Cleans up memory for this column vector if its resources are freeable between batches.
+   * The column vector is not usable after this.
    *
-   * If this is a writable column vector, it is a no-op.
+   * If this is a writable or constant column vector, it is a no-op.
    */
-  public void closeIfNotWritable() {
-    // By default, we just call close() for all column vectors. If a column vector is writable, it
-    // should override this method and do nothing.
+  public void closeIfFreeable() {
+    // By default, we just call close() for all column vectors. If a column vector is writable or
+    // constant, it should override this method and do nothing.
     close();
   }
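The contract in practice: for an ordinary vector the default closeIfFreeable() frees memory immediately, while a writable vector keeps its buffers for reuse. A minimal sketch assuming this PR's method names (the demo class itself is hypothetical; OnHeapColumnVector inherits the no-op from WritableColumnVector below):

import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
import org.apache.spark.sql.types.DataTypes;

public class CloseIfFreeableDemo {
  public static void main(String[] args) {
    OnHeapColumnVector v = new OnHeapColumnVector(4, DataTypes.IntegerType);
    v.putInt(0, 42);
    v.closeIfFreeable(); // no-op: writable vectors keep their buffers between batches
    assert v.getInt(0) == 42; // still readable after the between-batch cleanup
    v.close(); // actually releases the buffers; the vector is unusable afterwards
  }
}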
ColumnarBatch.java
@@ -46,12 +46,12 @@ public void close() {
   }
 
   /**
-   * Called to close all the columns if they are not writable. This is used to clean up memory
-   * allocated during columnar processing.
+   * Called to close all the columns whose resources are freeable between batches.
+   * This is used to clean up memory allocated during columnar processing.
    */
-  public void closeIfNotWritable() {
+  public void closeIfFreeable() {
     for (ColumnVector c: columns) {
-      c.closeIfNotWritable();
+      c.closeIfFreeable();
     }
   }
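The caller-facing lifecycle is then: drain a batch, call closeIfFreeable() before fetching the next one, and call close() once at the end of execution. A hedged sketch of such a consumer loop (the drain helper and its arguments are hypothetical, loosely mirroring the loop ColumnarToRowExec generates):

import java.util.Iterator;
import java.util.function.Consumer;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.vectorized.ColumnarBatch;

public class BatchDrainSketch {
  static void drain(Iterator<ColumnarBatch> batches, Consumer<InternalRow> sink) {
    while (batches.hasNext()) {
      ColumnarBatch batch = batches.next();
      for (int i = 0; i < batch.numRows(); i++) {
        sink.accept(batch.getRow(i));
      }
      // Between batches: frees only vectors whose memory is per-batch;
      // writable and constant vectors treat this as a no-op.
      batch.closeIfFreeable();
    }
  }
}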
ConstantColumnVector.java
@@ -72,6 +72,11 @@ public ConstantColumnVector(int numRows, DataType type) {
     }
   }
 
+  public void closeIfFreeable() {
+    // No-op: `ConstantColumnVector`s reuse the data backing their value across multiple
+    // batches, so that data is only freed at the end of execution, in `close`.
+  }
+
   @Override
   public void close() {
     stringData = null;
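Why this override matters: a ConstantColumnVector backs every row of every batch with one shared value, typically a partition value, so freeing it between batches would leave later batches reading a nulled-out value. A small sketch of the intended lifetime, assuming this PR's API (the setUtf8String setter is from the existing class, not shown in this diff):

import org.apache.spark.sql.execution.vectorized.ConstantColumnVector;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.unsafe.types.UTF8String;

public class ConstantVectorLifetime {
  public static void main(String[] args) {
    ConstantColumnVector part = new ConstantColumnVector(1024, DataTypes.StringType);
    part.setUtf8String(UTF8String.fromString("0a")); // one value shared by all 1024 rows
    part.closeIfFreeable(); // no-op: the next batch still reads the value
    System.out.println(part.getUTF8String(0)); // prints 0a
    part.close(); // end of execution: stringData is nulled, vector unusable
  }
}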
WritableColumnVector.java
@@ -88,7 +88,7 @@ public void close() {
   }
 
   @Override
-  public void closeIfNotWritable() {
+  public void closeIfFreeable() {
     // no-op
   }
Columnar.scala
@@ -194,7 +194,7 @@ case class ColumnarToRowExec(child: SparkPlan) extends ColumnarToRowTransition with CodegenSupport {
          |    $shouldStop
          |  }
          |  $idx = $numRows;
-         |  $batch.closeIfNotWritable();
+         |  $batch.closeIfFreeable();
          |  $batch = null;
          |  $nextBatchFuncName();
          |}
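This is the fix itself: the generated columnar-to-row loop used to call closeIfNotWritable() between batches, which closed constant partition-value vectors while the scan was still producing batches, so later rows read freed values. A hedged standalone illustration of the mixed batch the loop now handles correctly (the demo class is hypothetical):

import org.apache.spark.sql.execution.vectorized.ConstantColumnVector;
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.vectorized.ColumnVector;
import org.apache.spark.sql.vectorized.ColumnarBatch;
import org.apache.spark.unsafe.types.UTF8String;

public class MixedBatchDemo {
  public static void main(String[] args) {
    // One regular data column plus one constant partition column, as in a partitioned scan.
    OnHeapColumnVector data = new OnHeapColumnVector(1, DataTypes.LongType);
    data.putLong(0, 7L);
    ConstantColumnVector part = new ConstantColumnVector(1, DataTypes.StringType);
    part.setUtf8String(UTF8String.fromString("0a"));

    ColumnarBatch batch = new ColumnarBatch(new ColumnVector[] {data, part}, 1);
    batch.closeIfFreeable(); // the between-batch call: a no-op for both vector types
    System.out.println(part.getUTF8String(0)); // still "0a", not null

    batch.close(); // end of execution: releases everything
  }
}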
ParquetQuerySuite.scala
@@ -482,6 +482,26 @@ abstract class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSparkSession {
     }
   }
 
+  test("SPARK-50463: Partition values can be read over multiple batches") {
+    withTempDir { dir =>
+      withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_BATCH_SIZE.key -> "1") {
+        val path = dir.getAbsolutePath
+        spark.range(0, 5)
+          .selectExpr("concat(cast(id % 2 as string), 'a') as partCol", "id")
+          .write
+          .format("parquet")
+          .mode("overwrite")
+          .partitionBy("partCol").save(path)
+        val df = spark.read.format("parquet").load(path).selectExpr("partCol")
+        val expected = spark.range(0, 5)
+          .selectExpr("concat(cast(id % 2 as string), 'a') as partCol")
+          .collect()
+
+        checkAnswer(df, expected)
+      }
+    }
+  }
+
   test("SPARK-10301 requested schema clipping - same schema") {
     withTempPath { dir =>
       val path = dir.getCanonicalPath
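The regression test pins the batch size to 1 so the five rows arrive as several single-row batches, forcing the partition value to survive each between-batch cleanup. The same scenario can be sketched outside the test harness; a hedged reproduction, assuming a local session and a hypothetical scratch path:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class Spark50463Repro {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .master("local[1]")
        .appName("SPARK-50463 repro")
        .getOrCreate();
    // Same knob the test sets: one row per batch.
    spark.conf().set("spark.sql.parquet.columnarReaderBatchSize", "1");

    String path = "/tmp/spark-50463-demo"; // hypothetical scratch location
    spark.range(0, 5)
        .selectExpr("concat(cast(id % 2 as string), 'a') as partCol", "id")
        .write().mode("overwrite").partitionBy("partCol").parquet(path);

    Dataset<Row> df = spark.read().parquet(path).selectExpr("partCol");
    df.show(); // before the fix, partCol could surface as null past the first batch
    spark.stop();
  }
}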