[SPARK-37506][CORE][SQL][DSTREAM][GRAPHX][ML][MLLIB][SS][EXAMPLES] Change the never changed 'var' to 'val'

### What changes were proposed in this pull request?
Similar to SPARK-33346, there are still some `var`s in the current code base that can be replaced by `val`, so this PR changes them from `var` to `val`.

### Why are the changes needed?
Use `val` instead of `var` when possible.
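For illustration only (this snippet is not part of the patch, and the object and variable names are made up): most of these changes follow the pattern of a mutable collection whose binding is never reassigned. Declaring it with `val` still allows the contents to change; only rebinding the name is forbidden.

```scala
import scala.collection.mutable.ArrayBuffer

object ValWithMutableBuffer {
  def main(args: Array[String]): Unit = {
    // The reference `lines` is never reassigned, so `val` is sufficient,
    // even though the buffer it points to is mutable and grows below.
    val lines = ArrayBuffer[String]()
    lines += "first"
    lines += "second"
    // lines = ArrayBuffer("other")  // would not compile: reassignment to val
    println(lines.mkString(", "))
  }
}
```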

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?

- Pass the Jenkins or GitHub Actions checks

- Manual test: compile a client and manually run `bin/run-example MiniReadWriteTest README.md`; the behavior is consistent before and after this PR

Closes apache#34760 from LuciferYang/var-to-val-2.

Authored-by: yangjie01 <[email protected]>
Signed-off-by: Sean Owen <[email protected]>
LuciferYang authored and srowen committed Dec 7, 2021
1 parent bde47c8 commit 116255d
Showing 25 changed files with 38 additions and 39 deletions.
@@ -42,7 +42,7 @@ private[deploy] class ClientArguments(args: Array[String]) {
  var supervise: Boolean = DEFAULT_SUPERVISE
  var memory: Int = DEFAULT_MEMORY
  var cores: Int = DEFAULT_CORES
- private var _driverOptions = ListBuffer[String]()
+ private val _driverOptions = ListBuffer[String]()
  def driverOptions: Seq[String] = _driverOptions.toSeq

  // kill parameters
@@ -351,7 +351,7 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
  val partIter = partitionLocs.partsWithLocs.iterator
  groupArr.filter(pg => pg.numPartitions == 0).foreach { pg =>
    while (partIter.hasNext && pg.numPartitions == 0) {
-     var (_, nxt_part) = partIter.next()
+     val (_, nxt_part) = partIter.next()
      if (!initialHash.contains(nxt_part)) {
        pg.partitions += nxt_part
        initialHash += nxt_part
3 changes: 1 addition & 2 deletions core/src/main/scala/org/apache/spark/status/LiveEntity.scala
@@ -908,7 +908,6 @@ private[spark] class LiveMiscellaneousProcess(val processId: String,
  var isActive = true
  var totalCores = 0
  val addTime = new Date(creationTime)
- var removeTime: Date = null
  var processLogs = Map[String, String]()

  override protected def doUpdate(): Any = {
@@ -919,7 +918,7 @@ private[spark] class LiveMiscellaneousProcess(val processId: String,
    isActive,
    totalCores,
    addTime,
-   Option(removeTime),
+   None,
    processLogs)
  new ProcessSummaryWrapper(info)
}
@@ -798,7 +798,7 @@ final class ShuffleBlockFetcherIterator(
  }

  val in = try {
-   var bufIn = buf.createInputStream()
+   val bufIn = buf.createInputStream()
    if (checksumEnabled) {
      val checksum = ShuffleChecksumHelper.getChecksumByAlgorithm(checksumAlgorithm)
      checkedIn = new CheckedInputStream(bufIn, checksum)
@@ -55,7 +55,7 @@ trait TestPrematureExit {

  /** Simple PrintStream that reads data into a buffer */
  private class BufferPrintStream extends PrintStream(noOpOutputStream) {
-   var lineBuffer = ArrayBuffer[String]()
+   val lineBuffer = ArrayBuffer[String]()
    // scalastyle:off println
    override def println(line: String): Unit = {
      lineBuffer += line
@@ -44,7 +44,7 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll {

  /** Simple PrintStream that reads data into a buffer */
  private class BufferPrintStream extends PrintStream(noOpOutputStream) {
-   var lineBuffer = ArrayBuffer[String]()
+   val lineBuffer = ArrayBuffer[String]()
    // scalastyle:off println
    override def println(line: String): Unit = {
      lineBuffer += line
@@ -1560,7 +1560,7 @@ class TaskSetManagerSuite

  // Keep track of the index of tasks that are resubmitted,
  // so that the test can check that task is resubmitted correctly
- var resubmittedTasks = new mutable.HashSet[Int]
+ val resubmittedTasks = new mutable.HashSet[Int]
  val dagScheduler = new FakeDAGScheduler(sc, sched) {
    override def taskEnded(
      task: Task[_],
@@ -59,17 +59,17 @@ object MiniReadWriteTest {
    System.exit(1)
  }

- var i = 0
+ val filePath = args(0)

- val localFilePath = new File(args(i))
+ val localFilePath = new File(filePath)
  if (!localFilePath.exists) {
-   System.err.println(s"Given path (${args(i)}) does not exist")
+   System.err.println(s"Given path ($filePath) does not exist")
    printUsage()
    System.exit(1)
  }

  if (!localFilePath.isFile) {
-   System.err.println(s"Given path (${args(i)}) is not a file")
+   System.err.println(s"Given path ($filePath) is not a file")
    printUsage()
    System.exit(1)
  }
@@ -308,7 +308,7 @@ private[kafka010] class KafkaOffsetReaderAdmin(
   * latest offset (offset in `knownOffsets` is great than the one in `partitionOffsets`).
   */
  def findIncorrectOffsets(): Seq[(TopicPartition, Long, Long)] = {
-   var incorrectOffsets = ArrayBuffer[(TopicPartition, Long, Long)]()
+   val incorrectOffsets = ArrayBuffer[(TopicPartition, Long, Long)]()
    partitionOffsets.foreach { case (tp, offset) =>
      knownOffsets.foreach(_.get(tp).foreach { knownOffset =>
        if (knownOffset > offset) {
@@ -355,7 +355,7 @@ private[kafka010] class KafkaOffsetReaderConsumer(
   * latest offset (offset in `knownOffsets` is great than the one in `partitionOffsets`).
   */
  def findIncorrectOffsets(): Seq[(TopicPartition, Long, Long)] = {
-   var incorrectOffsets = ArrayBuffer[(TopicPartition, Long, Long)]()
+   val incorrectOffsets = ArrayBuffer[(TopicPartition, Long, Long)]()
    partitionOffsets.foreach { case (tp, offset) =>
      knownOffsets.foreach(_.get(tp).foreach { knownOffset =>
        if (knownOffset > offset) {
@@ -129,7 +129,7 @@ object GraphGenerators extends Logging {
    throw new IllegalArgumentException(
      s"numEdges must be <= $numEdgesUpperBound but was $numEdges")
  }
- var edges = mutable.Set.empty[Edge[Int]]
+ val edges = mutable.Set.empty[Edge[Int]]
  while (edges.size < numEdges) {
    if (edges.size % 100 == 0) {
      logDebug(edges.size + " edges")
@@ -294,7 +294,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = k
  val beta = rnd.nextDouble
  val c = Array.fill(m * n) { rnd.nextDouble }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("dgemm[N,N]", m * n * k) { impl =>
    impl.dgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -312,7 +312,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = n
  val beta = rnd.nextDouble
  val c = Array.fill(m * n) { rnd.nextDouble }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("dgemm[N,T]", m * n * k) { impl =>
    impl.dgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -330,7 +330,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = k
  val beta = rnd.nextDouble
  val c = Array.fill(m * n) { rnd.nextDouble }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("dgemm[T,N]", m * n * k) { impl =>
    impl.dgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -348,7 +348,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = n
  val beta = rnd.nextDouble
  val c = Array.fill(m * n) { rnd.nextDouble }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("dgemm[T,T]", m * n * k) { impl =>
    impl.dgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -366,7 +366,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = k
  val beta = rnd.nextFloat
  val c = Array.fill(m * n) { rnd.nextFloat }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("sgemm[N,N]", m * n * k) { impl =>
    impl.sgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -384,7 +384,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = n
  val beta = rnd.nextFloat
  val c = Array.fill(m * n) { rnd.nextFloat }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("sgemm[N,T]", m * n * k) { impl =>
    impl.sgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -402,7 +402,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = k
  val beta = rnd.nextFloat
  val c = Array.fill(m * n) { rnd.nextFloat }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("sgemm[T,N]", m * n * k) { impl =>
    impl.sgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -420,7 +420,7 @@ object BLASBenchmark extends BenchmarkBase {
  val ldb = n
  val beta = rnd.nextFloat
  val c = Array.fill(m * n) { rnd.nextFloat }
- var ldc = m
+ val ldc = m

  runBLASBenchmark("sgemm[T,T]", m * n * k) { impl =>
    impl.sgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc)
@@ -180,7 +180,7 @@ class Word2Vec extends Serializable with Logging {
  private var trainWordsCount = 0L
  private var vocabSize = 0
  @transient private var vocab: Array[VocabWord] = null
- @transient private var vocabHash = mutable.HashMap.empty[String, Int]
+ @transient private val vocabHash = mutable.HashMap.empty[String, Int]

  private def learnVocab[S <: Iterable[String]](dataset: RDD[S]): Unit = {
    val words = dataset.flatMap(x => x)
@@ -37,8 +37,8 @@ class InteractionSuite extends MLTest with DefaultReadWriteTest {

  test("feature encoder") {
    def encode(cardinalities: Array[Int], value: Any): Vector = {
-     var indices = ArrayBuilder.make[Int]
-     var values = ArrayBuilder.make[Double]
+     val indices = ArrayBuilder.make[Int]
+     val values = ArrayBuilder.make[Double]
      val encoder = new FeatureEncoder(cardinalities)
      encoder.foreachNonzeroOutput(value, (i, v) => {
        indices += i
@@ -188,7 +188,7 @@ class ALSSuite extends MLTest with DefaultReadWriteTest with Logging {
  assert(compressed.size === 5)
  assert(compressed.srcIds.toSeq === Seq(0, 1, 2, 3))
  assert(compressed.dstPtrs.toSeq === Seq(0, 2, 3, 4, 5))
- var decompressed = ArrayBuffer.empty[(Int, Int, Int, Float)]
+ val decompressed = ArrayBuffer.empty[(Int, Int, Int, Float)]
  var i = 0
  while (i < compressed.srcIds.length) {
    var j = compressed.dstPtrs(i)
2 changes: 1 addition & 1 deletion sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
@@ -603,7 +603,7 @@ trait Row extends Serializable {

  // Convert the row fields to json
  var n = 0
- var elements = new mutable.ListBuffer[JField]
+ val elements = new mutable.ListBuffer[JField]
  val len = length
  while (n < len) {
    val field = schema(n)
@@ -962,7 +962,7 @@ case class JsonObjectKeys(child: Expression) extends UnaryExpression with Codege
  }

  private def getJsonKeys(parser: JsonParser, input: InternalRow): GenericArrayData = {
-   var arrayBufferOfKeys = ArrayBuffer.empty[UTF8String]
+   val arrayBufferOfKeys = ArrayBuffer.empty[UTF8String]

    // traverse until the end of input and ensure it returns valid key
    while(parser.nextValue() != null && parser.currentName() != null) {
@@ -544,7 +544,7 @@ object DateTimeUtils {
  def stringToDate(s: UTF8String): Option[Int] = {
    def isValidDigits(segment: Int, digits: Int): Boolean = {
      // An integer is able to represent a date within [+-]5 million years.
-     var maxDigitsYear = 7
+     val maxDigitsYear = 7
      (segment == 0 && digits >= 4 && digits <= maxDigitsYear) ||
        (segment != 0 && digits > 0 && digits <= 2)
    }
@@ -387,7 +387,7 @@ private[columnar] case object DictionaryEncoding extends CompressionScheme {
  private var count = 0

  // The reverse mapping of _dictionary, i.e. mapping encoded integer to the value itself.
- private var values = new mutable.ArrayBuffer[T#InternalType](1024)
+ private val values = new mutable.ArrayBuffer[T#InternalType](1024)

  // The dictionary that maps a value to the encoded short integer.
  private val dictionary = mutable.HashMap.empty[Any, Short]
@@ -203,7 +203,7 @@ class BasicWriteJobStatsTracker(

  override def processStats(stats: Seq[WriteTaskStats], jobCommitTime: Long): Unit = {
    val sparkContext = SparkContext.getActive.get
-   var partitionsSet: mutable.Set[InternalRow] = mutable.HashSet.empty
+   val partitionsSet: mutable.Set[InternalRow] = mutable.HashSet.empty
    var numFiles: Long = 0L
    var totalNumBytes: Long = 0L
    var totalNumOutput: Long = 0L
@@ -1409,7 +1409,7 @@ class SubquerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark

  test("Scalar subquery name should start with scalar-subquery#") {
    val df = sql("SELECT a FROM l WHERE a = (SELECT max(c) FROM r WHERE c = 1)".stripMargin)
-   var subqueryExecs: ArrayBuffer[SubqueryExec] = ArrayBuffer.empty
+   val subqueryExecs: ArrayBuffer[SubqueryExec] = ArrayBuffer.empty
    df.queryExecution.executedPlan.transformAllExpressions {
      case s @ ScalarSubquery(p: SubqueryExec, _) =>
        subqueryExecs += p
@@ -96,7 +96,7 @@ class ParquetColumnIndexSuite extends QueryTest with ParquetTest with SharedSpar
  test("SPARK-36123: reading from unaligned pages - test filters with nulls") {
    // insert 50 null values in [400, 450) to verify that they are skipped during processing row
    // range [500, 1000) against the second page of col_2 [400, 800)
-   var df = spark.range(0, 2000).map { i =>
+   val df = spark.range(0, 2000).map { i =>
      val strVal = if (i >= 400 && i < 450) null else i + ":" + "o" * (i / 100).toInt
      (i, strVal)
    }.toDF()
@@ -40,8 +40,8 @@ import org.apache.spark.util.Utils

  object LastOptions {

-   var mockStreamSourceProvider = mock(classOf[StreamSourceProvider])
-   var mockStreamSinkProvider = mock(classOf[StreamSinkProvider])
+   val mockStreamSourceProvider = mock(classOf[StreamSourceProvider])
+   val mockStreamSinkProvider = mock(classOf[StreamSinkProvider])
    var parameters: Map[String, String] = null
    var sinkParameters: Map[String, String] = null
    var schema: Option[StructType] = null
@@ -645,7 +645,7 @@ private[hive] class HiveClientImpl(
    }
    parts.map(_.getValues)
  }.distinct
- val droppedParts = ArrayBuffer.empty[java.util.List[String]]
+ val droppedParts = ArrayBuffer.empty[java.util.List[String]]
  matchingParts.foreach { partition =>
    try {
      shim.dropPartition(client, db, table, partition, !retainData, purge)
@@ -48,7 +48,7 @@ class ReceivedBlockTrackerSuite
  val hadoopConf = new Configuration()
  val streamId = 1

- var allReceivedBlockTrackers = new ArrayBuffer[ReceivedBlockTracker]()
+ val allReceivedBlockTrackers = new ArrayBuffer[ReceivedBlockTracker]()
  var checkpointDirectory: File = null
  var conf: SparkConf = null

@@ -378,7 +378,7 @@ class ReceivedBlockTrackerSuite
    recoverFromWriteAheadLog: Boolean = false,
    clock: Clock = new SystemClock): ReceivedBlockTracker = {
    val cpDirOption = if (setCheckpointDir) Some(checkpointDirectory.toString) else None
-   var tracker = new ReceivedBlockTracker(
+   val tracker = new ReceivedBlockTracker(
      conf, hadoopConf, Seq(streamId), clock, recoverFromWriteAheadLog, cpDirOption)
    allReceivedBlockTrackers += tracker
    tracker
