
Commit 60abc25

ScrapCodes authored and pwendell committed
SPARK-1096, a space after comment start style checker.
Author: Prashant Sharma <prashant.s@imaginea.com>

Closes #124 from ScrapCodes/SPARK-1096/scalastyle-comment-check and squashes the following commits:

214135a [Prashant Sharma] Review feedback.
5eba88c [Prashant Sharma] Fixed style checks for ///+ comments.
e54b2f8 [Prashant Sharma] improved message, work around.
83e7144 [Prashant Sharma] removed dependency on scalastyle in plugin, since scalastyle sbt plugin already depends on the right version. Incase we update the plugin we will have to adjust our spark-style project to depend on right scalastyle version.
810a1d6 [Prashant Sharma] SPARK-1096, a space after comment style checker.
ba33193 [Prashant Sharma] scala style as a project
1 parent 632c322 commit 60abc25
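
The rule this commit enforces is simple: a single-line comment must have whitespace right after the comment start, while runs of slashes such as ///+ are accepted (see the "Fixed style checks for ///+ comments" entry above). The sketch below only illustrates that rule as a standalone regex scan over source lines; it is not the scalastyle checker added to the spark-style project, and the names SpaceAfterCommentStartCheck and checkLines are hypothetical. A token-based checker such as scalastyle's would also avoid false positives on // sequences inside string literals, which this naive scan does not.

// Hypothetical, standalone sketch of the SPARK-1096 rule, not the real checker:
// a "//" comment start must be followed by a space, another '/', or nothing at all.
object SpaceAfterCommentStartCheck {
  // "//" followed immediately by a character that is neither whitespace nor '/'.
  private val badLineComment = """//[^\s/]""".r

  // Returns the 1-based numbers of the lines that violate the rule.
  def checkLines(lines: Seq[String]): Seq[Int] =
    for {
      (line, index) <- lines.zipWithIndex
      if badLineComment.findFirstIn(line).isDefined
    } yield index + 1

  def main(args: Array[String]): Unit = {
    val sample = Seq(
      "//no space after the comment start",  // flagged
      "// properly spaced comment",          // ok
      "/// ScalaDoc-style run of slashes"    // ok
    )
    checkLines(sample).foreach(n => println(s"Insert a space after the comment start on line $n"))
  }
}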


55 files changed: +180, −88 lines

core/src/main/scala/org/apache/spark/SparkEnv.scala

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ class SparkEnv private[spark] (
  // Unfortunately Akka's awaitTermination doesn't actually wait for the Netty server to shut
  // down, but let's call it anyway in case it gets fixed in a later release
  // UPDATE: In Akka 2.1.x, this hangs if there are remote actors, so we can't call it.
- //actorSystem.awaitTermination()
+ // actorSystem.awaitTermination()
  }

  private[spark]

core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala

Lines changed: 1 addition & 1 deletion
@@ -167,7 +167,7 @@ extends Logging {
  private var initialized = false
  private var conf: SparkConf = null
  def initialize(_isDriver: Boolean, conf: SparkConf) {
- TorrentBroadcast.conf = conf //TODO: we might have to fix it in tests
+ TorrentBroadcast.conf = conf // TODO: we might have to fix it in tests
  synchronized {
  if (!initialized) {
  initialized = true

core/src/main/scala/org/apache/spark/deploy/LocalSparkCluster.scala

Lines changed: 2 additions & 2 deletions
@@ -66,9 +66,9 @@ class LocalSparkCluster(numWorkers: Int, coresPerWorker: Int, memoryPerWorker: I
  // TODO: In Akka 2.1.x, ActorSystem.awaitTermination hangs when you have remote actors!
  // This is unfortunate, but for now we just comment it out.
  workerActorSystems.foreach(_.shutdown())
- //workerActorSystems.foreach(_.awaitTermination())
+ // workerActorSystems.foreach(_.awaitTermination())
  masterActorSystems.foreach(_.shutdown())
- //masterActorSystems.foreach(_.awaitTermination())
+ // masterActorSystems.foreach(_.awaitTermination())
  masterActorSystems.clear()
  workerActorSystems.clear()
  }

core/src/main/scala/org/apache/spark/deploy/master/LeaderElectionAgent.scala

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ import org.apache.spark.deploy.master.MasterMessages.ElectedLeader
  * [[org.apache.spark.deploy.master.MasterMessages.RevokedLeadership RevokedLeadership]]
  */
  private[spark] trait LeaderElectionAgent extends Actor {
- //TODO: LeaderElectionAgent does not necessary to be an Actor anymore, need refactoring.
+ // TODO: LeaderElectionAgent does not necessary to be an Actor anymore, need refactoring.
  val masterActor: ActorRef
  }

core/src/main/scala/org/apache/spark/executor/Executor.scala

Lines changed: 0 additions & 1 deletion
@@ -275,7 +275,6 @@ private[spark] class Executor(
  // have left some weird state around depending on when the exception was thrown, but on
  // the other hand, maybe we could detect that when future tasks fail and exit then.
  logError("Exception in task ID " + taskId, t)
- //System.exit(1)
  }
  } finally {
  // TODO: Unregister shuffle memory only for ResultTask

core/src/main/scala/org/apache/spark/metrics/MetricsConfig.scala

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ private[spark] class MetricsConfig(val configFile: Option[String]) extends Loggi
  }

  def initialize() {
- //Add default properties in case there's no properties file
+ // Add default properties in case there's no properties file
  setDefaultProperties(properties)

  // If spark.metrics.conf is not set, try to get file in class path

core/src/main/scala/org/apache/spark/network/Connection.scala

Lines changed: 9 additions & 9 deletions
@@ -48,7 +48,7 @@ abstract class Connection(val channel: SocketChannel, val selector: Selector,
  channel.socket.setTcpNoDelay(true)
  channel.socket.setReuseAddress(true)
  channel.socket.setKeepAlive(true)
- /*channel.socket.setReceiveBufferSize(32768) */
+ /* channel.socket.setReceiveBufferSize(32768) */

  @volatile private var closed = false
  var onCloseCallback: Connection => Unit = null
@@ -206,12 +206,12 @@ class SendingConnection(val address: InetSocketAddress, selector_ : Selector,

  private class Outbox {
  val messages = new Queue[Message]()
- val defaultChunkSize = 65536 //32768 //16384
+ val defaultChunkSize = 65536
  var nextMessageToBeUsed = 0

  def addMessage(message: Message) {
  messages.synchronized{
- /*messages += message*/
+ /* messages += message*/
  messages.enqueue(message)
  logDebug("Added [" + message + "] to outbox for sending to " +
  "[" + getRemoteConnectionManagerId() + "]")
@@ -221,8 +221,8 @@ class SendingConnection(val address: InetSocketAddress, selector_ : Selector,
  def getChunk(): Option[MessageChunk] = {
  messages.synchronized {
  while (!messages.isEmpty) {
- /*nextMessageToBeUsed = nextMessageToBeUsed % messages.size */
- /*val message = messages(nextMessageToBeUsed)*/
+ /* nextMessageToBeUsed = nextMessageToBeUsed % messages.size */
+ /* val message = messages(nextMessageToBeUsed)*/
  val message = messages.dequeue
  val chunk = message.getChunkForSending(defaultChunkSize)
  if (chunk.isDefined) {
@@ -262,7 +262,7 @@ class SendingConnection(val address: InetSocketAddress, selector_ : Selector,

  val currentBuffers = new ArrayBuffer[ByteBuffer]()

- /*channel.socket.setSendBufferSize(256 * 1024)*/
+ /* channel.socket.setSendBufferSize(256 * 1024)*/

  override def getRemoteAddress() = address

@@ -355,7 +355,7 @@ class SendingConnection(val address: InetSocketAddress, selector_ : Selector,
  }
  case None => {
  // changeConnectionKeyInterest(0)
- /*key.interestOps(0)*/
+ /* key.interestOps(0)*/
  return false
  }
  }
@@ -540,10 +540,10 @@ private[spark] class ReceivingConnection(
  return false
  }

- /*logDebug("Read " + bytesRead + " bytes for the buffer")*/
+ /* logDebug("Read " + bytesRead + " bytes for the buffer")*/

  if (currentChunk.buffer.remaining == 0) {
- /*println("Filled buffer at " + System.currentTimeMillis)*/
+ /* println("Filled buffer at " + System.currentTimeMillis)*/
  val bufferMessage = inbox.getMessageForChunk(currentChunk).get
  if (bufferMessage.isCompletelyReceived) {
  bufferMessage.flip

core/src/main/scala/org/apache/spark/network/ConnectionManager.scala

Lines changed: 9 additions & 9 deletions
@@ -505,7 +505,7 @@ private[spark] class ConnectionManager(port: Int, conf: SparkConf,
  }
  }
  handleMessageExecutor.execute(runnable)
- /*handleMessage(connection, message)*/
+ /* handleMessage(connection, message)*/
  }

  private def handleClientAuthentication(
@@ -733,7 +733,7 @@ private[spark] class ConnectionManager(port: Int, conf: SparkConf,
  logTrace("Sending Security [" + message + "] to [" + connManagerId + "]")
  val connection = connectionsById.getOrElseUpdate(connManagerId, startNewConnection())

- //send security message until going connection has been authenticated
+ // send security message until going connection has been authenticated
  connection.send(message)

  wakeupSelector()
@@ -859,14 +859,14 @@ private[spark] object ConnectionManager {
  None
  })

- /*testSequentialSending(manager)*/
- /*System.gc()*/
+ /* testSequentialSending(manager)*/
+ /* System.gc()*/

- /*testParallelSending(manager)*/
- /*System.gc()*/
+ /* testParallelSending(manager)*/
+ /* System.gc()*/

- /*testParallelDecreasingSending(manager)*/
- /*System.gc()*/
+ /* testParallelDecreasingSending(manager)*/
+ /* System.gc()*/

  testContinuousSending(manager)
  System.gc()
@@ -948,7 +948,7 @@ private[spark] object ConnectionManager {
  val ms = finishTime - startTime
  val tput = mb * 1000.0 / ms
  println("--------------------------")
- /*println("Started at " + startTime + ", finished at " + finishTime) */
+ /* println("Started at " + startTime + ", finished at " + finishTime) */
  println("Sent " + mb + " MB in " + ms + " ms (" + tput + " MB/s)")
  println("--------------------------")
  println()

core/src/main/scala/org/apache/spark/network/ConnectionManagerTest.scala

Lines changed: 2 additions & 2 deletions
@@ -47,8 +47,8 @@ private[spark] object ConnectionManagerTest extends Logging{
  val slaves = slavesFile.mkString.split("\n")
  slavesFile.close()

- /*println("Slaves")*/
- /*slaves.foreach(println)*/
+ /* println("Slaves")*/
+ /* slaves.foreach(println)*/
  val tasknum = if (args.length > 2) args(2).toInt else slaves.length
  val size = ( if (args.length > 3) (args(3).toInt) else 10 ) * 1024 * 1024
  val count = if (args.length > 4) args(4).toInt else 3

core/src/main/scala/org/apache/spark/network/ReceiverTest.scala

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ private[spark] object ReceiverTest {
  println("Started connection manager with id = " + manager.id)

  manager.onReceiveMessage((msg: Message, id: ConnectionManagerId) => {
- /*println("Received [" + msg + "] from [" + id + "] at " + System.currentTimeMillis)*/
+ /* println("Received [" + msg + "] from [" + id + "] at " + System.currentTimeMillis)*/
  val buffer = ByteBuffer.wrap("response".getBytes)
  Some(Message.createBufferMessage(buffer, msg.id))
  })

core/src/main/scala/org/apache/spark/network/SenderTest.scala

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ private[spark] object SenderTest {
  (0 until count).foreach(i => {
  val dataMessage = Message.createBufferMessage(buffer.duplicate)
  val startTime = System.currentTimeMillis
- /*println("Started timer at " + startTime)*/
+ /* println("Started timer at " + startTime)*/
  val responseStr = manager.sendMessageReliablySync(targetConnectionManagerId, dataMessage)
  .map { response =>
  val buffer = response.asInstanceOf[BufferMessage].buffers(0)

core/src/main/scala/org/apache/spark/network/netty/FileHeader.scala

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ private[spark] class FileHeader (
  buf.writeInt(fileLen)
  buf.writeInt(blockId.name.length)
  blockId.name.foreach((x: Char) => buf.writeByte(x))
- //padding the rest of header
+ // padding the rest of header
  if (FileHeader.HEADER_SIZE - buf.readableBytes > 0 ) {
  buf.writeZero(FileHeader.HEADER_SIZE - buf.readableBytes)
  } else {

core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala

Lines changed: 1 addition & 1 deletion
@@ -753,7 +753,7 @@ class DAGScheduler(
  val properties = if (stageIdToActiveJob.contains(jobId)) {
  stageIdToActiveJob(stage.jobId).properties
  } else {
- //this stage will be assigned to "default" pool
+ // this stage will be assigned to "default" pool
  null
  }

core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala

Lines changed: 1 addition & 1 deletion
@@ -172,7 +172,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, actorSystem: A
  properties += ((key, value))
  }
  }
- //TODO (prashant) send conf instead of properties
+ // TODO (prashant) send conf instead of properties
  driverActor = actorSystem.actorOf(
  Props(new DriverActor(properties)), name = CoarseGrainedSchedulerBackend.ACTOR_NAME)
  }

core/src/main/scala/org/apache/spark/storage/BlockFetcherIterator.scala

Lines changed: 1 addition & 1 deletion
@@ -284,7 +284,7 @@ object BlockFetcherIterator {
  }
  } catch {
  case x: InterruptedException => logInfo("Copier Interrupted")
- //case _ => throw new SparkException("Exception Throw in Shuffle Copier")
+ // case _ => throw new SparkException("Exception Throw in Shuffle Copier")
  }
  }
  }

core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala

Lines changed: 5 additions & 5 deletions
@@ -112,7 +112,7 @@ private[spark] object ClosureCleaner extends Logging {
  accessedFields(cls) = Set[String]()
  for (cls <- func.getClass :: innerClasses)
  getClassReader(cls).accept(new FieldAccessFinder(accessedFields), 0)
- //logInfo("accessedFields: " + accessedFields)
+ // logInfo("accessedFields: " + accessedFields)

  val inInterpreter = {
  try {
@@ -139,21 +139,21 @@ private[spark] object ClosureCleaner extends Logging {
  val field = cls.getDeclaredField(fieldName)
  field.setAccessible(true)
  val value = field.get(obj)
- //logInfo("1: Setting " + fieldName + " on " + cls + " to " + value);
+ // logInfo("1: Setting " + fieldName + " on " + cls + " to " + value);
  field.set(outer, value)
  }
  }

  if (outer != null) {
- //logInfo("2: Setting $outer on " + func.getClass + " to " + outer);
+ // logInfo("2: Setting $outer on " + func.getClass + " to " + outer);
  val field = func.getClass.getDeclaredField("$outer")
  field.setAccessible(true)
  field.set(func, outer)
  }
  }

  private def instantiateClass(cls: Class[_], outer: AnyRef, inInterpreter: Boolean): AnyRef = {
- //logInfo("Creating a " + cls + " with outer = " + outer)
+ // logInfo("Creating a " + cls + " with outer = " + outer)
  if (!inInterpreter) {
  // This is a bona fide closure class, whose constructor has no effects
  // other than to set its fields, so use its constructor
@@ -170,7 +170,7 @@ private[spark] object ClosureCleaner extends Logging {
  val newCtor = rf.newConstructorForSerialization(cls, parentCtor)
  val obj = newCtor.newInstance().asInstanceOf[AnyRef]
  if (outer != null) {
- //logInfo("3: Setting $outer on " + cls + " to " + outer);
+ // logInfo("3: Setting $outer on " + cls + " to " + outer);
  val field = cls.getDeclaredField("$outer")
  field.setAccessible(true)
  field.set(obj, outer)

core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ private[akka] class IndestructibleActorSystemImpl(
  if (isFatalError(cause) && !settings.JvmExitOnFatalError) {
  log.error(cause, "Uncaught fatal error from thread [{}] not shutting down " +
  "ActorSystem [{}] tolerating and continuing.... ", thread.getName, name)
- //shutdown() //TODO make it configurable
+ // shutdown() //TODO make it configurable
  } else {
  fallbackHandler.uncaughtException(thread, cause)
  }

core/src/main/scala/org/apache/spark/util/MutablePair.scala

Lines changed: 2 additions & 2 deletions
@@ -24,8 +24,8 @@ package org.apache.spark.util
  * @param _1 Element 1 of this MutablePair
  * @param _2 Element 2 of this MutablePair
  */
- case class MutablePair[@specialized(Int, Long, Double, Char, Boolean/*, AnyRef*/) T1,
- @specialized(Int, Long, Double, Char, Boolean/*, AnyRef*/) T2]
+ case class MutablePair[@specialized(Int, Long, Double, Char, Boolean/* , AnyRef*/) T1,
+ @specialized(Int, Long, Double, Char, Boolean/* , AnyRef*/) T2]
  (var _1: T1, var _2: T2)
  extends Product2[T1, T2]
  {

core/src/test/scala/org/apache/spark/AccumulatorSuite.scala

Lines changed: 3 additions & 3 deletions
@@ -66,7 +66,7 @@ class AccumulatorSuite extends FunSuite with ShouldMatchers with LocalSparkConte

  test ("add value to collection accumulators") {
  val maxI = 1000
- for (nThreads <- List(1, 10)) { //test single & multi-threaded
+ for (nThreads <- List(1, 10)) { // test single & multi-threaded
  sc = new SparkContext("local[" + nThreads + "]", "test")
  val acc: Accumulable[mutable.Set[Any], Any] = sc.accumulable(new mutable.HashSet[Any]())
  val d = sc.parallelize(1 to maxI)
@@ -83,7 +83,7 @@ class AccumulatorSuite extends FunSuite with ShouldMatchers with LocalSparkConte

  test ("value not readable in tasks") {
  val maxI = 1000
- for (nThreads <- List(1, 10)) { //test single & multi-threaded
+ for (nThreads <- List(1, 10)) { // test single & multi-threaded
  sc = new SparkContext("local[" + nThreads + "]", "test")
  val acc: Accumulable[mutable.Set[Any], Any] = sc.accumulable(new mutable.HashSet[Any]())
  val d = sc.parallelize(1 to maxI)
@@ -124,7 +124,7 @@ class AccumulatorSuite extends FunSuite with ShouldMatchers with LocalSparkConte

  test ("localValue readable in tasks") {
  val maxI = 1000
- for (nThreads <- List(1, 10)) { //test single & multi-threaded
+ for (nThreads <- List(1, 10)) { // test single & multi-threaded
  sc = new SparkContext("local[" + nThreads + "]", "test")
  val acc: Accumulable[mutable.Set[Any], Any] = sc.accumulable(new mutable.HashSet[Any]())
  val groupedInts = (1 to (maxI/20)).map {x => (20 * (x - 1) to 20 * x).toSet}

core/src/test/scala/org/apache/spark/CheckpointSuite.scala

Lines changed: 0 additions & 1 deletion
@@ -432,7 +432,6 @@ object CheckpointSuite {
  // This is a custom cogroup function that does not use mapValues like
  // the PairRDDFunctions.cogroup()
  def cogroup[K, V](first: RDD[(K, V)], second: RDD[(K, V)], part: Partitioner) = {
- //println("First = " + first + ", second = " + second)
  new CoGroupedRDD[K](
  Seq(first.asInstanceOf[RDD[(K, _)]], second.asInstanceOf[RDD[(K, _)]]),
  part

core/src/test/scala/org/apache/spark/PartitioningSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ class PartitioningSuite extends FunSuite with SharedSparkContext with PrivateMet

  assert(intercept[SparkException]{ arrs.distinct() }.getMessage.contains("array"))
  // We can't catch all usages of arrays, since they might occur inside other collections:
- //assert(fails { arrPairs.distinct() })
+ // assert(fails { arrPairs.distinct() })
  assert(intercept[SparkException]{ arrPairs.partitionBy(new HashPartitioner(2)) }.getMessage.contains("array"))
  assert(intercept[SparkException]{ arrPairs.join(arrPairs) }.getMessage.contains("array"))
  assert(intercept[SparkException]{ arrPairs.leftOuterJoin(arrPairs) }.getMessage.contains("array"))

core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@ class SparkListenerSuite extends FunSuite with LocalSparkContext with ShouldMatc
  val listener = new SaveStageAndTaskInfo
  sc.addSparkListener(listener)
  sc.addSparkListener(new StatsReportListener)
- //just to make sure some of the tasks take a noticeable amount of time
+ // just to make sure some of the tasks take a noticeable amount of time
  val w = {i:Int =>
  if (i == 0)
  Thread.sleep(100)

core/src/test/scala/org/apache/spark/util/UtilsSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ class UtilsSuite extends FunSuite {
  }

  test("copyStream") {
- //input array initialization
+ // input array initialization
  val bytes = Array.ofDim[Byte](9000)
  Random.nextBytes(bytes)

examples/src/main/scala/org/apache/spark/examples/LocalALS.scala

Lines changed: 0 additions & 1 deletion
@@ -53,7 +53,6 @@ object LocalALS {
  for (i <- 0 until M; j <- 0 until U) {
  r.set(i, j, blas.ddot(ms(i), us(j)))
  }
- //println("R: " + r)
  blas.daxpy(-1, targetR, r)
  val sumSqs = r.aggregate(Functions.plus, Functions.square)
  sqrt(sumSqs / (M * U))

examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ object SimpleSkewedGroupByTest {

  println("RESULT: " + pairs1.groupByKey(numReducers).count)
  // Print how many keys each reducer got (for debugging)
- //println("RESULT: " + pairs1.groupByKey(numReducers)
+ // println("RESULT: " + pairs1.groupByKey(numReducers)
  // .map{case (k,v) => (k, v.size)}
  // .collectAsMap)

examples/src/main/scala/org/apache/spark/examples/SparkALS.scala

Lines changed: 0 additions & 1 deletion
@@ -54,7 +54,6 @@ object SparkALS {
  for (i <- 0 until M; j <- 0 until U) {
  r.set(i, j, blas.ddot(ms(i), us(j)))
  }
- //println("R: " + r)
  blas.daxpy(-1, targetR, r)
  val sumSqs = r.aggregate(Functions.plus, Functions.square)
  sqrt(sumSqs / (M * U))

examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala

Lines changed: 0 additions & 2 deletions
@@ -34,8 +34,6 @@ object SparkHdfsLR {
  case class DataPoint(x: Vector, y: Double)

  def parsePoint(line: String): DataPoint = {
- //val nums = line.split(' ').map(_.toDouble)
- //return DataPoint(new Vector(nums.slice(1, D+1)), nums(0))
  val tok = new java.util.StringTokenizer(line, " ")
  var y = tok.nextToken.toDouble
  var x = new Array[Double](D)
