@@ -145,7 +145,7 @@ class EMLDAOptimizer extends LDAOptimizer {
     this
   }

-  private[clustering] override def next(): EMLDAOptimizer = {
+  override private[clustering] def next(): EMLDAOptimizer = {
     require(graph != null, "graph is null, EMLDAOptimizer not initialized.")

     val eta = topicConcentration
@@ -202,7 +202,7 @@ class EMLDAOptimizer extends LDAOptimizer {
     graph.vertices.filter(isTermVertex).values.fold(BDV.zeros[Double](numTopics))(_ += _)
   }

-  private[clustering] override def getLDAModel(iterationTimes: Array[Double]): LDAModel = {
+  override private[clustering] def getLDAModel(iterationTimes: Array[Double]): LDAModel = {
     require(graph != null, "graph is null, EMLDAOptimizer not initialized.")
     this.graphCheckpointer.deleteAllCheckpoints()
     new DistributedLDAModel(this, iterationTimes)
@@ -295,7 +295,7 @@ class OnlineLDAOptimizer extends LDAOptimizer {
     this
   }

-  private[clustering] override def initialize(docs: RDD[(Long, Vector)], lda: LDA): LDAOptimizer = {
+  override private[clustering] def initialize(docs: RDD[(Long, Vector)], lda: LDA): LDAOptimizer = {
     this.k = lda.getK
     this.corpusSize = docs.count()
     this.vocabSize = docs.first()._2.size
@@ -318,7 +318,7 @@ class OnlineLDAOptimizer extends LDAOptimizer {
    * model, and it will update the topic distribution adaptively for the terms appearing in the
    * subset.
    */
-  private[clustering] override def next(): OnlineLDAOptimizer = {
+  override private[clustering] def next(): OnlineLDAOptimizer = {
     iteration += 1
     val batch = docs.sample(withReplacement = true, miniBatchFraction, randomGenerator.nextLong())
     if (batch.isEmpty()) return this