mllib/src/main/scala/org/apache/spark/mllib/clustering: 1 file changed, +9 -9 lines changed

The diff is whitespace-only: the Scaladoc equation lines and the parameter list of logLikelihoodBound are re-indented, so each -/+ pair below is textually identical apart from leading whitespace.

@@ -273,9 +273,9 @@ class LocalLDAModel private[clustering] (
 
   /**
    * Estimate the variational likelihood bound of from `documents`:
-   *    log p(documents) >= E_q[log p(documents)] - E_q[log q(documents)]
+   *    log p(documents) >= E_q[log p(documents)] - E_q[log q(documents)]
    * This bound is derived by decomposing the LDA model to:
-   *    log p(documents) = E_q[log p(documents)] - E_q[log q(documents)] + D(q|p)
+   *    log p(documents) = E_q[log p(documents)] - E_q[log q(documents)] + D(q|p)
    * and noting that the KL-divergence D(q|p) >= 0.
    *
    * See Equation (16) in original Online LDA paper, as well as Appendix A.3 in the JMLR version of
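
For reference, the bound in the Scaladoc is the standard evidence lower bound (ELBO), and the comment's E_q[log p(documents)] is shorthand for the expected complete-data log likelihood. Writing h = (theta, beta, z) for the latent variables and q for the variational distribution, the decomposition reads, in LaTeX:

    \log p(\text{documents})
        = \mathbb{E}_q[\log p(\text{documents}, h)] - \mathbb{E}_q[\log q(h)]
          + D_{\mathrm{KL}}\big(q(h) \,\|\, p(h \mid \text{documents})\big)

Since the KL term is nonnegative, dropping it gives

    \log p(\text{documents}) \ge \mathbb{E}_q[\log p(\text{documents}, h)] - \mathbb{E}_q[\log q(h)]

which is the bound quoted above and Equation (16) of Hoffman et al.'s Online LDA paper.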
@@ -290,13 +290,13 @@ class LocalLDAModel private[clustering] (
    * @param vocabSize number of unique terms in the entire test corpus
    */
   private def logLikelihoodBound(
-      documents: RDD[(Long, Vector)],
-      alpha: Vector,
-      eta: Double,
-      lambda: BDM[Double],
-      gammaShape: Double,
-      k: Int,
-      vocabSize: Long): Double = {
+      documents: RDD[(Long, Vector)],
+      alpha: Vector,
+      eta: Double,
+      lambda: BDM[Double],
+      gammaShape: Double,
+      k: Int,
+      vocabSize: Long): Double = {
     val brzAlpha = alpha.toBreeze.toDenseVector
     // transpose because dirichletExpectation normalizes by row and we need to normalize
     // by topic (columns of lambda)
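
To illustrate the transpose comment: the Dirichlet-expectation helper computes E[log X] for a Dirichlet-distributed X row by row, so a matrix whose topics live in its columns (as lambda does here) must be transposed first. A minimal standalone Breeze sketch of such a helper; the name dirichletExpectation mirrors Spark's private LDAUtils, but the body below is an illustrative reimplementation, not the project's exact code:

    import breeze.linalg._            // DenseMatrix, DenseVector, sum, and the * broadcasting marker
    import breeze.numerics.digamma

    // For X ~ Dirichlet(alpha), E[log X_j] = digamma(alpha_j) - digamma(sum_k alpha_k).
    // Each row of `alpha` is treated as one Dirichlet parameter vector.
    def dirichletExpectation(alpha: DenseMatrix[Double]): DenseMatrix[Double] = {
      val rowSums: DenseVector[Double] = sum(alpha(*, ::))  // one sum per row
      // Broadcast over columns: subtract digamma(rowSum) from every entry of its row.
      digamma(alpha)(::, *) - digamma(rowSums)
    }

logLikelihoodBound then computes Elogbeta along the lines of dirichletExpectation(lambda.t).t, so each topic (a column of lambda) is normalized over the vocabulary rather than each term over the topics.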
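Although logLikelihoodBound itself is private, it backs LocalLDAModel's public evaluation methods. A hedged usage sketch, assuming the Spark 1.5-era spark.mllib API in which logLikelihood and logPerplexity take the same (docId, termCounts) RDD as this method:

    import org.apache.spark.mllib.clustering.LocalLDAModel
    import org.apache.spark.mllib.linalg.Vector
    import org.apache.spark.rdd.RDD

    // corpus: (document id, term-count vector) pairs over the model's vocabulary
    def evaluate(model: LocalLDAModel, corpus: RDD[(Long, Vector)]): Unit = {
      // Both methods delegate to logLikelihoodBound internally.
      val bound = model.logLikelihood(corpus)    // lower bound on log p(corpus)
      val logPerp = model.logPerplexity(corpus)  // upper bound on per-token log perplexity
      println(s"log-likelihood bound: $bound, log perplexity bound: $logPerp")
    }

Because both values are variational bounds rather than exact likelihoods, they are best used to compare models on the same held-out corpus rather than as absolute quality numbers.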