
Commit 2ca74db

Couple minor fixes.
1 parent ea98ea1 commit 2ca74db

File tree

1 file changed: +12 −6 lines


sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala

Lines changed: 12 additions & 6 deletions
@@ -118,8 +118,14 @@ class DataFrame protected[sql](
   }
 
   /** Left here for compatibility reasons. */
+  @deprecated("1.3.0", "use toDataFrame")
   def toSchemaRDD: DataFrame = this
 
+  /**
+   * Return the object itself. Used to force an implicit conversion from RDD to DataFrame in Scala.
+   */
+  def toDF: DataFrame = this
+
   /** Return the schema of this [[DataFrame]]. */
   override def schema: StructType = queryExecution.analyzed.schema
 
@@ -501,7 +507,7 @@ class DataFrame protected[sql](
 
   /**
    * Registers this RDD as a temporary table using the given name. The lifetime of this temporary
-   * table is tied to the [[SQLContext]] that was used to create this SchemaRDD.
+   * table is tied to the [[SQLContext]] that was used to create this DataFrame.
    *
    * @group schema
    */
@@ -510,20 +516,20 @@ class DataFrame protected[sql](
   }
 
   /**
-   * Saves the contents of this [[DataFrame]] as a parquet file, preserving the schema. Files that
-   * are written out using this method can be read back in as a [[DataFrame]] using the `parquetFile`
-   * function.
+   * Saves the contents of this [[DataFrame]] as a parquet file, preserving the schema.
+   * Files that are written out using this method can be read back in as a [[DataFrame]]
+   * using the `parquetFile` function in [[SQLContext]].
    */
   override def saveAsParquetFile(path: String): Unit = {
     sqlContext.executePlan(WriteToFile(path, logicalPlan)).toRdd
   }
 
   /**
    * :: Experimental ::
-   * Creates a table from the the contents of this SchemaRDD. This will fail if the table already
+   * Creates a table from the the contents of this DataFrame. This will fail if the table already
    * exists.
    *
-   * Note that this currently only works with SchemaRDDs that are created from a HiveContext as
+   * Note that this currently only works with DataFrame that are created from a HiveContext as
    * there is no notion of a persisted catalog in a standard SQL context. Instead you can write
    * an RDD out to a parquet file, and then register that file as a table. This "table" can then
    * be the target of an `insertInto`.
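
For context, the docstrings touched by this commit describe the usual DataFrame round trip: register a temporary table whose lifetime is tied to the owning SQLContext, write the data out with saveAsParquetFile, and read it back with parquetFile on the SQLContext. Below is a minimal sketch against the Spark 1.3-era API; the Person case class, the local master setting, and the /tmp output path are illustrative assumptions and not part of this commit.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

// Hypothetical record type used only for this sketch.
case class Person(name: String, age: Int)

object DataFrameRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("df-sketch").setMaster("local[*]"))
    val sqlContext = new SQLContext(sc)

    // Build a DataFrame from an RDD of case classes.
    val people = sqlContext.createDataFrame(
      sc.parallelize(Seq(Person("Alice", 29), Person("Bob", 31))))

    // The temporary table's lifetime is tied to the SQLContext that created this DataFrame.
    people.registerTempTable("people")
    sqlContext.sql("SELECT name FROM people WHERE age > 30").collect().foreach(println)

    // Write out as Parquet; files written this way can be read back via SQLContext.parquetFile.
    people.saveAsParquetFile("/tmp/people.parquet")  // assumed writable path
    val restored = sqlContext.parquetFile("/tmp/people.parquet")
    println(restored.schema)

    sc.stop()
  }
}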

0 commit comments