
Commit 8f632d7

dengziming authored and HyukjinKwon committed
[MINOR][DOCS] Fix a few typos in the java docs
JIRA: https://issues.apache.org/jira/browse/SPARK-29050

'a hdfs' changed to 'an hdfs'
'an unique' changed to 'a unique'
'an url' changed to 'a url'
'a error' changed to 'an error'

Closes #25756 from dengziming/feature_fix_typos.

Authored-by: dengziming <dengziming@growingio.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
1 parent eec728a · commit 8f632d7

File tree

13 files changed: +17 −17 lines changed

R/pkg/R/context.R

Lines changed: 2 additions & 2 deletions
@@ -301,7 +301,7 @@ broadcastRDD <- function(sc, object) {
 #' Set the checkpoint directory
 #'
 #' Set the directory under which RDDs are going to be checkpointed. The
-#' directory must be a HDFS path if running on a cluster.
+#' directory must be an HDFS path if running on a cluster.
 #'
 #' @param sc Spark Context to use
 #' @param dirName Directory path
@@ -446,7 +446,7 @@ setLogLevel <- function(level) {
 #' Set checkpoint directory
 #'
 #' Set the directory under which SparkDataFrame are going to be checkpointed. The directory must be
-#' a HDFS path if running on a cluster.
+#' an HDFS path if running on a cluster.
 #'
 #' @rdname setCheckpointDir
 #' @param directory Directory path to checkpoint to

core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala

Lines changed: 1 addition & 1 deletion
@@ -609,7 +609,7 @@ class JavaSparkContext(val sc: SparkContext) extends Closeable {

   /**
    * Set the directory under which RDDs are going to be checkpointed. The directory must
-   * be a HDFS path if running on a cluster.
+   * be an HDFS path if running on a cluster.
    */
   def setCheckpointDir(dir: String) {
     sc.setCheckpointDir(dir)
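
For context, a minimal usage sketch of the API documented above; the app name, master, and paths are illustrative placeholders, not part of this commit:

import org.apache.spark.{SparkConf, SparkContext}

val sc = new SparkContext(new SparkConf().setAppName("checkpoint-demo").setMaster("local[2]"))
// On a cluster this must be an HDFS path, e.g. "hdfs://<namenode>:9000/checkpoints";
// a plain local directory only works in local mode.
sc.setCheckpointDir("/tmp/checkpoints")

val rdd = sc.parallelize(1 to 100).map(_ * 2)
rdd.checkpoint()     // mark the RDD for checkpointing
println(rdd.count()) // the first action materializes the checkpoint
sc.stop()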

core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala

Lines changed: 1 addition & 1 deletion
@@ -124,7 +124,7 @@ private[spark] class MetricsSystem private (
    * If either ID is not available, this defaults to just using <source name>.
    *
    * @param source Metric source to be named by this method.
-   * @return An unique metric name for each combination of
+   * @return A unique metric name for each combination of
    *         application, executor/driver and metric source.
    */
   private[spark] def buildRegistryName(source: Source): String = {
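
The naming rule this comment describes can be illustrated with a small sketch; the helper below is hypothetical and only mirrors the documented behavior (fall back to the bare source name when either ID is missing), not the actual MetricsSystem code:

// Hypothetical helper mirroring the documented rule:
// <app id>.<executor/driver id>.<source name>, else just <source name>.
def registryName(appId: Option[String], executorId: Option[String], sourceName: String): String =
  (appId, executorId) match {
    case (Some(app), Some(exec)) => s"$app.$exec.$sourceName"
    case _                       => sourceName
  }

registryName(Some("app-20190915"), Some("driver"), "jvm") // "app-20190915.driver.jvm"
registryName(None, Some("driver"), "jvm")                 // "jvm"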

core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ import org.apache.spark.util.Utils

 /**
  * :: DeveloperApi ::
- * This class represent an unique identifier for a BlockManager.
+ * This class represent a unique identifier for a BlockManager.
  *
  * The first 2 constructors of this class are made private to ensure that BlockManagerId objects
  * can be created only using the apply method in the companion object. This allows de-duplication
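
The construction pattern the comment describes (private constructors plus an apply method in the companion object) enables de-duplication by interning instances in a cache. A rough sketch of that pattern, with illustrative names rather than the real BlockManagerId fields:

import java.util.concurrent.ConcurrentHashMap

// All construction funnels through apply(), so equal IDs share one instance.
class CachedId private (val host: String, val port: Int)

object CachedId {
  private val cache = new ConcurrentHashMap[(String, Int), CachedId]()

  def apply(host: String, port: Int): CachedId =
    cache.computeIfAbsent((host, port), k => new CachedId(k._1, k._2))
}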

core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -600,7 +600,7 @@ class SparkSubmitSuite
   }

   // TODO(SPARK-9603): Building a package is flaky on Jenkins Maven builds.
-  // See https://gist.github.com/shivaram/3a2fecce60768a603dac for a error log
+  // See https://gist.github.com/shivaram/3a2fecce60768a603dac for an error log
   ignore("correctly builds R packages included in a jar with --packages") {
     assume(RUtils.isRInstalled, "R isn't installed on this machine.")
     assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")

docs/spark-standalone.md

Lines changed: 1 addition & 1 deletion
@@ -432,7 +432,7 @@ In addition, detailed log output for each job is also written to the work direct

 # Running Alongside Hadoop

-You can run Spark alongside your existing Hadoop cluster by just launching it as a separate service on the same machines. To access Hadoop data from Spark, just use a hdfs:// URL (typically `hdfs://<namenode>:9000/path`, but you can find the right URL on your Hadoop Namenode's web UI). Alternatively, you can set up a separate cluster for Spark, and still have it access HDFS over the network; this will be slower than disk-local access, but may not be a concern if you are still running in the same local area network (e.g. you place a few Spark machines on each rack that you have Hadoop on).
+You can run Spark alongside your existing Hadoop cluster by just launching it as a separate service on the same machines. To access Hadoop data from Spark, just use an hdfs:// URL (typically `hdfs://<namenode>:9000/path`, but you can find the right URL on your Hadoop Namenode's web UI). Alternatively, you can set up a separate cluster for Spark, and still have it access HDFS over the network; this will be slower than disk-local access, but may not be a concern if you are still running in the same local area network (e.g. you place a few Spark machines on each rack that you have Hadoop on).


 # Configuring Ports for Network Security
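
For example, reading such an hdfs:// URL from a Spark application might look like the sketch below; `<namenode>` is the same placeholder as in the paragraph above, and the app settings are illustrative:

import org.apache.spark.{SparkConf, SparkContext}

val sc = new SparkContext(new SparkConf().setAppName("hdfs-demo"))
// The right URL is shown on your Hadoop Namenode's web UI.
val lines = sc.textFile("hdfs://<namenode>:9000/path")
println(lines.count())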

external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisCheckpointer.scala

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@ private[kinesis] class KinesisCheckpointer(
     if (checkpointer != null) {
       try {
         // We must call `checkpoint()` with no parameter to finish reading shards.
-        // See an URL below for details:
+        // See a URL below for details:
         // https://forums.aws.amazon.com/thread.jspa?threadID=244218
         KinesisRecordProcessor.retryRandom(checkpointer.checkpoint(), 4, 100)
       } catch {
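
For context, the `KinesisRecordProcessor.retryRandom(checkpointer.checkpoint(), 4, 100)` call above retries the checkpoint with a random backoff. A hedged sketch of such a helper, assuming the two numeric arguments mean the number of attempts and the maximum random backoff in milliseconds:

import scala.util.Random

// Sketch only, not the actual KinesisRecordProcessor code: evaluate a by-name
// expression, retrying with a random sleep until the attempts are exhausted.
def retryRandom[T](expression: => T, numRetries: Int, maxBackOffMillis: Int): T =
  try expression catch {
    case _: Throwable if numRetries > 1 =>
      Thread.sleep(Random.nextInt(maxBackOffMillis))
      retryRandom(expression, numRetries - 1, maxBackOffMillis)
  }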

python/pyspark/context.py

Lines changed: 1 addition & 1 deletion
@@ -950,7 +950,7 @@ def addPyFile(self, path):
     def setCheckpointDir(self, dirName):
         """
         Set the directory under which RDDs are going to be checkpointed. The
-        directory must be a HDFS path if running on a cluster.
+        directory must be an HDFS path if running on a cluster.
         """
         self._jsc.sc().setCheckpointDir(dirName)

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala

Lines changed: 2 additions & 2 deletions
@@ -43,7 +43,7 @@ import org.apache.spark.util.{SizeEstimator, Utils}

 /**
  * An implementation of [[StateStoreProvider]] and [[StateStore]] in which all the data is backed
- * by files in a HDFS-compatible file system. All updates to the store has to be done in sets
+ * by files in an HDFS-compatible file system. All updates to the store has to be done in sets
  * transactionally, and each set of updates increments the store's version. These versions can
  * be used to re-execute the updates (by retries in RDD operations) on the correct version of
  * the store, and regenerate the store version.
@@ -79,7 +79,7 @@ private[state] class HDFSBackedStateStoreProvider extends StateStoreProvider wit
   // java.util.ConcurrentModificationException
   type MapType = java.util.concurrent.ConcurrentHashMap[UnsafeRow, UnsafeRow]

-  /** Implementation of [[StateStore]] API which is backed by a HDFS-compatible file system */
+  /** Implementation of [[StateStore]] API which is backed by an HDFS-compatible file system */
   class HDFSBackedStateStore(val version: Long, mapToUpdate: MapType)
     extends StateStore {
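
The versioning contract in the class comment — updates applied as transactional sets, each commit producing a new version that can be re-read or re-derived — can be illustrated with an in-memory toy; this is not the actual HDFS-backed implementation:

// Toy versioned store: every commit appends an immutable snapshot, so any
// earlier version can still be read (or updates replayed) after later commits.
class VersionedStore[K, V] {
  private var snapshots = Vector(Map.empty[K, V])

  def latestVersion: Long = snapshots.size - 1

  /** Apply one set of updates transactionally; returns the new version. */
  def commit(updates: Map[K, V]): Long = synchronized {
    snapshots = snapshots :+ (snapshots.last ++ updates)
    latestVersion
  }

  /** Read the state as of a given version. */
  def snapshot(version: Long): Map[K, V] = snapshots(version.toInt)
}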

sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryListener.scala

Lines changed: 2 additions & 2 deletions
@@ -79,7 +79,7 @@ object StreamingQueryListener {

   /**
    * Event representing the start of a query
-   * @param id An unique query id that persists across restarts. See `StreamingQuery.id()`.
+   * @param id A unique query id that persists across restarts. See `StreamingQuery.id()`.
    * @param runId A query id that is unique for every start/restart. See `StreamingQuery.runId()`.
    * @param name User-specified name of the query, null if not specified.
    * @since 2.1.0
@@ -101,7 +101,7 @@ object StreamingQueryListener {
   /**
    * Event representing that termination of a query.
    *
-   * @param id An unique query id that persists across restarts. See `StreamingQuery.id()`.
+   * @param id A unique query id that persists across restarts. See `StreamingQuery.id()`.
    * @param runId A query id that is unique for every start/restart. See `StreamingQuery.runId()`.
    * @param exception The exception message of the query if the query was terminated
    *                  with an exception. Otherwise, it will be `None`.
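
A short usage sketch of the events documented above: `id` stays stable across restarts while `runId` is new on each start/restart. Only the documented listener API is used; attaching it assumes an existing SparkSession named `spark`:

import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.sql.streaming.StreamingQueryListener._

val listener = new StreamingQueryListener {
  override def onQueryStarted(event: QueryStartedEvent): Unit =
    println(s"started: id=${event.id} runId=${event.runId} name=${event.name}")

  override def onQueryProgress(event: QueryProgressEvent): Unit = ()

  override def onQueryTerminated(event: QueryTerminatedEvent): Unit =
    // `id` matches the started event even after a restart; `exception` is
    // Some(message) only if the query failed.
    println(s"terminated: id=${event.id} runId=${event.runId} exception=${event.exception}")
}

// spark.streams.addListener(listener)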
