[SPARK-14355][BUILD] Fix typos in Exception/Testcase/Comments and static analysis results #12139

Closed · wants to merge 1 commit
@@ -94,7 +94,7 @@ public TransportClientFactory(
this.context = Preconditions.checkNotNull(context);
this.conf = context.getConf();
this.clientBootstraps = Lists.newArrayList(Preconditions.checkNotNull(clientBootstraps));
- this.connectionPool = new ConcurrentHashMap<SocketAddress, ClientPool>();
+ this.connectionPool = new ConcurrentHashMap<>();
this.numConnectionsPerPeer = conf.numConnectionsPerPeer();
this.rand = new Random();
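
Note: many of the changes in this PR swap explicit type arguments for the Java 7 diamond operator, which lets the compiler infer them from the declared type. A minimal standalone illustration (not Spark code):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class DiamondDemo {
    public static void main(String[] args) {
        // Pre-Java 7 style: type arguments spelled out on both sides.
        ConcurrentMap<String, Integer> verbose = new ConcurrentHashMap<String, Integer>();
        // Java 7+ diamond operator: <String, Integer> is inferred.
        ConcurrentMap<String, Integer> concise = new ConcurrentHashMap<>();
        concise.put("answer", 42);
        System.out.println(concise.get("answer")); // 42
    }
}
```

Both forms compile to the same code; the diamond form simply removes the duplication that static analysis flagged.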

@@ -64,9 +64,9 @@ public class TransportResponseHandler extends MessageHandler<ResponseMessage> {

public TransportResponseHandler(Channel channel) {
this.channel = channel;
- this.outstandingFetches = new ConcurrentHashMap<StreamChunkId, ChunkReceivedCallback>();
- this.outstandingRpcs = new ConcurrentHashMap<Long, RpcResponseCallback>();
- this.streamCallbacks = new ConcurrentLinkedQueue<StreamCallback>();
+ this.outstandingFetches = new ConcurrentHashMap<>();
+ this.outstandingRpcs = new ConcurrentHashMap<>();
+ this.streamCallbacks = new ConcurrentLinkedQueue<>();
this.timeOfLastRequestNs = new AtomicLong(0);
}

@@ -63,7 +63,7 @@ public OneForOneStreamManager() {
// For debugging purposes, start with a random stream id to help identifying different streams.
// This does not need to be globally unique, only unique to this class.
nextStreamId = new AtomicLong((long) new Random().nextInt(Integer.MAX_VALUE) * 1000);
- streams = new ConcurrentHashMap<Long, StreamState>();
+ streams = new ConcurrentHashMap<>();
}
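
An aside on the unchanged line above: the `(long)` cast matters. `nextInt(Integer.MAX_VALUE) * 1000` evaluated in `int` arithmetic would overflow before widening; casting first keeps the multiplication in `long` range. A small demonstration:

```java
import java.util.concurrent.atomic.AtomicLong;

public class StreamIdSeedDemo {
    public static void main(String[] args) {
        // A fixed stand-in for new Random().nextInt(Integer.MAX_VALUE):
        int base = Integer.MAX_VALUE / 2;        // 1073741823
        System.out.println(base * 1000);         // -1000 after int overflow
        System.out.println((long) base * 1000);  // 1073741823000, as intended
        AtomicLong nextStreamId = new AtomicLong((long) base * 1000);
        System.out.println(nextStreamId.get());
    }
}
```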

@Override
@@ -37,7 +37,7 @@ public class ShuffleSecretManager implements SecretKeyHolder {
private static final String SPARK_SASL_USER = "sparkSaslUser";

public ShuffleSecretManager() {
- shuffleSecretMap = new ConcurrentHashMap<String, String>();
+ shuffleSecretMap = new ConcurrentHashMap<>();
}

/**
@@ -45,7 +45,7 @@ public int compare(UnsafeSorterIterator left, UnsafeSorterIterator right) {
}
}
};
- priorityQueue = new PriorityQueue<UnsafeSorterIterator>(numSpills, comparator);
+ priorityQueue = new PriorityQueue<>(numSpills, comparator);
}
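
For readers following along: the comparator-driven `PriorityQueue` above powers a k-way merge across sorted spill iterators. A self-contained toy version of the same pattern, with plain integers standing in for `UnsafeSorterIterator`:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMergeDemo {
    // A small peeking wrapper so the queue can order iterators by their head.
    static final class Run {
        final Iterator<Integer> it;
        int head;
        Run(Iterator<Integer> it) { this.it = it; this.head = it.next(); }
        boolean advance() {
            if (!it.hasNext()) return false;
            head = it.next();
            return true;
        }
    }

    static List<Integer> merge(List<List<Integer>> runs) {
        Comparator<Run> byHead = Comparator.comparingInt(r -> r.head);
        PriorityQueue<Run> queue = new PriorityQueue<>(Math.max(1, runs.size()), byHead);
        for (List<Integer> run : runs) {
            if (!run.isEmpty()) queue.add(new Run(run.iterator()));
        }
        List<Integer> out = new ArrayList<>();
        while (!queue.isEmpty()) {
            Run r = queue.poll();
            out.add(r.head);
            if (r.advance()) queue.add(r); // reinsert with the new head
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(
            Arrays.asList(1, 4, 7), Arrays.asList(2, 5), Arrays.asList(3, 6))));
        // [1, 2, 3, 4, 5, 6, 7]
    }
}
```

The invariant is the one the real merger relies on: the queue always surfaces the iterator whose current head is smallest, and an iterator is re-queued only while it still has elements.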

/**
2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/api/r/RRunner.scala
@@ -182,7 +182,7 @@ private[spark] class RRunner[U](
}
stream.flush()
} catch {
- // TODO: We should propogate this error to the task thread
+ // TODO: We should propagate this error to the task thread
case e: Exception =>
logError("R Writer thread got an exception", e)
} finally {
@@ -326,7 +326,7 @@ class GapSamplingReplacement(
/**
* Skip elements with replication factor zero (i.e. elements that won't be sampled).
* Samples 'k' from geometric distribution P(k) = (1-q)(q)^k, where q = e^(-f), that is
- * q is the probabililty of Poisson(0; f)
+ * q is the probability of Poisson(0; f)
*/
private def advance(): Unit = {
val u = math.max(rng.nextDouble(), epsilon)
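
The corrected comment describes geometric gap sampling. A sketch of the usual inverse-CDF derivation it alludes to (an illustration, not the actual implementation): with q = e^(-f), a skip count k with P(k) = (1-q)q^k can be drawn as floor(log(u)/log(q)) for uniform u.

```java
import java.util.Random;

public class GapSamplingDemo {
    public static void main(String[] args) {
        double f = 2.0;                // sampling parameter, as in the comment above
        double q = Math.exp(-f);       // q = P(Poisson(0; f))
        double epsilon = 1e-12;        // floor on u so log(u) stays finite
        Random rng = new Random(42);
        double u = Math.max(rng.nextDouble(), epsilon);
        long skip = (long) (Math.log(u) / Math.log(q)); // k ~ (1-q) * q^k
        System.out.println("skip " + skip + " elements before the next sample");
    }
}
```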
@@ -170,11 +170,11 @@ public Tuple2<TempShuffleBlockId, File> answer(
private UnsafeShuffleWriter<Object, Object> createWriter(
boolean transferToEnabled) throws IOException {
conf.set("spark.file.transferTo", String.valueOf(transferToEnabled));
- return new UnsafeShuffleWriter<Object, Object>(
+ return new UnsafeShuffleWriter<>(
blockManager,
shuffleBlockResolver,
taskMemoryManager,
- new SerializedShuffleHandle<Object, Object>(0, 1, shuffleDep),
+ new SerializedShuffleHandle<>(0, 1, shuffleDep),
0, // map id
taskContext,
conf
@@ -82,10 +82,10 @@ public static Tuple3<String, String, String> extractKey(String line) {
String user = m.group(3);
String query = m.group(5);
if (!user.equalsIgnoreCase("-")) {
- return new Tuple3<String, String, String>(ip, user, query);
+ return new Tuple3<>(ip, user, query);
}
}
- return new Tuple3<String, String, String>(null, null, null);
+ return new Tuple3<>(null, null, null);
}

public static Stats extractStats(String line) {
@@ -34,13 +34,13 @@ public static void main(String[] args) {
JavaSparkContext sc = new JavaSparkContext(conf);
// $example on$
List<Tuple2<double[], double[]>> data = Arrays.asList(
- new Tuple2<double[], double[]>(new double[]{0.0, 1.0}, new double[]{0.0, 2.0}),
- new Tuple2<double[], double[]>(new double[]{0.0, 2.0}, new double[]{0.0, 1.0}),
- new Tuple2<double[], double[]>(new double[]{}, new double[]{0.0}),
- new Tuple2<double[], double[]>(new double[]{2.0}, new double[]{2.0}),
- new Tuple2<double[], double[]>(new double[]{2.0, 0.0}, new double[]{2.0, 0.0}),
- new Tuple2<double[], double[]>(new double[]{0.0, 1.0, 2.0}, new double[]{0.0, 1.0}),
- new Tuple2<double[], double[]>(new double[]{1.0}, new double[]{1.0, 2.0})
+ new Tuple2<>(new double[]{0.0, 1.0}, new double[]{0.0, 2.0}),
+ new Tuple2<>(new double[]{0.0, 2.0}, new double[]{0.0, 1.0}),
+ new Tuple2<>(new double[]{}, new double[]{0.0}),
+ new Tuple2<>(new double[]{2.0}, new double[]{2.0}),
+ new Tuple2<>(new double[]{2.0, 0.0}, new double[]{2.0, 0.0}),
+ new Tuple2<>(new double[]{0.0, 1.0, 2.0}, new double[]{0.0, 1.0}),
+ new Tuple2<>(new double[]{1.0}, new double[]{1.0, 2.0})
);
JavaRDD<Tuple2<double[], double[]>> scoreAndLabels = sc.parallelize(data);

@@ -40,11 +40,11 @@ public static void main(String[] args) {
@SuppressWarnings("unchecked")
// $example on$
JavaRDD<Tuple3<Long, Long, Double>> similarities = sc.parallelize(Lists.newArrayList(
- new Tuple3<Long, Long, Double>(0L, 1L, 0.9),
- new Tuple3<Long, Long, Double>(1L, 2L, 0.9),
- new Tuple3<Long, Long, Double>(2L, 3L, 0.9),
- new Tuple3<Long, Long, Double>(3L, 4L, 0.1),
- new Tuple3<Long, Long, Double>(4L, 5L, 0.9)));
+ new Tuple3<>(0L, 1L, 0.9),
+ new Tuple3<>(1L, 2L, 0.9),
+ new Tuple3<>(2L, 3L, 0.9),
+ new Tuple3<>(3L, 4L, 0.1),
+ new Tuple3<>(4L, 5L, 0.9)));

PowerIterationClustering pic = new PowerIterationClustering()
.setK(2)
@@ -36,7 +36,7 @@ public static void main(String[] args) {
JavaSparkContext jsc = new JavaSparkContext(conf);

// $example on$
- List<Tuple2<Integer, Character>> list = new ArrayList<Tuple2<Integer, Character>>(
+ List<Tuple2<Integer, Character>> list = new ArrayList<>(
Arrays.<Tuple2<Integer, Character>>asList(
new Tuple2(1, 'a'),
new Tuple2(1, 'b'),
@@ -19,7 +19,6 @@

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.examples.streaming.StreamingExamples;
import org.apache.spark.streaming.*;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.flume.FlumeUtils;
@@ -58,7 +57,8 @@ public static void main(String[] args) {
Duration batchInterval = new Duration(2000);
SparkConf sparkConf = new SparkConf().setAppName("JavaFlumeEventCount");
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, batchInterval);
- JavaReceiverInputDStream<SparkFlumeEvent> flumeStream = FlumeUtils.createStream(ssc, host, port);
+ JavaReceiverInputDStream<SparkFlumeEvent> flumeStream =
+     FlumeUtils.createStream(ssc, host, port);

flumeStream.count();

@@ -27,10 +27,11 @@ public class JavaFlumeStreamSuite extends LocalJavaStreamingContext {
@Test
public void testFlumeStream() {
// tests the API, does not actually test data receiving
- JavaReceiverInputDStream<SparkFlumeEvent> test1 = FlumeUtils.createStream(ssc, "localhost", 12345);
- JavaReceiverInputDStream<SparkFlumeEvent> test2 = FlumeUtils.createStream(ssc, "localhost", 12345,
-     StorageLevel.MEMORY_AND_DISK_SER_2());
- JavaReceiverInputDStream<SparkFlumeEvent> test3 = FlumeUtils.createStream(ssc, "localhost", 12345,
-     StorageLevel.MEMORY_AND_DISK_SER_2(), false);
+ JavaReceiverInputDStream<SparkFlumeEvent> test1 = FlumeUtils.createStream(ssc, "localhost",
+     12345);
+ JavaReceiverInputDStream<SparkFlumeEvent> test2 = FlumeUtils.createStream(ssc, "localhost",
+     12345, StorageLevel.MEMORY_AND_DISK_SER_2());
+ JavaReceiverInputDStream<SparkFlumeEvent> test3 = FlumeUtils.createStream(ssc, "localhost",
+     12345, StorageLevel.MEMORY_AND_DISK_SER_2(), false);
}
}
@@ -34,7 +34,7 @@ class CommandBuilderUtils {
/** The set of known JVM vendors. */
enum JavaVendor {
Oracle, IBM, OpenJDK, Unknown
- };
+ }

/** Returns whether the given string is null or empty. */
static boolean isEmpty(String s) {
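
The `};` → `}` fixes here and in the files below address the same static-analysis warning: a semicolon after a type or member body is legal Java but redundant. A hypothetical snippet showing the before/after:

```java
public class SemicolonDemo {
    // Flagged:   enum JavaVendor { Oracle, IBM, OpenJDK, Unknown };
    // Preferred: no trailing semicolon after the closing brace.
    enum JavaVendor { Oracle, IBM, OpenJDK, Unknown }
}
```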
@@ -477,6 +477,6 @@ protected void handleExtraArgs(List<String> extra) {
// No op.
}

- };
+ }

}
@@ -175,7 +175,7 @@ private static class TestClient extends LauncherConnection {

TestClient(Socket s) throws IOException {
super(s);
- this.inbound = new LinkedBlockingQueue<Message>();
+ this.inbound = new LinkedBlockingQueue<>();
this.clientThread = new Thread(this);
clientThread.setName("TestClient");
clientThread.setDaemon(true);
@@ -160,7 +160,7 @@ public void testExamplesRunner() throws Exception {
"SparkPi",
"42");

- Map<String, String> env = new HashMap<String, String>();
+ Map<String, String> env = new HashMap<>();
List<String> cmd = buildCommand(sparkSubmitArgs, env);
assertEquals("foo", findArgValue(cmd, parser.MASTER));
assertEquals("bar", findArgValue(cmd, parser.DEPLOY_MODE));
@@ -205,7 +205,7 @@ final class DecisionTreeClassificationModel private[ml] (
@Since("2.0.0")
lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(this, numFeatures)

- /** Convert to spark.mllib DecisionTreeModel (losing some infomation) */
+ /** Convert to spark.mllib DecisionTreeModel (losing some information) */
override private[spark] def toOld: OldDecisionTreeModel = {
new OldDecisionTreeModel(rootNode.toOld(1), OldAlgo.Classification)
}
@@ -62,7 +62,7 @@ private[shared] object SharedParamsCodeGen {
"every 10 iterations", isValid = "(interval: Int) => interval == -1 || interval >= 1"),
ParamDesc[Boolean]("fitIntercept", "whether to fit an intercept term", Some("true")),
ParamDesc[String]("handleInvalid", "how to handle invalid entries. Options are skip (which " +
"will filter out rows with bad values), or error (which will throw an errror). More " +
"will filter out rows with bad values), or error (which will throw an error). More " +
"options may be added later",
isValid = "ParamValidators.inArray(Array(\"skip\", \"error\"))"),
ParamDesc[Boolean]("standardization", "whether to standardize the training features" +
@@ -270,10 +270,10 @@ private[ml] trait HasFitIntercept extends Params {
private[ml] trait HasHandleInvalid extends Params {

/**
- * Param for how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an errror). More options may be added later.
+ * Param for how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
* @group param
*/
- final val handleInvalid: Param[String] = new Param[String](this, "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an errror). More options may be added later", ParamValidators.inArray(Array("skip", "error")))
+ final val handleInvalid: Param[String] = new Param[String](this, "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later", ParamValidators.inArray(Array("skip", "error")))

/** @group getParam */
final def getHandleInvalid: String = $(handleInvalid)
@@ -205,7 +205,7 @@ final class DecisionTreeRegressionModel private[ml] (
@Since("2.0.0")
lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(this, numFeatures)

- /** Convert to spark.mllib DecisionTreeModel (losing some infomation) */
+ /** Convert to spark.mllib DecisionTreeModel (losing some information) */
override private[spark] def toOld: OldDecisionTreeModel = {
new OldDecisionTreeModel(rootNode.toOld(1), OldAlgo.Regression)
}
@@ -71,7 +71,7 @@ private[spark] trait DecisionTreeModel {
*/
private[ml] def maxSplitFeatureIndex(): Int = rootNode.maxSplitFeatureIndex()

- /** Convert to spark.mllib DecisionTreeModel (losing some infomation) */
+ /** Convert to spark.mllib DecisionTreeModel (losing some information) */
private[spark] def toOld: OldDecisionTreeModel
}

@@ -418,7 +418,7 @@ class LogisticRegressionWithLBFGS

private def run(input: RDD[LabeledPoint], initialWeights: Vector, userSuppliedWeights: Boolean):
LogisticRegressionModel = {
- // ml's Logisitic regression only supports binary classifcation currently.
+ // ml's Logistic regression only supports binary classification currently.
if (numOfLinearPredictor == 1) {
def runWithMlLogisitcRegression(elasticNetParam: Double) = {
// Prepare the ml LogisticRegression based on our settings
@@ -89,7 +89,7 @@ private void init() {
myDoubleParam_ = new DoubleParam(this, "myDoubleParam", "this is a double param",
ParamValidators.inRange(0.0, 1.0));
List<String> validStrings = Arrays.asList("a", "b");
- myStringParam_ = new Param<String>(this, "myStringParam", "this is a string param",
+ myStringParam_ = new Param<>(this, "myStringParam", "this is a string param",
ParamValidators.inArray(validStrings));
myDoubleArrayParam_ =
new DoubleArrayParam(this, "myDoubleArrayParam", "this is a double param");
@@ -66,8 +66,8 @@ public void javaAPI() {
JavaDStream<LabeledPoint> training =
attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 2);
List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
- new Tuple2<Integer, Vector>(10, Vectors.dense(1.0)),
- new Tuple2<Integer, Vector>(11, Vectors.dense(0.0)));
+ new Tuple2<>(10, Vectors.dense(1.0)),
+ new Tuple2<>(11, Vectors.dense(0.0)));
JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
StreamingLogisticRegressionWithSGD slr = new StreamingLogisticRegressionWithSGD()
@@ -66,8 +66,8 @@ public void javaAPI() {
JavaDStream<Vector> training =
attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 2);
List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
- new Tuple2<Integer, Vector>(10, Vectors.dense(1.0)),
- new Tuple2<Integer, Vector>(11, Vectors.dense(0.0)));
+ new Tuple2<>(10, Vectors.dense(1.0)),
+ new Tuple2<>(11, Vectors.dense(0.0)));
JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
StreamingKMeans skmeans = new StreamingKMeans()
@@ -37,8 +37,8 @@ public void denseArrayConstruction() {
public void sparseArrayConstruction() {
@SuppressWarnings("unchecked")
Vector v = Vectors.sparse(3, Arrays.asList(
- new Tuple2<Integer, Double>(0, 2.0),
- new Tuple2<Integer, Double>(2, 3.0)));
+ new Tuple2<>(0, 2.0),
+ new Tuple2<>(2, 3.0)));
assertArrayEquals(new double[]{2.0, 0.0, 3.0}, v.toArray(), 0.0);
}
}
@@ -65,8 +65,8 @@ public void javaAPI() {
JavaDStream<LabeledPoint> training =
attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 2);
List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
- new Tuple2<Integer, Vector>(10, Vectors.dense(1.0)),
- new Tuple2<Integer, Vector>(11, Vectors.dense(0.0)));
+ new Tuple2<>(10, Vectors.dense(1.0)),
+ new Tuple2<>(11, Vectors.dense(0.0)));
JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
StreamingLinearRegressionWithSGD slr = new StreamingLinearRegressionWithSGD()
@@ -759,7 +759,7 @@ class LinearRegressionSuite
.sliding(2)
.forall(x => x(0) >= x(1)))
} else {
- // To clalify that the normal solver is used here.
+ // To clarify that the normal solver is used here.
assert(model.summary.objectiveHistory.length == 1)
assert(model.summary.objectiveHistory(0) == 0.0)
val devianceResidualsR = Array(-0.47082, 0.34635)
@@ -151,7 +151,7 @@ public UnsafeRow next() {
Platform.throwException(e);
}
throw new RuntimeException("Exception should have been re-thrown in next()");
- };
+ }
};
} catch (IOException e) {
cleanupResources();
@@ -26,7 +26,7 @@ private[spark] trait CatalystConf {
def groupByOrdinal: Boolean

/**
- * Returns the [[Resolver]] for the current configuration, which can be used to determin if two
+ * Returns the [[Resolver]] for the current configuration, which can be used to determine if two
* identifiers are equal.
*/
def resolver: Resolver = {
@@ -153,8 +153,8 @@ abstract class Expression extends TreeNode[Expression] {
* evaluate to the same result.
*/
lazy val canonicalized: Expression = {
- val canonicalizedChildred = children.map(_.canonicalized)
- Canonicalize.execute(withNewChildren(canonicalizedChildred))
+ val canonicalizedChildren = children.map(_.canonicalized)
+ Canonicalize.execute(withNewChildren(canonicalizedChildren))
}
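
Some context for the renamed variable: canonicalization runs bottom-up, children first, so that semantically equal trees such as `a + b` and `b + a` end up with identical canonical forms. A toy sketch of the idea (not Catalyst's `Canonicalize` rules):

```java
public class CanonicalizeDemo {
    // Expressions are modeled as nested Object[] nodes: {op, left, right}.
    static String canonicalize(Object expr) {
        if (expr instanceof Object[]) {
            Object[] node = (Object[]) expr;
            String op = (String) node[0];
            // Canonicalize the children first, as in the Scala code above.
            String left = canonicalize(node[1]);
            String right = canonicalize(node[2]);
            // Order operands of a commutative operator deterministically.
            if (op.equals("+") && left.compareTo(right) > 0) {
                String tmp = left; left = right; right = tmp;
            }
            return "(" + left + " " + op + " " + right + ")";
        }
        return expr.toString();
    }

    public static void main(String[] args) {
        Object e1 = new Object[]{"+", "b", new Object[]{"+", "c", "a"}};
        Object e2 = new Object[]{"+", new Object[]{"+", "a", "c"}, "b"};
        System.out.println(canonicalize(e1).equals(canonicalize(e2))); // true
    }
}
```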

/**
@@ -509,7 +509,7 @@ class CodegenContext {

/**
* Checks and sets up the state and codegen for subexpression elimination. This finds the
- * common subexpresses, generates the functions that evaluate those expressions and populates
+ * common subexpressions, generates the functions that evaluate those expressions and populates
* the mapping of common subexpressions to the generated functions.
*/
private def subexpressionElimination(expressions: Seq[Expression]) = {
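
The fixed comment concerns common subexpression elimination. As a rough illustration of the "find common subexpressions" half of that job (a toy over strings, nothing like the real codegen):

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CseDemo {
    // Collect parenthesized substrings; a stand-in for walking a real AST.
    static List<String> subexpressions(String e) {
        List<String> out = new ArrayList<>();
        Deque<Integer> opens = new ArrayDeque<>();
        for (int i = 0; i < e.length(); i++) {
            if (e.charAt(i) == '(') opens.push(i);
            else if (e.charAt(i) == ')') out.add(e.substring(opens.pop(), i + 1));
        }
        return out;
    }

    public static void main(String[] args) {
        List<String> exprs = Arrays.asList("(a+b)*c", "(a+b)-d");
        Map<String, Integer> counts = new HashMap<>();
        for (String e : exprs) {
            for (String sub : subexpressions(e)) counts.merge(sub, 1, Integer::sum);
        }
        // Anything seen more than once is worth computing a single time.
        counts.forEach((sub, n) -> {
            if (n > 1) System.out.println("common subexpression: " + sub);
        });
        // prints: common subexpression: (a+b)
    }
}
```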
@@ -222,7 +222,7 @@ object CaseWhen {
}

/**
- * A factory method to faciliate the creation of this expression when used in parsers.
+ * A factory method to facilitate the creation of this expression when used in parsers.
* @param branches Expressions at even position are the branch conditions, and expressions at odd
* position are branch values.
*/
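
The even/odd layout in the fixed comment is easy to misread, so here is a sketch of the pairing. Treating a trailing odd element as the else value is an assumption about such parser factories, not something stated in this diff:

```java
import java.util.Arrays;
import java.util.List;

public class CaseWhenPairingDemo {
    public static void main(String[] args) {
        // Flat branch list: condition, value, condition, value, [else value?].
        List<String> branches = Arrays.asList("x > 0", "'pos'", "x < 0", "'neg'", "'zero'");
        for (int i = 0; i + 1 < branches.size(); i += 2) {
            System.out.println("WHEN " + branches.get(i) + " THEN " + branches.get(i + 1));
        }
        if (branches.size() % 2 == 1) { // assumed: odd tail acts as the else branch
            System.out.println("ELSE " + branches.get(branches.size() - 1));
        }
    }
}
```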
@@ -965,7 +965,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging {

/**
* Create a binary arithmetic expression. The following arithmetic operators are supported:
- * - Mulitplication: '*'
+ * - Multiplication: '*'
* - Division: '/'
* - Hive Long Division: 'DIV'
* - Modulo: '%'
@@ -1270,7 +1270,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging {
}

/**
- * Create a double literal for a number denoted in scientifc notation.
+ * Create a double literal for a number denoted in scientific notation.
*/
override def visitScientificDecimalLiteral(
ctx: ScientificDecimalLiteralContext): Literal = withOrigin(ctx) {
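
For completeness: the rule documented by the corrected comment turns scientific notation into a double literal, the same mapping `Double.parseDouble` performs:

```java
public class ScientificLiteralDemo {
    public static void main(String[] args) {
        System.out.println(Double.parseDouble("1.23E4")); // 12300.0
    }
}
```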