
Commit dff1505

Fix various typos
This closes apache#49

(Fonso Li via Mike Percy)
lfzCarlosC authored and mpercy committed Jul 29, 2016
1 parent 988ede9 commit dff1505
Showing 19 changed files with 22 additions and 22 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG
@@ -720,7 +720,7 @@ Release Notes - Flume - Version v1.2.0
* [FLUME-1017] - syslog classes missing
* [FLUME-1026] - Document Thread Safety Guarantees
* [FLUME-1027] - Missing log4j library in Flume distribution
- * [FLUME-1031] - Depricate code generated by Thrift and Avro OG sources that is under com.cloudera package
+ * [FLUME-1031] - Deprecate code generated by Thrift and Avro OG sources that is under com.cloudera package
* [FLUME-1035] - slf4j error in flume sdk unit tests
* [FLUME-1036] - Reconfiguration of AVRO or NETCAT source causes port bind exception
* [FLUME-1037] - NETCAT handler theads terminate under stress test
@@ -514,7 +514,7 @@ public void serializeAndWrite() throws Exception {
+ values.size()) * 8) //Event pointers
+ 16; //Checksum
//There is no real need of filling the channel with 0s, since we
- //will write the exact nummber of bytes as expected file size.
+ //will write the exact number of bytes as expected file size.
file.setLength(expectedFileSize);
Preconditions.checkState(file.length() == expectedFileSize,
"Expected File size of inflight events file does not match the "
@@ -1141,7 +1141,7 @@ private void removeOldLogs(SortedSet<Integer> fileIDs) {
* <p> Locking is not supported by all file systems.
* E.g., NFS does not consistently support exclusive locks.
* <p>
- * <p> If locking is supported we guarantee exculsive access to the
+ * <p> If locking is supported we guarantee exclusive access to the
* storage directory. Otherwise, no guarantee is given.
*
* @throws IOException if locking fails
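The exclusive access this Javadoc talks about is the kind of guarantee normally obtained through java.nio file locking. A minimal sketch of the idea, assuming a marker file inside the storage directory (illustrative only, not Flume's actual implementation; the lock-file name is made up):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;
    import java.nio.channels.FileLock;

    public class DirectoryLockSketch {
      // Tries to take an exclusive lock on a marker file inside the storage
      // directory; returns null if another process already holds the lock.
      // On file systems without reliable locking (e.g. some NFS setups) this
      // gives no real guarantee, which is what the comment above warns about.
      static FileLock tryLockDirectory(File storageDir) throws IOException {
        File lockFile = new File(storageDir, "in_use.lock"); // illustrative file name
        FileChannel channel = new RandomAccessFile(lockFile, "rw").getChannel();
        FileLock lock = channel.tryLock();
        if (lock == null) {
          channel.close(); // another process holds the lock
        }
        return lock;
      }
    }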
@@ -47,7 +47,7 @@

/**
*
- * Appends Log4j Events to an external Flume client which is decribed by
+ * Appends Log4j Events to an external Flume client which is described by
* the Log4j configuration file. The appender takes two required parameters:
*<p>
*<strong>Hostname</strong> : This is the hostname of the first hop
@@ -46,7 +46,7 @@ public void setSinks(Set<String> sinks) {

public enum SinkProcessorConfigurationType {
/**
- * Load balacing channel selector
+ * Load balancing channel selector
*/
LOAD_BALANCE("org.apache.flume.conf.sink.LoadBalancingSinkProcessorConfiguration"),
/**
@@ -153,7 +153,7 @@ public enum SourceConfigurationType {
EXEC("org.apache.flume.conf.source.ExecSourceConfiguration"),

/**
- * Avro soruce.
+ * Avro source.
*
* @see AvroSource
*/
@@ -41,7 +41,7 @@ public interface Configurable {
* reflected by the component asap.
* </p>
* <p>
- * There are no thread safety guarrantees on when configure might be called.
+ * There are no thread safety guarantees on when configure might be called.
* </p>
* @param context
*/
@@ -433,10 +433,10 @@ public static String escapeString(String in, Map<String, String> headers,
// The replacement string must have '$' and '\' chars escaped. This
// replacement string is pretty arcane.
//
- // replacee : '$' -> for java '\$' -> for regex "\\$"
+ // replace : '$' -> for java '\$' -> for regex "\\$"
// replacement: '\$' -> for regex '\\\$' -> for java "\\\\\\$"
//
- // replacee : '\' -> for java "\\" -> for regex "\\\\"
+ // replace : '\' -> for java "\\" -> for regex "\\\\"
// replacement: '\\' -> for regex "\\\\" -> for java "\\\\\\\\"

// note: order matters
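To make the escaping rule in this comment concrete, here is a standalone sketch (not the Flume escapeString method itself; the placeholder token %{key} is made up). In a java.util.regex replacement string '$' introduces a group reference and '\' starts an escape, so both must be escaped in a value before it can be used as a replacement, and the backslashes must be handled first:

    public class ReplacementEscapeDemo {
      public static void main(String[] args) {
        String value = "cost\\total$"; // contains a literal '\' and a literal '$'

        // Escape '\' first, then '$' -- order matters, because escaping '$'
        // introduces new backslashes that must not be escaped again.
        String escaped = value
            .replaceAll("\\\\", "\\\\\\\\") // '\' -> '\\'
            .replaceAll("\\$", "\\\\\\$");  // '$' -> '\$'

        // The escaped value is now safe to use as a replacement string.
        String result = "header=%{key}".replaceAll("%\\{key\\}", escaped);
        System.out.println(result); // prints: header=cost\total$

        // java.util.regex.Matcher.quoteReplacement(value) achieves the same effect.
      }
    }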
@@ -119,7 +119,7 @@ public class ResettableFileInputStream extends ResettableInputStream
private boolean hasLowSurrogate = false;

/**
- * A low surrrgate character read from a surrogate pair.
+ * A low surrogate character read from a surrogate pair.
* When a surrogate pair is found, the high (first) surrogate pair
* is returned upon a call to {@link #read()},
* while the low (second) surrogate remains stored in memory,
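The buffering behaviour described here can be sketched roughly as follows (an illustration of the idea only, not the actual ResettableFileInputStream code; decodeNextCodePoint is a hypothetical stand-in for the charset decoding step):

    public class SurrogateBufferSketch {
      private int pendingLowSurrogate = -1;

      // Returns the next UTF-16 char value. For a supplementary code point the
      // high surrogate is returned first and the low surrogate is kept in
      // memory until the following call.
      public int read() {
        if (pendingLowSurrogate != -1) {
          int low = pendingLowSurrogate;
          pendingLowSurrogate = -1;
          return low; // second half of the surrogate pair
        }
        int codePoint = decodeNextCodePoint();
        if (codePoint == -1) {
          return -1; // end of stream
        }
        if (Character.isSupplementaryCodePoint(codePoint)) {
          pendingLowSurrogate = Character.lowSurrogate(codePoint); // stash the low half
          return Character.highSurrogate(codePoint);               // return the high half first
        }
        return codePoint; // BMP character, fits in a single char
      }

      private int decodeNextCodePoint() {
        // Hypothetical placeholder for decoding the next code point from the file.
        return -1;
      }
    }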
@@ -39,7 +39,7 @@
*
* The failover mechanism works by relegating failed sinks to a pool
* where they are assigned a cooldown period, increasing with sequential
- * failures before they are retried. Once a sink succesfully sends an
+ * failures before they are retried. Once a sink successfully sends an
* event it is restored to the live pool.
*
* FailoverSinkProcessor is in no way thread safe and expects to be run via
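A rough sketch of the increasing-cooldown idea mentioned in this Javadoc (illustrative only; the doubling policy, method name, and parameters are assumptions, not the FailoverSinkProcessor internals):

    public class CooldownSketch {
      // Doubles the penalty for each consecutive failure, up to a maximum,
      // so a repeatedly failing sink is retried less and less often.
      static long nextCooldownMillis(int sequentialFailures, long baseMillis, long maxMillis) {
        long cooldown = baseMillis << Math.min(sequentialFailures, 30); // cap the shift to avoid overflow
        return Math.min(cooldown, maxMillis);
      }
    }

For example, with baseMillis = 1000 and maxMillis = 30000, failure counts 1, 2, 3, 4, 5 would yield cooldowns of 2000, 4000, 8000, 16000 and 30000 ms.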
@@ -415,7 +415,7 @@ public void testShutdown() throws Exception {
}

// yes in the mean time someone could use our sleep time
- // but this should be a fairly rare scenerio
+ // but this should be a fairly rare scenario

String command = "sleep " + seconds;
Pattern pattern = Pattern.compile("\b" + command + "\b");
@@ -164,7 +164,7 @@ public void testMaxSuccessfulEvents() throws InterruptedException,
source.process();
}

- // 1 failed call, 50 succesful
+ // 1 failed call, 50 successful
doThrow(new ChannelException("stub")).when(
mockProcessor).processEvent(getEvent(source));
source.process();
4 changes: 2 additions & 2 deletions flume-ng-doc/sphinx/FlumeUserGuide.rst
@@ -900,7 +900,7 @@ Property Name Default Description
**channels** --
**type** -- The component type name, needs to be ``jms``
**initialContextFactory** -- Inital Context Factory, e.g: org.apache.activemq.jndi.ActiveMQInitialContextFactory
- **connectionFactory** -- The JNDI name the connection factory shoulld appear as
+ **connectionFactory** -- The JNDI name the connection factory should appear as
**providerURL** -- The JMS provider URL
**destinationName** -- Destination name
**destinationType** -- Destination type (queue or topic)
@@ -1176,7 +1176,7 @@ Property Name Default Description
**consumerKey** -- OAuth consumer key
**consumerSecret** -- OAuth consumer secret
**accessToken** -- OAuth access token
- **accessTokenSecret** -- OAuth toekn secret
+ **accessTokenSecret** -- OAuth token secret
maxBatchSize 1000 Maximum number of twitter messages to put in a single batch
maxBatchDurationMillis 1000 Maximum number of milliseconds to wait before closing a batch
====================== =========== ===================================================
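For orientation, the properties in the table above would typically be wired into an agent configuration along these lines (a hedged sketch; the agent, source, and channel names and all values are placeholders, and the fully qualified type value is an assumption rather than something stated in this excerpt):

    # Illustrative only -- names and values are placeholders.
    a1.sources = twtr
    a1.channels = c1
    a1.sources.twtr.type = org.apache.flume.source.twitter.TwitterSource
    a1.sources.twtr.channels = c1
    a1.sources.twtr.consumerKey = YOUR_CONSUMER_KEY
    a1.sources.twtr.consumerSecret = YOUR_CONSUMER_SECRET
    a1.sources.twtr.accessToken = YOUR_ACCESS_TOKEN
    a1.sources.twtr.accessTokenSecret = YOUR_ACCESS_TOKEN_SECRET
    a1.sources.twtr.maxBatchSize = 1000
    a1.sources.twtr.maxBatchDurationMillis = 1000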
@@ -96,7 +96,7 @@
* belonging to it. These cannot be shared by multiple groups.
* Further, one can set a processor and behavioral parameters to determine
* how sink selection is made via <tt>&lt;agent name&gt;.sinkgroups.&lt;
- * group name&lt.processor</tt>. For further detail refer to inividual processor
+ * group name&lt.processor</tt>. For further detail refer to individual processor
* documentation</li>
* <li>Sinks not assigned to a group will be assigned to default single sink
* groups.</li>
@@ -617,7 +617,7 @@ public void testRoundRobinBackoffIncreasingBackoffs() throws Exception {

}
Assert.assertEquals(0, hosts.get(1).getAppendCount());
- // after this s2 should be receiving events agains
+ // after this s2 should be receiving events again
Thread.sleep(2500);
int numEvents = 60;
for (int i = 0; i < numEvents; i++) {
@@ -391,7 +391,7 @@ void write(Event event) throws EventDeliveryException {
*/
@VisibleForTesting
void createWriter() throws EventDeliveryException {
- // reset the commited flag whenver a new writer is created
+ // reset the commited flag whenever a new writer is created
committedBatch = false;
try {
View<GenericRecord> view;
@@ -34,7 +34,7 @@ public class ElasticSearchClientFactory {
* @param clientType
* String representation of client type
* @param hostNames
- * Array of strings that represents hosntames with ports (hostname:port)
+ * Array of strings that represents hostnames with ports (hostname:port)
* @param clusterName
* Elasticsearch cluster name used only by Transport Client
* @param serializer
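The hostname:port format described by this parameter is commonly handled by splitting each entry on the colon, along these lines (a generic sketch, not the actual factory code; the default-port handling is an assumption):

    import java.net.InetSocketAddress;
    import java.util.ArrayList;
    import java.util.List;

    public class HostPortParseSketch {
      // Parses entries such as "es-node-1:9300" into socket addresses,
      // falling back to a default port when none is given.
      static List<InetSocketAddress> parse(String[] hostNames, int defaultPort) {
        List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
        for (String hostName : hostNames) {
          String[] parts = hostName.split(":");
          int port = parts.length > 1 ? Integer.parseInt(parts[1]) : defaultPort;
          addresses.add(new InetSocketAddress(parts[0], port));
        }
        return addresses;
      }
    }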
@@ -64,9 +64,9 @@

/**
* A simple sink which reads events from a channel and writes them to HBase.
- * This Sink uses an aysnchronous API internally and is likely to
+ * This Sink uses an asynchronous API internally and is likely to
* perform better.
- * The Hbase configution is picked up from the first <tt>hbase-site.xml</tt>
+ * The Hbase configuration is picked up from the first <tt>hbase-site.xml</tt>
* encountered in the classpath. This sink supports batch reading of
* events from the channel, and writing them to Hbase, to minimize the number
* of flushes on the hbase tables. To use this sink, it has to be configured
@@ -57,7 +57,7 @@

/**
* Demo Flume source that connects via Streaming API to the 1% sample twitter
- * firehose, continously downloads tweets, converts them to Avro format and
+ * firehose, continuously downloads tweets, converts them to Avro format and
* sends Avro events to a downstream Flume sink.
*
* Requires the consumer and access tokens and secrets of a Twitter developer
