Skip to content

Commit 018ea22

Browse files
authored
Remove AbstractComponent from AbstractLifecycleComponent (#35560)
AbstractLifecycleComponent now no longer extends AbstractComponent. In order to accomplish this, many, many classes now instantiate their own logger.
1 parent ce5b0f4 commit 018ea22

File tree

42 files changed

+133
-33
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+133
-33
lines changed

modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919

2020
package org.elasticsearch.repositories.url;
2121

22+
import org.apache.logging.log4j.LogManager;
23+
import org.apache.logging.log4j.Logger;
2224
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
2325
import org.elasticsearch.common.blobstore.BlobContainer;
2426
import org.elasticsearch.common.blobstore.BlobPath;
@@ -50,6 +52,7 @@
5052
* </dl>
5153
*/
5254
public class URLRepository extends BlobStoreRepository {
55+
private static final Logger logger = LogManager.getLogger(URLRepository.class);
5356

5457
public static final String TYPE = "url";
5558

modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,8 @@
4242
import io.netty.handler.codec.http.HttpResponseEncoder;
4343
import io.netty.handler.timeout.ReadTimeoutException;
4444
import io.netty.handler.timeout.ReadTimeoutHandler;
45-
import org.apache.logging.log4j.Logger;
4645
import org.apache.logging.log4j.LogManager;
46+
import org.apache.logging.log4j.Logger;
4747
import org.apache.logging.log4j.message.ParameterizedMessage;
4848
import org.apache.logging.log4j.util.Supplier;
4949
import org.elasticsearch.ExceptionsHelper;

modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@
3636
import io.netty.channel.socket.nio.NioSocketChannel;
3737
import io.netty.util.AttributeKey;
3838
import io.netty.util.concurrent.Future;
39+
import org.apache.logging.log4j.LogManager;
40+
import org.apache.logging.log4j.Logger;
3941
import org.apache.logging.log4j.message.ParameterizedMessage;
4042
import org.elasticsearch.ExceptionsHelper;
4143
import org.elasticsearch.Version;
@@ -73,6 +75,7 @@
7375
* sending out ping requests to other nodes.
7476
*/
7577
public class Netty4Transport extends TcpTransport {
78+
private static final Logger logger = LogManager.getLogger(Netty4Transport.class);
7679

7780
static {
7881
Netty4Utils.setup();

modules/tribe/src/main/java/org/elasticsearch/tribe/TribeService.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
package org.elasticsearch.tribe;
2121

2222
import org.apache.logging.log4j.LogManager;
23+
import org.apache.logging.log4j.Logger;
2324
import org.apache.logging.log4j.message.ParameterizedMessage;
2425
import org.apache.logging.log4j.util.Supplier;
2526
import org.apache.lucene.util.BytesRef;
@@ -94,6 +95,7 @@
9495
* to propagate to the relevant cluster.
9596
*/
9697
public class TribeService extends AbstractLifecycleComponent {
98+
private static final Logger logger = LogManager.getLogger(TribeService.class);
9799

98100
public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false,
99101
false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));

plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,8 @@
3333
import com.microsoft.windowsazure.management.compute.ComputeManagementService;
3434
import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse;
3535
import com.microsoft.windowsazure.management.configuration.ManagementConfiguration;
36+
import org.apache.logging.log4j.LogManager;
37+
import org.apache.logging.log4j.Logger;
3638
import org.elasticsearch.ElasticsearchException;
3739
import org.elasticsearch.SpecialPermission;
3840
import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException;
@@ -43,6 +45,8 @@
4345

4446
public class AzureComputeServiceImpl extends AbstractLifecycleComponent
4547
implements AzureComputeService {
48+
private static final Logger logger = LogManager.getLogger(AzureComputeServiceImpl.class);
49+
4650

4751
private final ComputeManagementClient client;
4852
private final String serviceName;

plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,12 +30,15 @@
3030
import com.google.api.client.http.HttpHeaders;
3131
import com.google.api.client.http.HttpResponse;
3232
import com.google.api.client.http.HttpTransport;
33+
import org.apache.logging.log4j.LogManager;
34+
import org.apache.logging.log4j.Logger;
3335
import org.elasticsearch.cloud.gce.util.Access;
3436
import org.elasticsearch.common.component.AbstractLifecycleComponent;
3537
import org.elasticsearch.common.settings.Setting;
3638
import org.elasticsearch.common.settings.Settings;
3739

3840
public class GceMetadataService extends AbstractLifecycleComponent {
41+
private static final Logger logger = LogManager.getLogger(GceMetadataService.class);
3942

4043
// Forcing Google Token API URL as set in GCE SDK to
4144
// http://metadata/computeMetadata/v1/instance/service-accounts/default/token

plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,8 @@
2525
import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore;
2626
import org.elasticsearch.cloud.azure.storage.AzureStorageService;
2727

28+
import org.apache.logging.log4j.LogManager;
29+
import org.apache.logging.log4j.Logger;
2830
import org.elasticsearch.cluster.metadata.MetaData;
2931
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
3032
import org.elasticsearch.common.Strings;
@@ -60,6 +62,7 @@
6062
* </dl>
6163
*/
6264
public class AzureRepository extends BlobStoreRepository {
65+
private static final Logger logger = LogManager.getLogger(AzureRepository.class);
6366

6467
public static final String TYPE = "azure";
6568

plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,8 @@
1919

2020
package org.elasticsearch.repositories.gcs;
2121

22-
import org.apache.logging.log4j.Logger;
2322
import org.apache.logging.log4j.LogManager;
23+
import org.apache.logging.log4j.Logger;
2424
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
2525
import org.elasticsearch.common.Strings;
2626
import org.elasticsearch.common.blobstore.BlobPath;
@@ -45,8 +45,7 @@
4545
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
4646

4747
class GoogleCloudStorageRepository extends BlobStoreRepository {
48-
49-
private final Logger logger = LogManager.getLogger(GoogleCloudStorageRepository.class);
48+
private static final Logger logger = LogManager.getLogger(GoogleCloudStorageRepository.class);
5049
private final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
5150

5251
// package private for testing

plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@
5252

5353
public final class HdfsRepository extends BlobStoreRepository {
5454

55-
private static final Logger LOGGER = LogManager.getLogger(HdfsRepository.class);
55+
private static final Logger logger = LogManager.getLogger(HdfsRepository.class);
5656

5757
private static final String CONF_SECURITY_PRINCIPAL = "security.principal";
5858

@@ -104,7 +104,7 @@ private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositoryS
104104

105105
final Settings confSettings = repositorySettings.getByPrefix("conf.");
106106
for (String key : confSettings.keySet()) {
107-
LOGGER.debug("Adding configuration to HDFS Client Configuration : {} = {}", key, confSettings.get(key));
107+
logger.debug("Adding configuration to HDFS Client Configuration : {} = {}", key, confSettings.get(key));
108108
hadoopConfiguration.set(key, confSettings.get(key));
109109
}
110110

@@ -158,7 +158,7 @@ private UserGroupInformation login(Configuration hadoopConfiguration, Settings r
158158

159159
// Check to see if the authentication method is compatible
160160
if (kerberosPrincipal != null && authMethod.equals(AuthenticationMethod.SIMPLE)) {
161-
LOGGER.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " +
161+
logger.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " +
162162
"specified. Continuing with [KERBEROS] authentication.");
163163
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, hadoopConfiguration);
164164
} else if (kerberosPrincipal == null && authMethod.equals(AuthenticationMethod.KERBEROS)) {
@@ -171,15 +171,15 @@ private UserGroupInformation login(Configuration hadoopConfiguration, Settings r
171171
UserGroupInformation.setConfiguration(hadoopConfiguration);
172172

173173
// Debugging
174-
LOGGER.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled());
175-
LOGGER.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration));
174+
logger.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled());
175+
logger.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration));
176176

177177
// UserGroupInformation (UGI) instance is just a Hadoop specific wrapper around a Java Subject
178178
try {
179179
if (UserGroupInformation.isSecurityEnabled()) {
180180
String principal = preparePrincipal(kerberosPrincipal);
181181
String keytab = HdfsSecurityContext.locateKeytabFile(environment).toString();
182-
LOGGER.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab);
182+
logger.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab);
183183
return UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
184184
}
185185
return UserGroupInformation.getCurrentUser();
@@ -200,7 +200,7 @@ private static String preparePrincipal(String originalPrincipal) {
200200
}
201201

202202
if (originalPrincipal.equals(finalPrincipal) == false) {
203-
LOGGER.debug("Found service principal. Converted original principal name [{}] to server principal [{}]",
203+
logger.debug("Found service principal. Converted original principal name [{}] to server principal [{}]",
204204
originalPrincipal, finalPrincipal);
205205
}
206206
}

server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
*/
1919
package org.elasticsearch.cluster;
2020

21+
import org.apache.logging.log4j.LogManager;
22+
import org.apache.logging.log4j.Logger;
2123
import org.apache.logging.log4j.message.ParameterizedMessage;
2224
import org.elasticsearch.cluster.node.DiscoveryNode;
2325
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -54,6 +56,7 @@
5456
* is done by {@link MasterFaultDetection}.
5557
*/
5658
public class NodeConnectionsService extends AbstractLifecycleComponent {
59+
private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class);
5760

5861
public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
5962
positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope);

0 commit comments

Comments (0)