From 704123ed65a9b1cd6b1ea2d54c732dff634c6bf2 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 16 Jul 2024 13:26:34 -0700 Subject: [PATCH 01/53] Upgrade Truth to 1.4.4 --- gradle/libs.versions.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 44f3404fb17..78550e9c95e 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -93,9 +93,7 @@ signature-android = "net.sf.androidscents.signature:android-api-level-19:4.4.2_r signature-java = "org.codehaus.mojo.signature:java18:1.0" tomcat-embed-core = "org.apache.tomcat.embed:tomcat-embed-core:10.1.25" tomcat-embed-core9 = "org.apache.tomcat.embed:tomcat-embed-core:9.0.89" -# 1.4.3+ causes "unknown enum constant ElementType.MODULE" warning. -# https://github.com/google/truth/issues/1320 -truth = "com.google.truth:truth:1.4.2" +truth = "com.google.truth:truth:1.4.4" undertow-servlet22 = "io.undertow:undertow-servlet:2.2.32.Final" undertow-servlet = "io.undertow:undertow-servlet:2.3.14.Final" From 0aa976c4eb92464ff177d6679f2349bf52e874c1 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Fri, 19 Jul 2024 19:27:13 +0530 Subject: [PATCH 02/53] V1reflectionservice (#11237) V1 version of the proto reflection service, as the v1.alpha service has been deprecated. * Create V1 alpha service wrapping underlying V1 service, by modifying the ServerServiceDefinition. * Create ProtoReflectionService for the v1alpha proto by producing a ServerServiceDefinition constructed from that of the v1 service but with the service and method names and proto descriptors modified. Issue #6724. --- .../testing/integration/XdsTestClient.java | 2 + .../testing/integration/XdsTestServer.java | 3 + services/BUILD.bazel | 10 + .../reflection/v1/ServerReflectionGrpc.java | 285 ++++++++ .../services/ProtoReflectionService.java | 542 ++------------ .../services/ProtoReflectionServiceV1.java | 539 ++++++++++++++ .../proto/grpc/reflection/v1/reflection.proto | 147 ++++ .../ProtoReflectionServiceV1Test.java | 670 ++++++++++++++++++ 8 files changed, 1700 insertions(+), 498 deletions(-) create mode 100644 services/src/generated/main/grpc/io/grpc/reflection/v1/ServerReflectionGrpc.java create mode 100644 services/src/main/java/io/grpc/protobuf/services/ProtoReflectionServiceV1.java create mode 100644 services/src/main/proto/grpc/reflection/v1/reflection.proto create mode 100644 services/src/test/java/io/grpc/protobuf/services/ProtoReflectionServiceV1Test.java diff --git a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java index 615fe381f7e..c697bd9f305 100644 --- a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java +++ b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java @@ -44,6 +44,7 @@ import io.grpc.Status; import io.grpc.gcp.csm.observability.CsmObservability; import io.grpc.protobuf.services.ProtoReflectionService; +import io.grpc.protobuf.services.ProtoReflectionServiceV1; import io.grpc.services.AdminInterface; import io.grpc.stub.StreamObserver; import io.grpc.testing.integration.Messages.ClientConfigureRequest; @@ -277,6 +278,7 @@ private void run() { .addService(new XdsStatsImpl()) .addService(new ConfigureUpdateServiceImpl()) .addService(ProtoReflectionService.newInstance()) + .addService(ProtoReflectionServiceV1.newInstance()) .addServices(AdminInterface.getStandardServices()) .build(); try { diff --git 
a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java index 6a42dd62bb9..8c61f2eb2ad 100644 --- a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java +++ b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java @@ -37,6 +37,7 @@ import io.grpc.netty.NettyServerBuilder; import io.grpc.protobuf.services.HealthStatusManager; import io.grpc.protobuf.services.ProtoReflectionService; +import io.grpc.protobuf.services.ProtoReflectionServiceV1; import io.grpc.services.AdminInterface; import io.grpc.stub.StreamObserver; import io.grpc.testing.integration.Messages.Payload; @@ -220,6 +221,7 @@ void start() throws Exception { .addService(new XdsUpdateHealthServiceImpl(health)) .addService(health.getHealthService()) .addService(ProtoReflectionService.newInstance()) + .addService(ProtoReflectionServiceV1.newInstance()) .addServices(AdminInterface.getStandardServices()) .build(); maintenanceServer.start(); @@ -268,6 +270,7 @@ void start() throws Exception { .addService(new XdsUpdateHealthServiceImpl(health)) .addService(health.getHealthService()) .addService(ProtoReflectionService.newInstance()) + .addService(ProtoReflectionServiceV1.newInstance()) .addServices(AdminInterface.getStandardServices()) .build(); server.start(); diff --git a/services/BUILD.bazel b/services/BUILD.bazel index d0194de9b42..d20e956ed49 100644 --- a/services/BUILD.bazel +++ b/services/BUILD.bazel @@ -108,14 +108,17 @@ java_library( name = "reflection", srcs = [ "src/main/java/io/grpc/protobuf/services/ProtoReflectionService.java", + "src/main/java/io/grpc/protobuf/services/ProtoReflectionServiceV1.java", ], deps = [ ":_reflection_java_grpc", + ":_reflection_v1_java_grpc", "//api", "//protobuf", "//stub", "@com_google_protobuf//:protobuf_java", "@com_google_protobuf//:protobuf_java_util", + "@io_grpc_grpc_proto//:reflection_java_proto", "@io_grpc_grpc_proto//:reflection_java_proto_deprecated", artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), @@ -171,6 +174,13 @@ java_grpc_library( deps = ["@io_grpc_grpc_proto//:reflection_java_proto_deprecated"], ) +java_grpc_library( + name = "_reflection_v1_java_grpc", + srcs = ["@io_grpc_grpc_proto//:reflection_proto"], + visibility = ["//visibility:private"], + deps = ["@io_grpc_grpc_proto//:reflection_java_proto"], +) + java_grpc_library( name = "_channelz_java_grpc", srcs = ["@io_grpc_grpc_proto//:channelz_proto"], diff --git a/services/src/generated/main/grpc/io/grpc/reflection/v1/ServerReflectionGrpc.java b/services/src/generated/main/grpc/io/grpc/reflection/v1/ServerReflectionGrpc.java new file mode 100644 index 00000000000..4f2dce26486 --- /dev/null +++ b/services/src/generated/main/grpc/io/grpc/reflection/v1/ServerReflectionGrpc.java @@ -0,0 +1,285 @@ +package io.grpc.reflection.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: grpc/reflection/v1/reflection.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class ServerReflectionGrpc { + + private ServerReflectionGrpc() {} + + public static final java.lang.String SERVICE_NAME = "grpc.reflection.v1.ServerReflection"; + + // Static method descriptors that strictly reflect the proto. 
+ private static volatile io.grpc.MethodDescriptor getServerReflectionInfoMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ServerReflectionInfo", + requestType = io.grpc.reflection.v1.ServerReflectionRequest.class, + responseType = io.grpc.reflection.v1.ServerReflectionResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor getServerReflectionInfoMethod() { + io.grpc.MethodDescriptor getServerReflectionInfoMethod; + if ((getServerReflectionInfoMethod = ServerReflectionGrpc.getServerReflectionInfoMethod) == null) { + synchronized (ServerReflectionGrpc.class) { + if ((getServerReflectionInfoMethod = ServerReflectionGrpc.getServerReflectionInfoMethod) == null) { + ServerReflectionGrpc.getServerReflectionInfoMethod = getServerReflectionInfoMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ServerReflectionInfo")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + io.grpc.reflection.v1.ServerReflectionRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + io.grpc.reflection.v1.ServerReflectionResponse.getDefaultInstance())) + .setSchemaDescriptor(new ServerReflectionMethodDescriptorSupplier("ServerReflectionInfo")) + .build(); + } + } + } + return getServerReflectionInfoMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static ServerReflectionStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public ServerReflectionStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ServerReflectionStub(channel, callOptions); + } + }; + return ServerReflectionStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static ServerReflectionBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public ServerReflectionBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ServerReflectionBlockingStub(channel, callOptions); + } + }; + return ServerReflectionBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static ServerReflectionFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public ServerReflectionFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ServerReflectionFutureStub(channel, callOptions); + } + }; + return ServerReflectionFutureStub.newStub(factory, channel); + } + + /** + */ + public interface AsyncService { + + /** + *
+     * The reflection service is structured as a bidirectional stream, ensuring
+     * all related requests go to a single server.
+     * 
+ */ + default io.grpc.stub.StreamObserver serverReflectionInfo( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getServerReflectionInfoMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service ServerReflection. + */ + public static abstract class ServerReflectionImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return ServerReflectionGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service ServerReflection. + */ + public static final class ServerReflectionStub + extends io.grpc.stub.AbstractAsyncStub { + private ServerReflectionStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ServerReflectionStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ServerReflectionStub(channel, callOptions); + } + + /** + *
+     * The reflection service is structured as a bidirectional stream, ensuring
+     * all related requests go to a single server.
+     * 
+ */ + public io.grpc.stub.StreamObserver serverReflectionInfo( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getServerReflectionInfoMethod(), getCallOptions()), responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service ServerReflection. + */ + public static final class ServerReflectionBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private ServerReflectionBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ServerReflectionBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ServerReflectionBlockingStub(channel, callOptions); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service ServerReflection. + */ + public static final class ServerReflectionFutureStub + extends io.grpc.stub.AbstractFutureStub { + private ServerReflectionFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ServerReflectionFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new ServerReflectionFutureStub(channel, callOptions); + } + } + + private static final int METHODID_SERVER_REFLECTION_INFO = 0; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_SERVER_REFLECTION_INFO: + return (io.grpc.stub.StreamObserver) serviceImpl.serverReflectionInfo( + (io.grpc.stub.StreamObserver) responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getServerReflectionInfoMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + io.grpc.reflection.v1.ServerReflectionRequest, + io.grpc.reflection.v1.ServerReflectionResponse>( + service, METHODID_SERVER_REFLECTION_INFO))) + .build(); + } + + private static abstract class ServerReflectionBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + ServerReflectionBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return io.grpc.reflection.v1.ServerReflectionProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("ServerReflection"); + } + } + + private static 
final class ServerReflectionFileDescriptorSupplier + extends ServerReflectionBaseDescriptorSupplier { + ServerReflectionFileDescriptorSupplier() {} + } + + private static final class ServerReflectionMethodDescriptorSupplier + extends ServerReflectionBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + ServerReflectionMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (ServerReflectionGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new ServerReflectionFileDescriptorSupplier()) + .addMethod(getServerReflectionInfoMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionService.java b/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionService.java index 4a7840a3ad9..45947ed44ee 100644 --- a/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionService.java +++ b/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionService.java @@ -16,524 +16,70 @@ package io.grpc.protobuf.services; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; - -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.Descriptors.FieldDescriptor; -import com.google.protobuf.Descriptors.FileDescriptor; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Descriptors.ServiceDescriptor; import io.grpc.BindableService; import io.grpc.ExperimentalApi; -import io.grpc.InternalServer; -import io.grpc.Server; +import io.grpc.MethodDescriptor; +import io.grpc.ServerCallHandler; import io.grpc.ServerServiceDefinition; -import io.grpc.Status; -import io.grpc.protobuf.ProtoFileDescriptorSupplier; -import io.grpc.reflection.v1alpha.ErrorResponse; -import io.grpc.reflection.v1alpha.ExtensionNumberResponse; -import io.grpc.reflection.v1alpha.ExtensionRequest; -import io.grpc.reflection.v1alpha.FileDescriptorResponse; -import io.grpc.reflection.v1alpha.ListServiceResponse; -import io.grpc.reflection.v1alpha.ServerReflectionGrpc; -import io.grpc.reflection.v1alpha.ServerReflectionRequest; -import io.grpc.reflection.v1alpha.ServerReflectionResponse; -import io.grpc.reflection.v1alpha.ServiceResponse; -import io.grpc.stub.ServerCallStreamObserver; -import io.grpc.stub.StreamObserver; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.WeakHashMap; -import javax.annotation.Nullable; -import javax.annotation.concurrent.GuardedBy; +import io.grpc.ServiceDescriptor; +import io.grpc.reflection.v1.ServerReflectionGrpc; +import io.grpc.reflection.v1.ServerReflectionRequest; +import io.grpc.reflection.v1.ServerReflectionResponse; /** * Provides a reflection service for Protobuf 
services (including the reflection service itself). + * Uses the deprecated v1alpha proto. New users should use ProtoReflectionServiceV1 instead. * *

Separately tracks mutable and immutable services. Throws an exception if either group of * services contains multiple Protobuf files with declarations of the same service, method, type, or * extension. */ @ExperimentalApi("https://github.com/grpc/grpc-java/issues/2222") -public final class ProtoReflectionService extends ServerReflectionGrpc.ServerReflectionImplBase { - - private final Object lock = new Object(); - - @GuardedBy("lock") - private final Map serverReflectionIndexes = new WeakHashMap<>(); +public final class ProtoReflectionService implements BindableService { - private ProtoReflectionService() {} + private ProtoReflectionService() { + } - /** - * Creates a instance of {@link ProtoReflectionService}. - */ public static BindableService newInstance() { return new ProtoReflectionService(); } - /** - * Retrieves the index for services of the server that dispatches the current call. Computes - * one if not exist. The index is updated if any changes to the server's mutable services are - * detected. A change is any addition or removal in the set of file descriptors attached to the - * mutable services or a change in the service names. - */ - private ServerReflectionIndex getRefreshedIndex() { - synchronized (lock) { - Server server = InternalServer.SERVER_CONTEXT_KEY.get(); - ServerReflectionIndex index = serverReflectionIndexes.get(server); - if (index == null) { - index = - new ServerReflectionIndex(server.getImmutableServices(), server.getMutableServices()); - serverReflectionIndexes.put(server, index); - return index; - } - - Set serverFileDescriptors = new HashSet<>(); - Set serverServiceNames = new HashSet<>(); - List serverMutableServices = server.getMutableServices(); - for (ServerServiceDefinition mutableService : serverMutableServices) { - io.grpc.ServiceDescriptor serviceDescriptor = mutableService.getServiceDescriptor(); - if (serviceDescriptor.getSchemaDescriptor() instanceof ProtoFileDescriptorSupplier) { - String serviceName = serviceDescriptor.getName(); - FileDescriptor fileDescriptor = - ((ProtoFileDescriptorSupplier) serviceDescriptor.getSchemaDescriptor()) - .getFileDescriptor(); - serverFileDescriptors.add(fileDescriptor); - serverServiceNames.add(serviceName); - } - } - - // Replace the index if the underlying mutable services have changed. Check both the file - // descriptors and the service names, because one file descriptor can define multiple - // services. 
- FileDescriptorIndex mutableServicesIndex = index.getMutableServicesIndex(); - if (!mutableServicesIndex.getServiceFileDescriptors().equals(serverFileDescriptors) - || !mutableServicesIndex.getServiceNames().equals(serverServiceNames)) { - index = - new ServerReflectionIndex(server.getImmutableServices(), serverMutableServices); - serverReflectionIndexes.put(server, index); - } - - return index; - } - } - @Override - public StreamObserver serverReflectionInfo( - final StreamObserver responseObserver) { - final ServerCallStreamObserver serverCallStreamObserver = - (ServerCallStreamObserver) responseObserver; - ProtoReflectionStreamObserver requestObserver = - new ProtoReflectionStreamObserver(getRefreshedIndex(), serverCallStreamObserver); - serverCallStreamObserver.setOnReadyHandler(requestObserver); - serverCallStreamObserver.disableAutoRequest(); - serverCallStreamObserver.request(1); - return requestObserver; + public ServerServiceDefinition bindService() { + ServerServiceDefinition serverServiceDefinitionV1 = ProtoReflectionServiceV1.newInstance() + .bindService(); + MethodDescriptor methodDescriptorV1 = + ServerReflectionGrpc.getServerReflectionInfoMethod(); + // Retain the v1 proto marshallers but change the method name and schema descriptor to v1alpha. + MethodDescriptor methodDescriptorV1AlphaGenerated = + io.grpc.reflection.v1alpha.ServerReflectionGrpc.getServerReflectionInfoMethod(); + MethodDescriptor methodDescriptorV1Alpha = + methodDescriptorV1.toBuilder() + .setFullMethodName(methodDescriptorV1AlphaGenerated.getFullMethodName()) + .setSchemaDescriptor(methodDescriptorV1AlphaGenerated.getSchemaDescriptor()) + .build(); + // Retain the v1 server call handler but change the service name schema descriptor in the + // service descriptor to v1alpha. 
+ ServiceDescriptor serviceDescriptorV1AlphaGenerated = + io.grpc.reflection.v1alpha.ServerReflectionGrpc.getServiceDescriptor(); + ServiceDescriptor serviceDescriptorV1Alpha = + ServiceDescriptor.newBuilder(serviceDescriptorV1AlphaGenerated.getName()) + .setSchemaDescriptor(serviceDescriptorV1AlphaGenerated.getSchemaDescriptor()) + .addMethod(methodDescriptorV1Alpha) + .build(); + return ServerServiceDefinition.builder(serviceDescriptorV1Alpha) + .addMethod(methodDescriptorV1Alpha, createServerCallHandler(serverServiceDefinitionV1)) + .build(); } - private static class ProtoReflectionStreamObserver - implements Runnable, StreamObserver { - private final ServerReflectionIndex serverReflectionIndex; - private final ServerCallStreamObserver serverCallStreamObserver; - - private boolean closeAfterSend = false; - private ServerReflectionRequest request; - - ProtoReflectionStreamObserver( - ServerReflectionIndex serverReflectionIndex, - ServerCallStreamObserver serverCallStreamObserver) { - this.serverReflectionIndex = serverReflectionIndex; - this.serverCallStreamObserver = checkNotNull(serverCallStreamObserver, "observer"); - } - - @Override - public void run() { - if (request != null) { - handleReflectionRequest(); - } - } - - @Override - public void onNext(ServerReflectionRequest request) { - checkState(this.request == null); - this.request = checkNotNull(request); - handleReflectionRequest(); - } - - private void handleReflectionRequest() { - if (serverCallStreamObserver.isReady()) { - switch (request.getMessageRequestCase()) { - case FILE_BY_FILENAME: - getFileByName(request); - break; - case FILE_CONTAINING_SYMBOL: - getFileContainingSymbol(request); - break; - case FILE_CONTAINING_EXTENSION: - getFileByExtension(request); - break; - case ALL_EXTENSION_NUMBERS_OF_TYPE: - getAllExtensions(request); - break; - case LIST_SERVICES: - listServices(request); - break; - default: - sendErrorResponse( - request, - Status.Code.UNIMPLEMENTED, - "not implemented " + request.getMessageRequestCase()); - } - request = null; - if (closeAfterSend) { - serverCallStreamObserver.onCompleted(); - } else { - serverCallStreamObserver.request(1); - } - } - } - - @Override - public void onCompleted() { - if (request != null) { - closeAfterSend = true; - } else { - serverCallStreamObserver.onCompleted(); - } - } - - @Override - public void onError(Throwable cause) { - serverCallStreamObserver.onError(cause); - } - - private void getFileByName(ServerReflectionRequest request) { - String name = request.getFileByFilename(); - FileDescriptor fd = serverReflectionIndex.getFileDescriptorByName(name); - if (fd != null) { - serverCallStreamObserver.onNext(createServerReflectionResponse(request, fd)); - } else { - sendErrorResponse(request, Status.Code.NOT_FOUND, "File not found."); - } - } - - private void getFileContainingSymbol(ServerReflectionRequest request) { - String symbol = request.getFileContainingSymbol(); - FileDescriptor fd = serverReflectionIndex.getFileDescriptorBySymbol(symbol); - if (fd != null) { - serverCallStreamObserver.onNext(createServerReflectionResponse(request, fd)); - } else { - sendErrorResponse(request, Status.Code.NOT_FOUND, "Symbol not found."); - } - } - - private void getFileByExtension(ServerReflectionRequest request) { - ExtensionRequest extensionRequest = request.getFileContainingExtension(); - String type = extensionRequest.getContainingType(); - int extension = extensionRequest.getExtensionNumber(); - FileDescriptor fd = - 
serverReflectionIndex.getFileDescriptorByExtensionAndNumber(type, extension); - if (fd != null) { - serverCallStreamObserver.onNext(createServerReflectionResponse(request, fd)); - } else { - sendErrorResponse(request, Status.Code.NOT_FOUND, "Extension not found."); - } - } - - private void getAllExtensions(ServerReflectionRequest request) { - String type = request.getAllExtensionNumbersOfType(); - Set extensions = serverReflectionIndex.getExtensionNumbersOfType(type); - if (extensions != null) { - ExtensionNumberResponse.Builder builder = - ExtensionNumberResponse.newBuilder() - .setBaseTypeName(type) - .addAllExtensionNumber(extensions); - serverCallStreamObserver.onNext( - ServerReflectionResponse.newBuilder() - .setValidHost(request.getHost()) - .setOriginalRequest(request) - .setAllExtensionNumbersResponse(builder) - .build()); - } else { - sendErrorResponse(request, Status.Code.NOT_FOUND, "Type not found."); - } - } - - private void listServices(ServerReflectionRequest request) { - ListServiceResponse.Builder builder = ListServiceResponse.newBuilder(); - for (String serviceName : serverReflectionIndex.getServiceNames()) { - builder.addService(ServiceResponse.newBuilder().setName(serviceName)); - } - serverCallStreamObserver.onNext( - ServerReflectionResponse.newBuilder() - .setValidHost(request.getHost()) - .setOriginalRequest(request) - .setListServicesResponse(builder) - .build()); - } - - private void sendErrorResponse( - ServerReflectionRequest request, Status.Code code, String message) { - ServerReflectionResponse response = - ServerReflectionResponse.newBuilder() - .setValidHost(request.getHost()) - .setOriginalRequest(request) - .setErrorResponse( - ErrorResponse.newBuilder() - .setErrorCode(code.value()) - .setErrorMessage(message)) - .build(); - serverCallStreamObserver.onNext(response); - } - - private ServerReflectionResponse createServerReflectionResponse( - ServerReflectionRequest request, FileDescriptor fd) { - FileDescriptorResponse.Builder fdRBuilder = FileDescriptorResponse.newBuilder(); - - Set seenFiles = new HashSet<>(); - Queue frontier = new ArrayDeque<>(); - seenFiles.add(fd.getName()); - frontier.add(fd); - while (!frontier.isEmpty()) { - FileDescriptor nextFd = frontier.remove(); - fdRBuilder.addFileDescriptorProto(nextFd.toProto().toByteString()); - for (FileDescriptor dependencyFd : nextFd.getDependencies()) { - if (!seenFiles.contains(dependencyFd.getName())) { - seenFiles.add(dependencyFd.getName()); - frontier.add(dependencyFd); - } - } - } - return ServerReflectionResponse.newBuilder() - .setValidHost(request.getHost()) - .setOriginalRequest(request) - .setFileDescriptorResponse(fdRBuilder) - .build(); - } - } - - /** - * Indexes the server's services and allows lookups of file descriptors by filename, symbol, type, - * and extension number. - * - *

Internally, this stores separate indices for the immutable and mutable services. When - * queried, the immutable service index is checked for a matching value. Only if there is no match - * in the immutable service index are the mutable services checked. - */ - private static final class ServerReflectionIndex { - private final FileDescriptorIndex immutableServicesIndex; - private final FileDescriptorIndex mutableServicesIndex; - - public ServerReflectionIndex( - List immutableServices, - List mutableServices) { - immutableServicesIndex = new FileDescriptorIndex(immutableServices); - mutableServicesIndex = new FileDescriptorIndex(mutableServices); - } - - private FileDescriptorIndex getMutableServicesIndex() { - return mutableServicesIndex; - } - - private Set getServiceNames() { - Set immutableServiceNames = immutableServicesIndex.getServiceNames(); - Set mutableServiceNames = mutableServicesIndex.getServiceNames(); - Set serviceNames = - new HashSet<>(immutableServiceNames.size() + mutableServiceNames.size()); - serviceNames.addAll(immutableServiceNames); - serviceNames.addAll(mutableServiceNames); - return serviceNames; - } - - @Nullable - private FileDescriptor getFileDescriptorByName(String name) { - FileDescriptor fd = immutableServicesIndex.getFileDescriptorByName(name); - if (fd == null) { - fd = mutableServicesIndex.getFileDescriptorByName(name); - } - return fd; - } - - @Nullable - private FileDescriptor getFileDescriptorBySymbol(String symbol) { - FileDescriptor fd = immutableServicesIndex.getFileDescriptorBySymbol(symbol); - if (fd == null) { - fd = mutableServicesIndex.getFileDescriptorBySymbol(symbol); - } - return fd; - } - - @Nullable - private FileDescriptor getFileDescriptorByExtensionAndNumber(String type, int extension) { - FileDescriptor fd = - immutableServicesIndex.getFileDescriptorByExtensionAndNumber(type, extension); - if (fd == null) { - fd = mutableServicesIndex.getFileDescriptorByExtensionAndNumber(type, extension); - } - return fd; - } - - @Nullable - private Set getExtensionNumbersOfType(String type) { - Set extensionNumbers = immutableServicesIndex.getExtensionNumbersOfType(type); - if (extensionNumbers == null) { - extensionNumbers = mutableServicesIndex.getExtensionNumbersOfType(type); - } - return extensionNumbers; - } - } - - /** - * Provides a set of methods for answering reflection queries for the file descriptors underlying - * a set of services. Used by {@link ServerReflectionIndex} to separately index immutable and - * mutable services. 
- */ - private static final class FileDescriptorIndex { - private final Set serviceNames = new HashSet<>(); - private final Set serviceFileDescriptors = new HashSet<>(); - private final Map fileDescriptorsByName = - new HashMap<>(); - private final Map fileDescriptorsBySymbol = - new HashMap<>(); - private final Map> fileDescriptorsByExtensionAndNumber = - new HashMap<>(); - - FileDescriptorIndex(List services) { - Queue fileDescriptorsToProcess = new ArrayDeque<>(); - Set seenFiles = new HashSet<>(); - for (ServerServiceDefinition service : services) { - io.grpc.ServiceDescriptor serviceDescriptor = service.getServiceDescriptor(); - if (serviceDescriptor.getSchemaDescriptor() instanceof ProtoFileDescriptorSupplier) { - FileDescriptor fileDescriptor = - ((ProtoFileDescriptorSupplier) serviceDescriptor.getSchemaDescriptor()) - .getFileDescriptor(); - String serviceName = serviceDescriptor.getName(); - checkState( - !serviceNames.contains(serviceName), "Service already defined: %s", serviceName); - serviceFileDescriptors.add(fileDescriptor); - serviceNames.add(serviceName); - if (!seenFiles.contains(fileDescriptor.getName())) { - seenFiles.add(fileDescriptor.getName()); - fileDescriptorsToProcess.add(fileDescriptor); - } - } - } - - while (!fileDescriptorsToProcess.isEmpty()) { - FileDescriptor currentFd = fileDescriptorsToProcess.remove(); - processFileDescriptor(currentFd); - for (FileDescriptor dependencyFd : currentFd.getDependencies()) { - if (!seenFiles.contains(dependencyFd.getName())) { - seenFiles.add(dependencyFd.getName()); - fileDescriptorsToProcess.add(dependencyFd); - } - } - } - } - - /** - * Returns the file descriptors for the indexed services, but not their dependencies. This is - * used to check if the server's mutable services have changed. 
- */ - private Set getServiceFileDescriptors() { - return Collections.unmodifiableSet(serviceFileDescriptors); - } - - private Set getServiceNames() { - return Collections.unmodifiableSet(serviceNames); - } - - @Nullable - private FileDescriptor getFileDescriptorByName(String name) { - return fileDescriptorsByName.get(name); - } - - @Nullable - private FileDescriptor getFileDescriptorBySymbol(String symbol) { - return fileDescriptorsBySymbol.get(symbol); - } - - @Nullable - private FileDescriptor getFileDescriptorByExtensionAndNumber(String type, int number) { - if (fileDescriptorsByExtensionAndNumber.containsKey(type)) { - return fileDescriptorsByExtensionAndNumber.get(type).get(number); - } - return null; - } - - @Nullable - private Set getExtensionNumbersOfType(String type) { - if (fileDescriptorsByExtensionAndNumber.containsKey(type)) { - return Collections.unmodifiableSet(fileDescriptorsByExtensionAndNumber.get(type).keySet()); - } - return null; - } - - private void processFileDescriptor(FileDescriptor fd) { - String fdName = fd.getName(); - checkState(!fileDescriptorsByName.containsKey(fdName), "File name already used: %s", fdName); - fileDescriptorsByName.put(fdName, fd); - for (ServiceDescriptor service : fd.getServices()) { - processService(service, fd); - } - for (Descriptor type : fd.getMessageTypes()) { - processType(type, fd); - } - for (FieldDescriptor extension : fd.getExtensions()) { - processExtension(extension, fd); - } - } - - private void processService(ServiceDescriptor service, FileDescriptor fd) { - String serviceName = service.getFullName(); - checkState( - !fileDescriptorsBySymbol.containsKey(serviceName), - "Service already defined: %s", - serviceName); - fileDescriptorsBySymbol.put(serviceName, fd); - for (MethodDescriptor method : service.getMethods()) { - String methodName = method.getFullName(); - checkState( - !fileDescriptorsBySymbol.containsKey(methodName), - "Method already defined: %s", - methodName); - fileDescriptorsBySymbol.put(methodName, fd); - } - } - - private void processType(Descriptor type, FileDescriptor fd) { - String typeName = type.getFullName(); - checkState( - !fileDescriptorsBySymbol.containsKey(typeName), "Type already defined: %s", typeName); - fileDescriptorsBySymbol.put(typeName, fd); - for (FieldDescriptor extension : type.getExtensions()) { - processExtension(extension, fd); - } - for (Descriptor nestedType : type.getNestedTypes()) { - processType(nestedType, fd); - } - } - - private void processExtension(FieldDescriptor extension, FileDescriptor fd) { - String extensionName = extension.getContainingType().getFullName(); - int extensionNumber = extension.getNumber(); - if (!fileDescriptorsByExtensionAndNumber.containsKey(extensionName)) { - fileDescriptorsByExtensionAndNumber.put( - extensionName, new HashMap()); - } - checkState( - !fileDescriptorsByExtensionAndNumber.get(extensionName).containsKey(extensionNumber), - "Extension name and number already defined: %s, %s", - extensionName, - extensionNumber); - fileDescriptorsByExtensionAndNumber.get(extensionName).put(extensionNumber, fd); - } + @SuppressWarnings("unchecked") + private ServerCallHandler + createServerCallHandler( + ServerServiceDefinition serverServiceDefinition) { + return (ServerCallHandler) + serverServiceDefinition.getMethod( + ServerReflectionGrpc.getServerReflectionInfoMethod().getFullMethodName()) + .getServerCallHandler(); } } diff --git a/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionServiceV1.java 
b/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionServiceV1.java new file mode 100644 index 00000000000..578e9bbd409 --- /dev/null +++ b/services/src/main/java/io/grpc/protobuf/services/ProtoReflectionServiceV1.java @@ -0,0 +1,539 @@ +/* + * Copyright 2016 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.protobuf.services; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.FieldDescriptor; +import com.google.protobuf.Descriptors.FileDescriptor; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Descriptors.ServiceDescriptor; +import io.grpc.BindableService; +import io.grpc.ExperimentalApi; +import io.grpc.InternalServer; +import io.grpc.Server; +import io.grpc.ServerServiceDefinition; +import io.grpc.Status; +import io.grpc.protobuf.ProtoFileDescriptorSupplier; +import io.grpc.reflection.v1.ErrorResponse; +import io.grpc.reflection.v1.ExtensionNumberResponse; +import io.grpc.reflection.v1.ExtensionRequest; +import io.grpc.reflection.v1.FileDescriptorResponse; +import io.grpc.reflection.v1.ListServiceResponse; +import io.grpc.reflection.v1.ServerReflectionGrpc; +import io.grpc.reflection.v1.ServerReflectionRequest; +import io.grpc.reflection.v1.ServerReflectionResponse; +import io.grpc.reflection.v1.ServiceResponse; +import io.grpc.stub.ServerCallStreamObserver; +import io.grpc.stub.StreamObserver; +import java.util.ArrayDeque; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.WeakHashMap; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** + * Provides a reflection service for Protobuf services (including the reflection service itself). + * + *

Separately tracks mutable and immutable services. Throws an exception if either group of + * services contains multiple Protobuf files with declarations of the same service, method, type, or + * extension. + */ +@ExperimentalApi("https://github.com/grpc/grpc-java/issues/2222") +public final class ProtoReflectionServiceV1 extends ServerReflectionGrpc.ServerReflectionImplBase { + + private final Object lock = new Object(); + + @GuardedBy("lock") + private final Map serverReflectionIndexes = new WeakHashMap<>(); + + private ProtoReflectionServiceV1() {} + + /** + * Creates a instance of {@link ProtoReflectionServiceV1}. + */ + public static BindableService newInstance() { + return new ProtoReflectionServiceV1(); + } + + /** + * Retrieves the index for services of the server that dispatches the current call. Computes + * one if not exist. The index is updated if any changes to the server's mutable services are + * detected. A change is any addition or removal in the set of file descriptors attached to the + * mutable services or a change in the service names. + */ + private ServerReflectionIndex getRefreshedIndex() { + synchronized (lock) { + Server server = InternalServer.SERVER_CONTEXT_KEY.get(); + ServerReflectionIndex index = serverReflectionIndexes.get(server); + if (index == null) { + index = + new ServerReflectionIndex(server.getImmutableServices(), server.getMutableServices()); + serverReflectionIndexes.put(server, index); + return index; + } + + Set serverFileDescriptors = new HashSet<>(); + Set serverServiceNames = new HashSet<>(); + List serverMutableServices = server.getMutableServices(); + for (ServerServiceDefinition mutableService : serverMutableServices) { + io.grpc.ServiceDescriptor serviceDescriptor = mutableService.getServiceDescriptor(); + if (serviceDescriptor.getSchemaDescriptor() instanceof ProtoFileDescriptorSupplier) { + String serviceName = serviceDescriptor.getName(); + FileDescriptor fileDescriptor = + ((ProtoFileDescriptorSupplier) serviceDescriptor.getSchemaDescriptor()) + .getFileDescriptor(); + serverFileDescriptors.add(fileDescriptor); + serverServiceNames.add(serviceName); + } + } + + // Replace the index if the underlying mutable services have changed. Check both the file + // descriptors and the service names, because one file descriptor can define multiple + // services. 
+ FileDescriptorIndex mutableServicesIndex = index.getMutableServicesIndex(); + if (!mutableServicesIndex.getServiceFileDescriptors().equals(serverFileDescriptors) + || !mutableServicesIndex.getServiceNames().equals(serverServiceNames)) { + index = + new ServerReflectionIndex(server.getImmutableServices(), serverMutableServices); + serverReflectionIndexes.put(server, index); + } + + return index; + } + } + + @Override + public StreamObserver serverReflectionInfo( + final StreamObserver responseObserver) { + final ServerCallStreamObserver serverCallStreamObserver = + (ServerCallStreamObserver) responseObserver; + ProtoReflectionStreamObserver requestObserver = + new ProtoReflectionStreamObserver(getRefreshedIndex(), serverCallStreamObserver); + serverCallStreamObserver.setOnReadyHandler(requestObserver); + serverCallStreamObserver.disableAutoRequest(); + serverCallStreamObserver.request(1); + return requestObserver; + } + + private static class ProtoReflectionStreamObserver + implements Runnable, StreamObserver { + private final ServerReflectionIndex serverReflectionIndex; + private final ServerCallStreamObserver serverCallStreamObserver; + + private boolean closeAfterSend = false; + private ServerReflectionRequest request; + + ProtoReflectionStreamObserver( + ServerReflectionIndex serverReflectionIndex, + ServerCallStreamObserver serverCallStreamObserver) { + this.serverReflectionIndex = serverReflectionIndex; + this.serverCallStreamObserver = checkNotNull(serverCallStreamObserver, "observer"); + } + + @Override + public void run() { + if (request != null) { + handleReflectionRequest(); + } + } + + @Override + public void onNext(ServerReflectionRequest request) { + checkState(this.request == null); + this.request = checkNotNull(request); + handleReflectionRequest(); + } + + private void handleReflectionRequest() { + if (serverCallStreamObserver.isReady()) { + switch (request.getMessageRequestCase()) { + case FILE_BY_FILENAME: + getFileByName(request); + break; + case FILE_CONTAINING_SYMBOL: + getFileContainingSymbol(request); + break; + case FILE_CONTAINING_EXTENSION: + getFileByExtension(request); + break; + case ALL_EXTENSION_NUMBERS_OF_TYPE: + getAllExtensions(request); + break; + case LIST_SERVICES: + listServices(request); + break; + default: + sendErrorResponse( + request, + Status.Code.UNIMPLEMENTED, + "not implemented " + request.getMessageRequestCase()); + } + request = null; + if (closeAfterSend) { + serverCallStreamObserver.onCompleted(); + } else { + serverCallStreamObserver.request(1); + } + } + } + + @Override + public void onCompleted() { + if (request != null) { + closeAfterSend = true; + } else { + serverCallStreamObserver.onCompleted(); + } + } + + @Override + public void onError(Throwable cause) { + serverCallStreamObserver.onError(cause); + } + + private void getFileByName(ServerReflectionRequest request) { + String name = request.getFileByFilename(); + FileDescriptor fd = serverReflectionIndex.getFileDescriptorByName(name); + if (fd != null) { + serverCallStreamObserver.onNext(createServerReflectionResponse(request, fd)); + } else { + sendErrorResponse(request, Status.Code.NOT_FOUND, "File not found."); + } + } + + private void getFileContainingSymbol(ServerReflectionRequest request) { + String symbol = request.getFileContainingSymbol(); + FileDescriptor fd = serverReflectionIndex.getFileDescriptorBySymbol(symbol); + if (fd != null) { + serverCallStreamObserver.onNext(createServerReflectionResponse(request, fd)); + } else { + sendErrorResponse(request, 
Status.Code.NOT_FOUND, "Symbol not found."); + } + } + + private void getFileByExtension(ServerReflectionRequest request) { + ExtensionRequest extensionRequest = request.getFileContainingExtension(); + String type = extensionRequest.getContainingType(); + int extension = extensionRequest.getExtensionNumber(); + FileDescriptor fd = + serverReflectionIndex.getFileDescriptorByExtensionAndNumber(type, extension); + if (fd != null) { + serverCallStreamObserver.onNext(createServerReflectionResponse(request, fd)); + } else { + sendErrorResponse(request, Status.Code.NOT_FOUND, "Extension not found."); + } + } + + private void getAllExtensions(ServerReflectionRequest request) { + String type = request.getAllExtensionNumbersOfType(); + Set extensions = serverReflectionIndex.getExtensionNumbersOfType(type); + if (extensions != null) { + ExtensionNumberResponse.Builder builder = + ExtensionNumberResponse.newBuilder() + .setBaseTypeName(type) + .addAllExtensionNumber(extensions); + serverCallStreamObserver.onNext( + ServerReflectionResponse.newBuilder() + .setValidHost(request.getHost()) + .setOriginalRequest(request) + .setAllExtensionNumbersResponse(builder) + .build()); + } else { + sendErrorResponse(request, Status.Code.NOT_FOUND, "Type not found."); + } + } + + private void listServices(ServerReflectionRequest request) { + ListServiceResponse.Builder builder = ListServiceResponse.newBuilder(); + for (String serviceName : serverReflectionIndex.getServiceNames()) { + builder.addService(ServiceResponse.newBuilder().setName(serviceName)); + } + serverCallStreamObserver.onNext( + ServerReflectionResponse.newBuilder() + .setValidHost(request.getHost()) + .setOriginalRequest(request) + .setListServicesResponse(builder) + .build()); + } + + private void sendErrorResponse( + ServerReflectionRequest request, Status.Code code, String message) { + ServerReflectionResponse response = + ServerReflectionResponse.newBuilder() + .setValidHost(request.getHost()) + .setOriginalRequest(request) + .setErrorResponse( + ErrorResponse.newBuilder() + .setErrorCode(code.value()) + .setErrorMessage(message)) + .build(); + serverCallStreamObserver.onNext(response); + } + + private ServerReflectionResponse createServerReflectionResponse( + ServerReflectionRequest request, FileDescriptor fd) { + FileDescriptorResponse.Builder fdRBuilder = FileDescriptorResponse.newBuilder(); + + Set seenFiles = new HashSet<>(); + Queue frontier = new ArrayDeque<>(); + seenFiles.add(fd.getName()); + frontier.add(fd); + while (!frontier.isEmpty()) { + FileDescriptor nextFd = frontier.remove(); + fdRBuilder.addFileDescriptorProto(nextFd.toProto().toByteString()); + for (FileDescriptor dependencyFd : nextFd.getDependencies()) { + if (!seenFiles.contains(dependencyFd.getName())) { + seenFiles.add(dependencyFd.getName()); + frontier.add(dependencyFd); + } + } + } + return ServerReflectionResponse.newBuilder() + .setValidHost(request.getHost()) + .setOriginalRequest(request) + .setFileDescriptorResponse(fdRBuilder) + .build(); + } + } + + /** + * Indexes the server's services and allows lookups of file descriptors by filename, symbol, type, + * and extension number. + * + *

Internally, this stores separate indices for the immutable and mutable services. When + * queried, the immutable service index is checked for a matching value. Only if there is no match + * in the immutable service index are the mutable services checked. + */ + private static final class ServerReflectionIndex { + private final FileDescriptorIndex immutableServicesIndex; + private final FileDescriptorIndex mutableServicesIndex; + + public ServerReflectionIndex( + List immutableServices, + List mutableServices) { + immutableServicesIndex = new FileDescriptorIndex(immutableServices); + mutableServicesIndex = new FileDescriptorIndex(mutableServices); + } + + private FileDescriptorIndex getMutableServicesIndex() { + return mutableServicesIndex; + } + + private Set getServiceNames() { + Set immutableServiceNames = immutableServicesIndex.getServiceNames(); + Set mutableServiceNames = mutableServicesIndex.getServiceNames(); + Set serviceNames = + new HashSet<>(immutableServiceNames.size() + mutableServiceNames.size()); + serviceNames.addAll(immutableServiceNames); + serviceNames.addAll(mutableServiceNames); + return serviceNames; + } + + @Nullable + private FileDescriptor getFileDescriptorByName(String name) { + FileDescriptor fd = immutableServicesIndex.getFileDescriptorByName(name); + if (fd == null) { + fd = mutableServicesIndex.getFileDescriptorByName(name); + } + return fd; + } + + @Nullable + private FileDescriptor getFileDescriptorBySymbol(String symbol) { + FileDescriptor fd = immutableServicesIndex.getFileDescriptorBySymbol(symbol); + if (fd == null) { + fd = mutableServicesIndex.getFileDescriptorBySymbol(symbol); + } + return fd; + } + + @Nullable + private FileDescriptor getFileDescriptorByExtensionAndNumber(String type, int extension) { + FileDescriptor fd = + immutableServicesIndex.getFileDescriptorByExtensionAndNumber(type, extension); + if (fd == null) { + fd = mutableServicesIndex.getFileDescriptorByExtensionAndNumber(type, extension); + } + return fd; + } + + @Nullable + private Set getExtensionNumbersOfType(String type) { + Set extensionNumbers = immutableServicesIndex.getExtensionNumbersOfType(type); + if (extensionNumbers == null) { + extensionNumbers = mutableServicesIndex.getExtensionNumbersOfType(type); + } + return extensionNumbers; + } + } + + /** + * Provides a set of methods for answering reflection queries for the file descriptors underlying + * a set of services. Used by {@link ServerReflectionIndex} to separately index immutable and + * mutable services. 
+ */ + private static final class FileDescriptorIndex { + private final Set serviceNames = new HashSet<>(); + private final Set serviceFileDescriptors = new HashSet<>(); + private final Map fileDescriptorsByName = + new HashMap<>(); + private final Map fileDescriptorsBySymbol = + new HashMap<>(); + private final Map> fileDescriptorsByExtensionAndNumber = + new HashMap<>(); + + FileDescriptorIndex(List services) { + Queue fileDescriptorsToProcess = new ArrayDeque<>(); + Set seenFiles = new HashSet<>(); + for (ServerServiceDefinition service : services) { + io.grpc.ServiceDescriptor serviceDescriptor = service.getServiceDescriptor(); + if (serviceDescriptor.getSchemaDescriptor() instanceof ProtoFileDescriptorSupplier) { + FileDescriptor fileDescriptor = + ((ProtoFileDescriptorSupplier) serviceDescriptor.getSchemaDescriptor()) + .getFileDescriptor(); + String serviceName = serviceDescriptor.getName(); + checkState( + !serviceNames.contains(serviceName), "Service already defined: %s", serviceName); + serviceFileDescriptors.add(fileDescriptor); + serviceNames.add(serviceName); + if (!seenFiles.contains(fileDescriptor.getName())) { + seenFiles.add(fileDescriptor.getName()); + fileDescriptorsToProcess.add(fileDescriptor); + } + } + } + + while (!fileDescriptorsToProcess.isEmpty()) { + FileDescriptor currentFd = fileDescriptorsToProcess.remove(); + processFileDescriptor(currentFd); + for (FileDescriptor dependencyFd : currentFd.getDependencies()) { + if (!seenFiles.contains(dependencyFd.getName())) { + seenFiles.add(dependencyFd.getName()); + fileDescriptorsToProcess.add(dependencyFd); + } + } + } + } + + /** + * Returns the file descriptors for the indexed services, but not their dependencies. This is + * used to check if the server's mutable services have changed. 
+ */ + private Set getServiceFileDescriptors() { + return Collections.unmodifiableSet(serviceFileDescriptors); + } + + private Set getServiceNames() { + return Collections.unmodifiableSet(serviceNames); + } + + @Nullable + private FileDescriptor getFileDescriptorByName(String name) { + return fileDescriptorsByName.get(name); + } + + @Nullable + private FileDescriptor getFileDescriptorBySymbol(String symbol) { + return fileDescriptorsBySymbol.get(symbol); + } + + @Nullable + private FileDescriptor getFileDescriptorByExtensionAndNumber(String type, int number) { + if (fileDescriptorsByExtensionAndNumber.containsKey(type)) { + return fileDescriptorsByExtensionAndNumber.get(type).get(number); + } + return null; + } + + @Nullable + private Set getExtensionNumbersOfType(String type) { + if (fileDescriptorsByExtensionAndNumber.containsKey(type)) { + return Collections.unmodifiableSet(fileDescriptorsByExtensionAndNumber.get(type).keySet()); + } + return null; + } + + private void processFileDescriptor(FileDescriptor fd) { + String fdName = fd.getName(); + checkState(!fileDescriptorsByName.containsKey(fdName), "File name already used: %s", fdName); + fileDescriptorsByName.put(fdName, fd); + for (ServiceDescriptor service : fd.getServices()) { + processService(service, fd); + } + for (Descriptor type : fd.getMessageTypes()) { + processType(type, fd); + } + for (FieldDescriptor extension : fd.getExtensions()) { + processExtension(extension, fd); + } + } + + private void processService(ServiceDescriptor service, FileDescriptor fd) { + String serviceName = service.getFullName(); + checkState( + !fileDescriptorsBySymbol.containsKey(serviceName), + "Service already defined: %s", + serviceName); + fileDescriptorsBySymbol.put(serviceName, fd); + for (MethodDescriptor method : service.getMethods()) { + String methodName = method.getFullName(); + checkState( + !fileDescriptorsBySymbol.containsKey(methodName), + "Method already defined: %s", + methodName); + fileDescriptorsBySymbol.put(methodName, fd); + } + } + + private void processType(Descriptor type, FileDescriptor fd) { + String typeName = type.getFullName(); + checkState( + !fileDescriptorsBySymbol.containsKey(typeName), "Type already defined: %s", typeName); + fileDescriptorsBySymbol.put(typeName, fd); + for (FieldDescriptor extension : type.getExtensions()) { + processExtension(extension, fd); + } + for (Descriptor nestedType : type.getNestedTypes()) { + processType(nestedType, fd); + } + } + + private void processExtension(FieldDescriptor extension, FileDescriptor fd) { + String extensionName = extension.getContainingType().getFullName(); + int extensionNumber = extension.getNumber(); + if (!fileDescriptorsByExtensionAndNumber.containsKey(extensionName)) { + fileDescriptorsByExtensionAndNumber.put( + extensionName, new HashMap()); + } + checkState( + !fileDescriptorsByExtensionAndNumber.get(extensionName).containsKey(extensionNumber), + "Extension name and number already defined: %s, %s", + extensionName, + extensionNumber); + fileDescriptorsByExtensionAndNumber.get(extensionName).put(extensionNumber, fd); + } + } +} diff --git a/services/src/main/proto/grpc/reflection/v1/reflection.proto b/services/src/main/proto/grpc/reflection/v1/reflection.proto new file mode 100644 index 00000000000..1a2ceedc3d2 --- /dev/null +++ b/services/src/main/proto/grpc/reflection/v1/reflection.proto @@ -0,0 +1,147 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +syntax = "proto3"; + +package grpc.reflection.v1; + +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1"; +option java_multiple_files = true; +option java_package = "io.grpc.reflection.v1"; +option java_outer_classname = "ServerReflectionProto"; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be . + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server sets one of the following fields according to the message_request + // in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. 
+ // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requests. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services requests. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is <package>.<type> + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is <package>.<service> + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} + diff --git a/services/src/test/java/io/grpc/protobuf/services/ProtoReflectionServiceV1Test.java b/services/src/test/java/io/grpc/protobuf/services/ProtoReflectionServiceV1Test.java new file mode 100644 index 00000000000..47bd3e792ad --- /dev/null +++ b/services/src/test/java/io/grpc/protobuf/services/ProtoReflectionServiceV1Test.java @@ -0,0 +1,670 @@ +/* + * Copyright 2016 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package io.grpc.protobuf.services; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.protobuf.ByteString; +import io.grpc.BindableService; +import io.grpc.ManagedChannel; +import io.grpc.Server; +import io.grpc.ServerServiceDefinition; +import io.grpc.inprocess.InProcessChannelBuilder; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.internal.testing.StreamRecorder; +import io.grpc.reflection.testing.AnotherDynamicServiceGrpc; +import io.grpc.reflection.testing.AnotherReflectableServiceGrpc; +import io.grpc.reflection.testing.DynamicReflectionTestDepthTwoProto; +import io.grpc.reflection.testing.DynamicServiceGrpc; +import io.grpc.reflection.testing.ReflectableServiceGrpc; +import io.grpc.reflection.testing.ReflectionTestDepthThreeProto; +import io.grpc.reflection.testing.ReflectionTestDepthTwoAlternateProto; +import io.grpc.reflection.testing.ReflectionTestDepthTwoProto; +import io.grpc.reflection.testing.ReflectionTestProto; +import io.grpc.reflection.v1.ExtensionNumberResponse; +import io.grpc.reflection.v1.ExtensionRequest; +import io.grpc.reflection.v1.FileDescriptorResponse; +import io.grpc.reflection.v1.ServerReflectionGrpc; +import io.grpc.reflection.v1.ServerReflectionRequest; +import io.grpc.reflection.v1.ServerReflectionResponse; +import io.grpc.reflection.v1.ServiceResponse; +import io.grpc.stub.ClientCallStreamObserver; +import io.grpc.stub.ClientResponseObserver; +import io.grpc.stub.StreamObserver; +import io.grpc.testing.GrpcCleanupRule; +import io.grpc.util.MutableHandlerRegistry; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for {@link ProtoReflectionServiceV1}. 
*/ +@RunWith(JUnit4.class) +public class ProtoReflectionServiceV1Test { + @Rule + public GrpcCleanupRule grpcCleanupRule = new GrpcCleanupRule(); + + private static final String TEST_HOST = "localhost"; + private MutableHandlerRegistry handlerRegistry = new MutableHandlerRegistry(); + private BindableService reflectionService; + private ServerServiceDefinition dynamicService = + new DynamicServiceGrpc.DynamicServiceImplBase() {}.bindService(); + private ServerServiceDefinition anotherDynamicService = + new AnotherDynamicServiceGrpc.AnotherDynamicServiceImplBase() {}.bindService(); + private ServerReflectionGrpc.ServerReflectionStub stub; + + @Before + public void setUp() throws Exception { + reflectionService = ProtoReflectionServiceV1.newInstance(); + Server server = + InProcessServerBuilder.forName("proto-reflection-test") + .directExecutor() + .addService(reflectionService) + .addService(new ReflectableServiceGrpc.ReflectableServiceImplBase() {}) + .fallbackHandlerRegistry(handlerRegistry) + .build() + .start(); + grpcCleanupRule.register(server); + ManagedChannel channel = + grpcCleanupRule.register( + InProcessChannelBuilder.forName("proto-reflection-test").directExecutor().build()); + stub = ServerReflectionGrpc.newStub(channel); + } + + @Test + public void listServices() throws Exception { + Set originalServices = + new HashSet<>( + Arrays.asList( + ServiceResponse.newBuilder() + .setName("grpc.reflection.v1.ServerReflection") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.ReflectableService") + .build())); + assertServiceResponseEquals(originalServices); + + handlerRegistry.addService(dynamicService); + assertServiceResponseEquals( + new HashSet<>( + Arrays.asList( + ServiceResponse.newBuilder() + .setName("grpc.reflection.v1.ServerReflection") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.ReflectableService") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.DynamicService") + .build()))); + + handlerRegistry.addService(anotherDynamicService); + assertServiceResponseEquals( + new HashSet<>( + Arrays.asList( + ServiceResponse.newBuilder() + .setName("grpc.reflection.v1.ServerReflection") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.ReflectableService") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.DynamicService") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.AnotherDynamicService") + .build()))); + + handlerRegistry.removeService(dynamicService); + assertServiceResponseEquals( + new HashSet<>( + Arrays.asList( + ServiceResponse.newBuilder() + .setName("grpc.reflection.v1.ServerReflection") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.ReflectableService") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.AnotherDynamicService") + .build()))); + + handlerRegistry.removeService(anotherDynamicService); + assertServiceResponseEquals(originalServices); + } + + @Test + public void fileByFilename() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileByFilename("io/grpc/reflection/testing/reflection_test_depth_three.proto") + .build(); + + ServerReflectionResponse goldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + 
.addFileDescriptorProto( + ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + + assertEquals(goldenResponse, responseObserver.firstValue().get()); + } + + @Test + public void fileByFilenameConsistentForMutableServices() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileByFilename("io/grpc/reflection/testing/dynamic_reflection_test_depth_two.proto") + .build(); + ServerReflectionResponse goldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + .addFileDescriptorProto( + DynamicReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + handlerRegistry.addService(dynamicService); + requestObserver.onNext(request); + requestObserver.onCompleted(); + StreamRecorder responseObserver2 = StreamRecorder.create(); + StreamObserver requestObserver2 = + stub.serverReflectionInfo(responseObserver2); + handlerRegistry.removeService(dynamicService); + requestObserver2.onNext(request); + requestObserver2.onCompleted(); + StreamRecorder responseObserver3 = StreamRecorder.create(); + StreamObserver requestObserver3 = + stub.serverReflectionInfo(responseObserver3); + requestObserver3.onNext(request); + requestObserver3.onCompleted(); + + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver.firstValue().get().getMessageResponseCase()); + assertEquals(goldenResponse, responseObserver2.firstValue().get()); + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver3.firstValue().get().getMessageResponseCase()); + } + + @Test + public void fileContainingSymbol() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileContainingSymbol("grpc.reflection.testing.ReflectableService.Method") + .build(); + + List goldenResponse = + Arrays.asList( + ReflectionTestProto.getDescriptor().toProto().toByteString(), + ReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString(), + ReflectionTestDepthTwoAlternateProto.getDescriptor().toProto().toByteString(), + ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString()); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List response = + responseObserver + .firstValue() + .get() + .getFileDescriptorResponse() + .getFileDescriptorProtoList(); + assertEquals(goldenResponse.size(), response.size()); + assertEquals(new HashSet<>(goldenResponse), new HashSet<>(response)); + } + + @Test + public void fileContainingNestedSymbol() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileContainingSymbol("grpc.reflection.testing.NestedTypeOuter.Middle.Inner") + .build(); + + ServerReflectionResponse goldenResponse = + 
ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + .addFileDescriptorProto( + ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + assertEquals(goldenResponse, responseObserver.firstValue().get()); + } + + @Test + public void fileContainingSymbolForMutableServices() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileContainingSymbol("grpc.reflection.testing.DynamicRequest") + .build(); + ServerReflectionResponse goldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + .addFileDescriptorProto( + DynamicReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + handlerRegistry.addService(dynamicService); + requestObserver.onNext(request); + requestObserver.onCompleted(); + StreamRecorder responseObserver2 = StreamRecorder.create(); + StreamObserver requestObserver2 = + stub.serverReflectionInfo(responseObserver2); + handlerRegistry.removeService(dynamicService); + requestObserver2.onNext(request); + requestObserver2.onCompleted(); + StreamRecorder responseObserver3 = StreamRecorder.create(); + StreamObserver requestObserver3 = + stub.serverReflectionInfo(responseObserver3); + requestObserver3.onNext(request); + requestObserver3.onCompleted(); + + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver.firstValue().get().getMessageResponseCase()); + assertEquals(goldenResponse, responseObserver2.firstValue().get()); + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver3.firstValue().get().getMessageResponseCase()); + } + + @Test + public void fileContainingExtension() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileContainingExtension( + ExtensionRequest.newBuilder() + .setContainingType("grpc.reflection.testing.ThirdLevelType") + .setExtensionNumber(100) + .build()) + .build(); + + List goldenResponse = + Arrays.asList( + ReflectionTestProto.getDescriptor().toProto().toByteString(), + ReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString(), + ReflectionTestDepthTwoAlternateProto.getDescriptor().toProto().toByteString(), + ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString()); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List response = + responseObserver + .firstValue() + .get() + .getFileDescriptorResponse() + .getFileDescriptorProtoList(); + assertEquals(goldenResponse.size(), response.size()); + assertEquals(new HashSet<>(goldenResponse), new HashSet<>(response)); + } + + @Test + public void fileContainingNestedExtension() throws Exception { + ServerReflectionRequest request = + 
ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileContainingExtension( + ExtensionRequest.newBuilder() + .setContainingType("grpc.reflection.testing.ThirdLevelType") + .setExtensionNumber(101) + .build()) + .build(); + + ServerReflectionResponse goldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + .addFileDescriptorProto( + ReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString()) + .addFileDescriptorProto( + ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + assertEquals(goldenResponse, responseObserver.firstValue().get()); + } + + @Test + public void fileContainingExtensionForMutableServices() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileContainingExtension( + ExtensionRequest.newBuilder() + .setContainingType("grpc.reflection.testing.TypeWithExtensions") + .setExtensionNumber(200) + .build()) + .build(); + ServerReflectionResponse goldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + .addFileDescriptorProto( + DynamicReflectionTestDepthTwoProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + handlerRegistry.addService(dynamicService); + requestObserver.onNext(request); + requestObserver.onCompleted(); + StreamRecorder responseObserver2 = StreamRecorder.create(); + StreamObserver requestObserver2 = + stub.serverReflectionInfo(responseObserver2); + handlerRegistry.removeService(dynamicService); + requestObserver2.onNext(request); + requestObserver2.onCompleted(); + StreamRecorder responseObserver3 = StreamRecorder.create(); + StreamObserver requestObserver3 = + stub.serverReflectionInfo(responseObserver3); + requestObserver3.onNext(request); + requestObserver3.onCompleted(); + + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver.firstValue().get().getMessageResponseCase()); + assertEquals(goldenResponse, responseObserver2.firstValue().get()); + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver3.firstValue().get().getMessageResponseCase()); + } + + @Test + public void allExtensionNumbersOfType() throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setAllExtensionNumbersOfType("grpc.reflection.testing.ThirdLevelType") + .build(); + + Set goldenResponse = new HashSet<>(Arrays.asList(100, 101)); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + Set extensionNumberResponseSet = + new HashSet<>( + responseObserver + .firstValue() + .get() + .getAllExtensionNumbersResponse() + .getExtensionNumberList()); + assertEquals(goldenResponse, extensionNumberResponseSet); + } + + @Test + public void 
allExtensionNumbersOfTypeForMutableServices() throws Exception { + String type = "grpc.reflection.testing.TypeWithExtensions"; + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setAllExtensionNumbersOfType(type) + .build(); + ServerReflectionResponse goldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(request) + .setAllExtensionNumbersResponse( + ExtensionNumberResponse.newBuilder() + .setBaseTypeName(type) + .addExtensionNumber(200) + .build()) + .build(); + + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + handlerRegistry.addService(dynamicService); + requestObserver.onNext(request); + requestObserver.onCompleted(); + StreamRecorder responseObserver2 = StreamRecorder.create(); + StreamObserver requestObserver2 = + stub.serverReflectionInfo(responseObserver2); + handlerRegistry.removeService(dynamicService); + requestObserver2.onNext(request); + requestObserver2.onCompleted(); + StreamRecorder responseObserver3 = StreamRecorder.create(); + StreamObserver requestObserver3 = + stub.serverReflectionInfo(responseObserver3); + requestObserver3.onNext(request); + requestObserver3.onCompleted(); + + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver.firstValue().get().getMessageResponseCase()); + assertEquals(goldenResponse, responseObserver2.firstValue().get()); + assertEquals( + ServerReflectionResponse.MessageResponseCase.ERROR_RESPONSE, + responseObserver3.firstValue().get().getMessageResponseCase()); + } + + @Test + public void sharedServiceBetweenServers() + throws IOException, ExecutionException, InterruptedException { + Server anotherServer = InProcessServerBuilder.forName("proto-reflection-test-2") + .directExecutor() + .addService(reflectionService) + .addService(new AnotherReflectableServiceGrpc.AnotherReflectableServiceImplBase() {}) + .build() + .start(); + grpcCleanupRule.register(anotherServer); + ManagedChannel anotherChannel = grpcCleanupRule.register( + InProcessChannelBuilder.forName("proto-reflection-test-2").directExecutor().build()); + ServerReflectionGrpc.ServerReflectionStub stub2 = ServerReflectionGrpc.newStub(anotherChannel); + + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder().setHost(TEST_HOST).setListServices("services").build(); + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub2.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + List response = + responseObserver.firstValue().get().getListServicesResponse().getServiceList(); + assertEquals(new HashSet<>( + Arrays.asList( + ServiceResponse.newBuilder() + .setName("grpc.reflection.v1.ServerReflection") + .build(), + ServiceResponse.newBuilder() + .setName("grpc.reflection.testing.AnotherReflectableService") + .build())), + new HashSet<>(response)); + } + + @Test + public void flowControl() throws Exception { + FlowControlClientResponseObserver clientResponseObserver = + new FlowControlClientResponseObserver(); + ClientCallStreamObserver requestObserver = + (ClientCallStreamObserver) + stub.serverReflectionInfo(clientResponseObserver); + + // Verify we don't receive a response until we request it. 
+ requestObserver.onNext(flowControlRequest); + assertEquals(0, clientResponseObserver.getResponses().size()); + + requestObserver.request(1); + assertEquals(1, clientResponseObserver.getResponses().size()); + assertEquals(flowControlGoldenResponse, clientResponseObserver.getResponses().get(0)); + + // Verify we don't receive an additional response until we request it. + requestObserver.onNext(flowControlRequest); + assertEquals(1, clientResponseObserver.getResponses().size()); + + requestObserver.request(1); + assertEquals(2, clientResponseObserver.getResponses().size()); + assertEquals(flowControlGoldenResponse, clientResponseObserver.getResponses().get(1)); + + requestObserver.onCompleted(); + assertTrue(clientResponseObserver.onCompleteCalled()); + } + + @Test + public void flowControlOnCompleteWithPendingRequest() throws Exception { + FlowControlClientResponseObserver clientResponseObserver = + new FlowControlClientResponseObserver(); + ClientCallStreamObserver requestObserver = + (ClientCallStreamObserver) + stub.serverReflectionInfo(clientResponseObserver); + + requestObserver.onNext(flowControlRequest); + requestObserver.onCompleted(); + assertEquals(0, clientResponseObserver.getResponses().size()); + assertFalse(clientResponseObserver.onCompleteCalled()); + + requestObserver.request(1); + assertTrue(clientResponseObserver.onCompleteCalled()); + assertEquals(1, clientResponseObserver.getResponses().size()); + assertEquals(flowControlGoldenResponse, clientResponseObserver.getResponses().get(0)); + } + + private final ServerReflectionRequest flowControlRequest = + ServerReflectionRequest.newBuilder() + .setHost(TEST_HOST) + .setFileByFilename("io/grpc/reflection/testing/reflection_test_depth_three.proto") + .build(); + private final ServerReflectionResponse flowControlGoldenResponse = + ServerReflectionResponse.newBuilder() + .setValidHost(TEST_HOST) + .setOriginalRequest(flowControlRequest) + .setFileDescriptorResponse( + FileDescriptorResponse.newBuilder() + .addFileDescriptorProto( + ReflectionTestDepthThreeProto.getDescriptor().toProto().toByteString()) + .build()) + .build(); + + private static class FlowControlClientResponseObserver + implements ClientResponseObserver { + private final List responses = + new ArrayList<>(); + private boolean onCompleteCalled = false; + + @Override + public void beforeStart(final ClientCallStreamObserver requestStream) { + requestStream.disableAutoRequestWithInitial(0); + } + + @Override + public void onNext(ServerReflectionResponse value) { + responses.add(value); + } + + @Override + public void onError(Throwable t) { + fail("onError called"); + } + + @Override + public void onCompleted() { + onCompleteCalled = true; + } + + public List getResponses() { + return responses; + } + + public boolean onCompleteCalled() { + return onCompleteCalled; + } + } + + private void assertServiceResponseEquals(Set goldenResponse) throws Exception { + ServerReflectionRequest request = + ServerReflectionRequest.newBuilder().setHost(TEST_HOST).setListServices("services").build(); + StreamRecorder responseObserver = StreamRecorder.create(); + StreamObserver requestObserver = + stub.serverReflectionInfo(responseObserver); + requestObserver.onNext(request); + requestObserver.onCompleted(); + List response = + responseObserver.firstValue().get().getListServicesResponse().getServiceList(); + assertEquals(goldenResponse.size(), response.size()); + assertEquals(goldenResponse, new HashSet<>(response)); + } +} From 926905012513301d70f28b9d3a3c85c8cc3d8ff2 Mon Sep 17 
00:00:00 2001 From: Eric Anderson Date: Wed, 17 Jul 2024 14:15:02 -0700 Subject: [PATCH 03/53] examples: For Bazel, remove compat repo for maven_install It hasn't been needed since 0064991. In that commit the main WORKSPACE was cleaned up, but not the examples. --- examples/WORKSPACE | 5 ----- 1 file changed, 5 deletions(-) diff --git a/examples/WORKSPACE b/examples/WORKSPACE index 7291584b3fc..9f6a4eff704 100644 --- a/examples/WORKSPACE +++ b/examples/WORKSPACE @@ -40,13 +40,8 @@ maven_install( "com.google.api.grpc:grpc-google-cloud-pubsub-v1:0.1.24", "com.google.api.grpc:proto-google-cloud-pubsub-v1:0.1.24", ] + IO_GRPC_GRPC_JAVA_ARTIFACTS + PROTOBUF_MAVEN_ARTIFACTS, - generate_compat_repositories = True, override_targets = IO_GRPC_GRPC_JAVA_OVERRIDE_TARGETS, repositories = [ "https://repo.maven.apache.org/maven2/", ], ) - -load("@maven//:compat.bzl", "compat_repositories") - -compat_repositories() From 85e0a01ecdba688f48a63eeaf2b7a8be3af3229c Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 22 Jul 2024 13:09:42 -0700 Subject: [PATCH 04/53] util: Mark GracefulSwitchLB.switchTo() deprecated --- .../io/grpc/util/GracefulSwitchLoadBalancer.java | 4 ++++ .../grpc/util/GracefulSwitchLoadBalancerTest.java | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/util/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java b/util/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java index ca2b940e201..a63a641b037 100644 --- a/util/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java @@ -142,7 +142,11 @@ public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) { /** * Gracefully switch to a new policy defined by the given factory, if the given factory isn't * equal to the current one. 
+ * + * @deprecated Use {@code parseLoadBalancingPolicyConfig()} and pass the configuration to + * {@link io.grpc.LoadBalancer.ResolvedAddresses.Builder#setLoadBalancingPolicyConfig} */ + @Deprecated public void switchTo(LoadBalancer.Factory newBalancerFactory) { switchToCalled = true; switchToInternal(newBalancerFactory); diff --git a/util/src/test/java/io/grpc/util/GracefulSwitchLoadBalancerTest.java b/util/src/test/java/io/grpc/util/GracefulSwitchLoadBalancerTest.java index d871999dad2..f31443ace7b 100644 --- a/util/src/test/java/io/grpc/util/GracefulSwitchLoadBalancerTest.java +++ b/util/src/test/java/io/grpc/util/GracefulSwitchLoadBalancerTest.java @@ -86,6 +86,7 @@ public class GracefulSwitchLoadBalancerTest { // OLD TESTS @Test + @Deprecated public void switchTo_canHandleEmptyAddressListFromNameResolutionForwardedToLatestPolicy() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -115,6 +116,7 @@ public void switchTo_canHandleEmptyAddressListFromNameResolutionForwardedToLates } @Test + @Deprecated public void switchTo_handleResolvedAddressesAndNameResolutionErrorForwardedToLatestPolicy() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -155,6 +157,7 @@ public void switchTo_handleResolvedAddressesAndNameResolutionErrorForwardedToLat } @Test + @Deprecated public void switchTo_acceptResolvedAddressesAndNameResolutionErrorForwardedToLatestPolicy() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -195,6 +198,7 @@ public void switchTo_acceptResolvedAddressesAndNameResolutionErrorForwardedToLat } @Test + @Deprecated public void switchTo_shutdownTriggeredWhenSwitchAndForwardedWhenSwitchLbShutdown() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -226,6 +230,7 @@ public void switchTo_shutdownTriggeredWhenSwitchAndForwardedWhenSwitchLbShutdown } @Test + @Deprecated public void switchTo_requestConnectionForwardedToLatestPolicies() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -263,6 +268,7 @@ public void switchTo_requestConnectionForwardedToLatestPolicies() { } @Test + @Deprecated public void switchTo_createSubchannelForwarded() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -289,6 +295,7 @@ public void switchTo_createSubchannelForwarded() { } @Test + @Deprecated public void switchTo_updateBalancingStateIsGraceful() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -340,6 +347,7 @@ public void switchTo_updateBalancingStateIsGraceful() { } @Test + @Deprecated public void switchTo_switchWhileOldPolicyIsNotReady() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -368,6 +376,7 @@ public void switchTo_switchWhileOldPolicyIsNotReady() { } @Test + @Deprecated public void switchTo_switchWhileOldPolicyGoesFromReadyToNotReady() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -398,6 +407,7 @@ public void switchTo_switchWhileOldPolicyGoesFromReadyToNotReady() { } @Test + @Deprecated public void switchTo_switchWhileOldPolicyGoesFromReadyToNotReadyWhileNewPolicyStillIdle() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -428,6 +438,7 @@ public void switchTo_switchWhileOldPolicyGoesFromReadyToNotReadyWhileNewPolicySt } @Test + @Deprecated 
public void switchTo_newPolicyNameTheSameAsPendingPolicy_shouldHaveNoEffect() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -445,6 +456,7 @@ public void switchTo_newPolicyNameTheSameAsPendingPolicy_shouldHaveNoEffect() { } @Test + @Deprecated public void switchTo_newPolicyNameTheSameAsCurrentPolicy_shouldShutdownPendingLb() { gracefulSwitchLb.switchTo(lbPolicies[0]); LoadBalancer lb0 = balancers.get(lbPolicies[0]); @@ -468,6 +480,7 @@ public void switchTo_newPolicyNameTheSameAsCurrentPolicy_shouldShutdownPendingLb @Test + @Deprecated public void switchTo_newLbFactoryEqualToOldOneShouldHaveNoEffect() { final List balancers = new ArrayList<>(); From 36e687f9d9da447c5f94dd95c149fd462c3fb7f1 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 22 Jul 2024 12:58:21 -0700 Subject: [PATCH 05/53] examples: Add bzlmod support This also updates the version number of the grpc-java module as part of the release process. --- .github/workflows/testing.yml | 10 ++++++++++ MODULE.bazel | 2 +- RELEASING.md | 2 ++ examples/MODULE.bazel | 24 ++++++++++++++++++++++++ examples/WORKSPACE.bzlmod | 1 + 5 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 examples/MODULE.bazel create mode 100644 examples/WORKSPACE.bzlmod diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index fb44d36cfb2..bc5a175906f 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -97,7 +97,17 @@ jobs: - name: Run bazel build run: bazelisk build //... --enable_bzlmod=false + - name: Run example bazel build + run: bazelisk build //... --enable_bzlmod=false + working-directory: ./examples + - name: Run bazel build (bzlmod) env: USE_BAZEL_VERSION: 7.0.0 run: bazelisk build //... --enable_bzlmod=true + + - name: Run example bazel build (bzlmod) + env: + USE_BAZEL_VERSION: 7.0.0 + run: bazelisk build //... --enable_bzlmod=true + working-directory: ./examples diff --git a/MODULE.bazel b/MODULE.bazel index 9bf2389a2ca..9c9c72fb991 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -2,7 +2,7 @@ module( name = "grpc-java", compatibility_level = 0, repo_name = "io_grpc_grpc_java", - version = "0", + version = "1.66.0-SNAPSHOT", # CURRENT_GRPC_VERSION ) # GRPC_DEPS_START diff --git a/RELEASING.md b/RELEASING.md index efa291804a5..bb1b77d0557 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -18,8 +18,10 @@ them before continuing, and set them again when resuming. ```bash MAJOR=1 MINOR=7 PATCH=0 # Set appropriately for new release VERSION_FILES=( + MODULE.bazel build.gradle core/src/main/java/io/grpc/internal/GrpcUtil.java + examples/MODULE.bazel examples/build.gradle examples/pom.xml examples/android/clientcache/app/build.gradle diff --git a/examples/MODULE.bazel b/examples/MODULE.bazel new file mode 100644 index 00000000000..54438442acc --- /dev/null +++ b/examples/MODULE.bazel @@ -0,0 +1,24 @@ +bazel_dep(name = "grpc-java", repo_name = "io_grpc_grpc_java", version = "1.66.0-SNAPSHOT") # CURRENT_GRPC_VERSION +bazel_dep(name = "grpc-proto", repo_name = "io_grpc_grpc_proto", version = "0.0.0-20240627-ec30f58") +bazel_dep(name = "protobuf", repo_name = "com_google_protobuf", version = "23.1") +bazel_dep(name = "rules_jvm_external", version = "6.0") +bazel_dep(name = "rules_proto", version = "5.3.0-21.7") + +# Do not use this override in your own MODULE.bazel. 
Use a version from BCR +local_path_override( + module_name = "grpc-java", + path = "..", +) + +maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") + +use_repo(maven, "maven") + +maven.install( + artifacts = [ + "com.google.api.grpc:grpc-google-cloud-pubsub-v1:0.1.24", + "com.google.api.grpc:proto-google-cloud-pubsub-v1:0.1.24", + ], + repositories = ["https://repo.maven.apache.org/maven2/"], + strict_visibility = True, +) diff --git a/examples/WORKSPACE.bzlmod b/examples/WORKSPACE.bzlmod new file mode 100644 index 00000000000..4ecb9e5d985 --- /dev/null +++ b/examples/WORKSPACE.bzlmod @@ -0,0 +1 @@ +# When using bzlmod this makes sure nothing from the legacy WORKSPACE is loaded From 5ec0187e236e6103cfba47962b6bab52bc69a2d1 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 22 Jul 2024 12:58:52 -0700 Subject: [PATCH 06/53] examples: Fix WORKSPACE to allow referencing grpc-xds --- MODULE.bazel | 2 -- examples/BUILD.bazel | 10 ++++++++++ examples/MODULE.bazel | 5 +++++ examples/WORKSPACE | 11 +++++++++++ 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 9c9c72fb991..aa8c8d84601 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -203,5 +203,3 @@ maven.override( switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") switched_rules.use_languages(java = True) - -use_repo(switched_rules, "com_google_googleapis_imports") diff --git a/examples/BUILD.bazel b/examples/BUILD.bazel index 46da0f9c2f9..3a0936780a0 100644 --- a/examples/BUILD.bazel +++ b/examples/BUILD.bazel @@ -265,3 +265,13 @@ java_binary( ":examples", ], ) + +# grpc-xds requires some WORKSPACE/MODULE deps that aren't needed by the other +# targets. This just makes sure the example WORKSPACE/MODULE works with +# grpc-xds. 
+java_library( + name = "test_grpc_xds_compiles", + runtime_deps = [ + "@io_grpc_grpc_java//xds", + ], +) diff --git a/examples/MODULE.bazel b/examples/MODULE.bazel index 54438442acc..60bed40f349 100644 --- a/examples/MODULE.bazel +++ b/examples/MODULE.bazel @@ -1,3 +1,4 @@ +bazel_dep(name = "googleapis", repo_name = "com_google_googleapis", version = "0.0.0-20240326-1c8d509c5") bazel_dep(name = "grpc-java", repo_name = "io_grpc_grpc_java", version = "1.66.0-SNAPSHOT") # CURRENT_GRPC_VERSION bazel_dep(name = "grpc-proto", repo_name = "io_grpc_grpc_proto", version = "0.0.0-20240627-ec30f58") bazel_dep(name = "protobuf", repo_name = "com_google_protobuf", version = "23.1") @@ -10,6 +11,10 @@ local_path_override( path = "..", ) +switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") + +switched_rules.use_languages(java = True) + maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") use_repo(maven, "maven") diff --git a/examples/WORKSPACE b/examples/WORKSPACE index 9f6a4eff704..170e06a90c7 100644 --- a/examples/WORKSPACE +++ b/examples/WORKSPACE @@ -35,6 +35,17 @@ load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") protobuf_deps() +load("@envoy_api//bazel:repositories.bzl", "api_dependencies") + +api_dependencies() + +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") + +switched_rules_by_language( + name = "com_google_googleapis_imports", + java = True, +) + maven_install( artifacts = [ "com.google.api.grpc:grpc-google-cloud-pubsub-v1:0.1.24", From 7a25e68958183d098f411136c5922ef2d96eee19 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 23 Jul 2024 15:59:58 -0700 Subject: [PATCH 07/53] bazel: Use com_google_protobuf instead of com_google_protobuf_javalite Since Bazel 6 [1], Bazel has used com_google_protobuf for javalite. We only used the other repo because Bazel expected it, which was because Protobuf split out javalite to a separate branch for a while. Since everything is now reunified, we can use a singular protobuf repo. 1. 
https://github.com/bazelbuild/bazel/commit/abdb1d6bfeba6f059e2d8e5383121fbead332c6a --- MODULE.bazel | 1 - repositories.bzl | 13 +------------ 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index aa8c8d84601..b0c923c0c35 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -62,7 +62,6 @@ use_repo( non_module_deps, "com_github_cncf_xds", "envoy_api", - "com_google_protobuf_javalite", ) grpc_repo_deps_ext = use_extension("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_repo_deps_ext") diff --git a/repositories.bzl b/repositories.bzl index ebbe666eec2..12dca04862b 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -62,7 +62,7 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ IO_GRPC_GRPC_JAVA_OVERRIDE_TARGETS = { "com.google.protobuf:protobuf-java": "@com_google_protobuf//:protobuf_java", "com.google.protobuf:protobuf-java-util": "@com_google_protobuf//:protobuf_java_util", - "com.google.protobuf:protobuf-javalite": "@com_google_protobuf_javalite//:protobuf_javalite", + "com.google.protobuf:protobuf-javalite": "@com_google_protobuf//:protobuf_javalite", "io.grpc:grpc-alts": "@io_grpc_grpc_java//alts", "io.grpc:grpc-api": "@io_grpc_grpc_java//api", "io.grpc:grpc-auth": "@io_grpc_grpc_java//auth", @@ -107,8 +107,6 @@ def grpc_java_repositories(bzlmod = False): ) if not bzlmod and not native.existing_rule("com_google_protobuf"): com_google_protobuf() - if not native.existing_rule("com_google_protobuf_javalite"): - com_google_protobuf_javalite() if not bzlmod and not native.existing_rule("com_google_googleapis"): http_archive( name = "com_google_googleapis", @@ -153,15 +151,6 @@ def com_google_protobuf(): urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v25.1/protobuf-25.1.tar.gz"], ) -def com_google_protobuf_javalite(): - # java_lite_proto_library rules implicitly depend on @com_google_protobuf_javalite - http_archive( - name = "com_google_protobuf_javalite", - sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42", - strip_prefix = "protobuf-25.1", - urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v25.1/protobuf-25.1.tar.gz"], - ) - def io_grpc_grpc_proto(): http_archive( name = "io_grpc_grpc_proto", From d3c2f5a2dad9147f42325993b20ab84bdfe4fac6 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 23 Jul 2024 16:07:20 -0700 Subject: [PATCH 08/53] bazel: Don't use artifact() for protobuf deps We don't include protobuf in IO_GRPC_GRPC_JAVA_ARTIFACTS, so there might not actually be an alias available for it to @com_google_protobuf. While we could add it, it is easier to use the @com_google_protobuf references directly. This was preventing `bazel query 'deps(//...)' from succeeding, because it couldn't find javalite. 
--- protobuf-lite/BUILD.bazel | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protobuf-lite/BUILD.bazel b/protobuf-lite/BUILD.bazel index 2120cbd283a..087723e95fb 100644 --- a/protobuf-lite/BUILD.bazel +++ b/protobuf-lite/BUILD.bazel @@ -12,8 +12,8 @@ java_library( artifact("com.google.guava:guava"), artifact("com.google.j2objc:j2objc-annotations"), ] + select({ - ":android": [artifact("com.google.protobuf:protobuf-javalite")], - "//conditions:default": [artifact("com.google.protobuf:protobuf-java")], + ":android": ["@com_google_protobuf//:protobuf_javalite"], + "//conditions:default": ["@com_google_protobuf//:protobuf_java"], }), ) From eb4cdf7959795d70c44aa74d572ddc5f8bd2ac5e Mon Sep 17 00:00:00 2001 From: subhraOffGit <162678244+subhraOffGit@users.noreply.github.com> Date: Wed, 24 Jul 2024 22:49:10 +0530 Subject: [PATCH 09/53] Update MAINTAINERS.md (#11241) Removed sanjaypujare from Active Maintainers list and added him to Emeritus Maintainers list. Removed the org names from Emeritus maintainers list --- MAINTAINERS.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 1783acc6042..f1c07ccd6f2 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -14,22 +14,22 @@ for general contribution guidelines. - [larry-safran](https://github.com/larry-safran), Google LLC - [markb74](https://github.com/markb74), Google LLC - [ran-su](https://github.com/ran-su), Google LLC -- [sanjaypujare](https://github.com/sanjaypujare), Google LLC - [sergiitk](https://github.com/sergiitk), Google LLC - [temawi](https://github.com/temawi), Google LLC - [YifeiZhuang](https://github.com/YifeiZhuang), Google LLC - [zhangkun83](https://github.com/zhangkun83), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [carl-mastrangelo](https://github.com/carl-mastrangelo), Google LLC -- [creamsoup](https://github.com/creamsoup), Google LLC -- [dapengzhang0](https://github.com/dapengzhang0), Google LLC -- [ericgribkoff](https://github.com/ericgribkoff), Google LLC -- [jiangtaoli2016](https://github.com/jiangtaoli2016), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [louiscryan](https://github.com/louiscryan), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [nmittler](https://github.com/nmittler), Google LLC -- [srini100](https://github.com/srini100), Google LLC -- [voidzcy](https://github.com/voidzcy), Google LLC -- [zpencer](https://github.com/zpencer), Google LLC +- [carl-mastrangelo](https://github.com/carl-mastrangelo) +- [creamsoup](https://github.com/creamsoup) +- [dapengzhang0](https://github.com/dapengzhang0) +- [ericgribkoff](https://github.com/ericgribkoff) +- [jiangtaoli2016](https://github.com/jiangtaoli2016) +- [jtattermusch](https://github.com/jtattermusch) +- [louiscryan](https://github.com/louiscryan) +- [nicolasnoble](https://github.com/nicolasnoble) +- [nmittler](https://github.com/nmittler) +- [sanjaypujare](https://github.com/sanjaypujare) +- [srini100](https://github.com/srini100) +- [voidzcy](https://github.com/voidzcy) +- [zpencer](https://github.com/zpencer) From b108ed3ddf08d20926cab1ea4ddd75264aff8c18 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 24 Jul 2024 21:30:10 -0700 Subject: [PATCH 10/53] api: Give instruments a toString() including their name This makes it much easier when testing to understand what the values/arguments are at various parts of the code. 
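As a rough, standalone illustration of the effect (not part of this patch; the FakeInstrument class below is a hypothetical stand-in that only mirrors the pattern added to PartialMetricInstrument in the diff that follows), a toString() of this shape makes test logs and verification failures name the instrument instead of printing an opaque object reference:

public final class InstrumentToStringDemo {
  // Hypothetical stand-in for a metric instrument; the real PartialMetricInstrument
  // lives in io.grpc and is not constructed directly like this.
  static final class FakeInstrument {
    private final String name;

    FakeInstrument(String name) {
      this.name = name;
    }

    String getName() {
      return name;
    }

    @Override
    public String toString() {
      // Same shape as the change below: class name plus the instrument's name.
      return getClass().getName() + "(" + getName() + ")";
    }
  }

  public static void main(String[] args) {
    FakeInstrument counter = new FakeInstrument("grpc.lb.wrr.rr_fallback");
    // Prints "InstrumentToStringDemo$FakeInstrument(grpc.lb.wrr.rr_fallback)" rather than
    // the default identity-based Object#toString(), so a failed assertion immediately
    // shows which instrument was involved.
    System.out.println(counter);
  }
}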
--- api/src/main/java/io/grpc/PartialMetricInstrument.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/api/src/main/java/io/grpc/PartialMetricInstrument.java b/api/src/main/java/io/grpc/PartialMetricInstrument.java index c246b67f810..7e032634f96 100644 --- a/api/src/main/java/io/grpc/PartialMetricInstrument.java +++ b/api/src/main/java/io/grpc/PartialMetricInstrument.java @@ -89,4 +89,9 @@ public List getOptionalLabelKeys() { public boolean isEnableByDefault() { return enableByDefault; } + + @Override + public String toString() { + return getClass().getName() + "(" + getName() + ")"; + } } From 786523dca4461597072cc2b86e827d18a34e6440 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 24 Jul 2024 16:31:00 -0700 Subject: [PATCH 11/53] xds: WRR rr_fallback should trigger with one endpoint weight From gRFC A58: > When less than two subchannels have load info, all subchannels will > get the same weight and the policy will behave the same as round_robin --- .../xds/WeightedRoundRobinLoadBalancer.java | 8 +++++--- .../WeightedRoundRobinLoadBalancerTest.java | 20 ++++++++++++++----- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index b200b9fdd6c..c3383148079 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -598,13 +598,15 @@ static final class StaticStrideScheduler { if (numWeightedChannels > 0) { unscaledMeanWeight = sumWeight / numWeightedChannels; unscaledMaxWeight = Math.min(unscaledMaxWeight, (float) (K_MAX_RATIO * unscaledMeanWeight)); - usesRoundRobin = false; } else { - // Fall back to round robin if all values are non-positives - usesRoundRobin = true; + // Fall back to round robin if all values are non-positives. Note that + // numWeightedChannels == 1 also behaves like RR because the weights are all the same, but + // the weights aren't 1, so it doesn't go through this path. unscaledMeanWeight = 1; unscaledMaxWeight = 1; } + // We need at least two weights for WRR to be distinguishable from round_robin. + usesRoundRobin = numWeightedChannels < 2; // Scales weights s.t. max(weights) == K_MAX_WEIGHT, meanWeight is scaled accordingly. // Note that, since we cap the weights to stay within K_MAX_RATIO, meanWeight might not diff --git a/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java index 2913a1e1d7d..a5b5651133b 100644 --- a/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java @@ -1190,13 +1190,17 @@ public void metrics() { verifyLongCounterRecord("grpc.lb.wrr.endpoint_weight_not_yet_usable", 1, 2); verifyLongCounterRecord("grpc.lb.wrr.endpoint_weight_not_yet_usable", 1, 3); - // Send each child LB state an ORCA update with some valid utilization/qps data so that weights - // can be calculated. 
+ // Send one child LB state an ORCA update with some valid utilization/qps data so that weights + // can be calculated, but it's still essentially round_robin Iterator childLbStates = wrr.getChildLbStates().iterator(); ((WeightedChildLbState)childLbStates.next()).new OrcaReportListener( weightedConfig.errorUtilizationPenalty).onLoadReport( InternalCallMetricRecorder.createMetricReport(0.1, 0, 0.1, 1, 0, new HashMap<>(), new HashMap<>(), new HashMap<>())); + + fakeClock.forwardTime(1, TimeUnit.SECONDS); + + // Now send a second child LB state an ORCA update, so there's real weights ((WeightedChildLbState)childLbStates.next()).new OrcaReportListener( weightedConfig.errorUtilizationPenalty).onLoadReport( InternalCallMetricRecorder.createMetricReport(0.1, 0, 0.1, 1, 0, new HashMap<>(), @@ -1210,9 +1214,15 @@ public void metrics() { // weights were updated reset(mockMetricRecorder); - // We go forward in time past the default 10s blackout period before weights can be considered - // for wrr. The eights would get updated as the default update interval is 1s. - fakeClock.forwardTime(11, TimeUnit.SECONDS); + // We go forward in time past the default 10s blackout period for the first child. The weights + // would get updated as the default update interval is 1s. + fakeClock.forwardTime(9, TimeUnit.SECONDS); + + verifyLongCounterRecord("grpc.lb.wrr.rr_fallback", 1, 1); + + // And after another second the other children have weights + reset(mockMetricRecorder); + fakeClock.forwardTime(1, TimeUnit.SECONDS); // Since we have weights on all the child LB states, the weight update should not result in // further rr_fallback metric entries. From 9ba2f9dec5c71a5d0afbba0f196331a47844bc07 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Fri, 26 Jul 2024 15:43:36 +0530 Subject: [PATCH 12/53] Introduce onResult2 in NameResolver Listener2 that returns Status (#11313) Introducing NameResolver listener method "Status Listener2::onResult2(ResolutionResult)" that returns Status of the acceptance of the name resolution by the load balancer, and the Name Resolver will call this method for both success and error cases. --- api/src/main/java/io/grpc/NameResolver.java | 10 + .../io/grpc/internal/DnsNameResolver.java | 4 +- .../io/grpc/internal/ManagedChannelImpl.java | 255 +++++++-------- .../grpc/internal/RetryingNameResolver.java | 12 + .../io/grpc/internal/DnsNameResolverTest.java | 50 ++- .../grpc/internal/ManagedChannelImplTest.java | 300 +++++++++++++++++- .../internal/RetryingNameResolverTest.java | 28 +- .../grpc/grpclb/GrpclbNameResolverTest.java | 10 +- 8 files changed, 501 insertions(+), 168 deletions(-) diff --git a/api/src/main/java/io/grpc/NameResolver.java b/api/src/main/java/io/grpc/NameResolver.java index a74512eb7e3..8af8112ffdb 100644 --- a/api/src/main/java/io/grpc/NameResolver.java +++ b/api/src/main/java/io/grpc/NameResolver.java @@ -246,6 +246,16 @@ public final void onAddresses( */ @Override public abstract void onError(Status error); + + /** + * Handles updates on resolved addresses and attributes. + * + * @param resolutionResult the resolved server addresses, attributes, and Service Config. 
+ * @since 1.66 + */ + public Status onResult2(ResolutionResult resolutionResult) { + throw new UnsupportedOperationException("Not implemented."); + } } /** diff --git a/core/src/main/java/io/grpc/internal/DnsNameResolver.java b/core/src/main/java/io/grpc/internal/DnsNameResolver.java index 5ef6dd863c2..df51d6f2c5c 100644 --- a/core/src/main/java/io/grpc/internal/DnsNameResolver.java +++ b/core/src/main/java/io/grpc/internal/DnsNameResolver.java @@ -330,7 +330,9 @@ public void run() { resolutionResultBuilder.setAttributes(result.attributes); } } - savedListener.onResult(resolutionResultBuilder.build()); + syncContext.execute(() -> { + savedListener.onResult2(resolutionResultBuilder.build()); + }); } catch (IOException e) { savedListener.onError( Status.UNAVAILABLE.withDescription("Unable to resolve host " + host).withCause(e)); diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java index c5c7b66e15d..7f45ca967ea 100644 --- a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java +++ b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java @@ -1673,146 +1673,147 @@ final class NameResolverListener extends NameResolver.Listener2 { public void onResult(final ResolutionResult resolutionResult) { final class NamesResolved implements Runnable { - @SuppressWarnings("ReferenceEquality") @Override public void run() { - if (ManagedChannelImpl.this.nameResolver != resolver) { - return; - } - - List servers = resolutionResult.getAddresses(); - channelLogger.log( - ChannelLogLevel.DEBUG, - "Resolved address: {0}, config={1}", - servers, - resolutionResult.getAttributes()); - - if (lastResolutionState != ResolutionState.SUCCESS) { - channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); - lastResolutionState = ResolutionState.SUCCESS; - } - - ConfigOrError configOrError = resolutionResult.getServiceConfig(); + Status status = onResult2(resolutionResult); ResolutionResultListener resolutionResultListener = resolutionResult.getAttributes() .get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY); - InternalConfigSelector resolvedConfigSelector = - resolutionResult.getAttributes().get(InternalConfigSelector.KEY); - ManagedChannelServiceConfig validServiceConfig = - configOrError != null && configOrError.getConfig() != null - ? (ManagedChannelServiceConfig) configOrError.getConfig() - : null; - Status serviceConfigError = configOrError != null ? configOrError.getError() : null; - - ManagedChannelServiceConfig effectiveServiceConfig; - if (!lookUpServiceConfig) { - if (validServiceConfig != null) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config from name resolver discarded by channel settings"); - } - effectiveServiceConfig = - defaultServiceConfig == null ? 
EMPTY_SERVICE_CONFIG : defaultServiceConfig; - if (resolvedConfigSelector != null) { + resolutionResultListener.resolutionAttempted(status); + } + } + + syncContext.execute(new NamesResolved()); + } + + @SuppressWarnings("ReferenceEquality") + @Override + public Status onResult2(final ResolutionResult resolutionResult) { + syncContext.throwIfNotInThisSynchronizationContext(); + if (ManagedChannelImpl.this.nameResolver != resolver) { + return Status.OK; + } + + List servers = resolutionResult.getAddresses(); + channelLogger.log( + ChannelLogLevel.DEBUG, + "Resolved address: {0}, config={1}", + servers, + resolutionResult.getAttributes()); + + if (lastResolutionState != ResolutionState.SUCCESS) { + channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); + lastResolutionState = ResolutionState.SUCCESS; + } + + ConfigOrError configOrError = resolutionResult.getServiceConfig(); + InternalConfigSelector resolvedConfigSelector = + resolutionResult.getAttributes().get(InternalConfigSelector.KEY); + ManagedChannelServiceConfig validServiceConfig = + configOrError != null && configOrError.getConfig() != null + ? (ManagedChannelServiceConfig) configOrError.getConfig() + : null; + Status serviceConfigError = configOrError != null ? configOrError.getError() : null; + + ManagedChannelServiceConfig effectiveServiceConfig; + if (!lookUpServiceConfig) { + if (validServiceConfig != null) { + channelLogger.log( + ChannelLogLevel.INFO, + "Service config from name resolver discarded by channel settings"); + } + effectiveServiceConfig = + defaultServiceConfig == null ? EMPTY_SERVICE_CONFIG : defaultServiceConfig; + if (resolvedConfigSelector != null) { + channelLogger.log( + ChannelLogLevel.INFO, + "Config selector from name resolver discarded by channel settings"); + } + realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + } else { + // Try to use config if returned from name resolver + // Otherwise, try to use the default config if available + if (validServiceConfig != null) { + effectiveServiceConfig = validServiceConfig; + if (resolvedConfigSelector != null) { + realChannel.updateConfigSelector(resolvedConfigSelector); + if (effectiveServiceConfig.getDefaultConfigSelector() != null) { channelLogger.log( - ChannelLogLevel.INFO, - "Config selector from name resolver discarded by channel settings"); + ChannelLogLevel.DEBUG, + "Method configs in service config will be discarded due to presence of" + + "config-selector"); } + } else { realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + } + } else if (defaultServiceConfig != null) { + effectiveServiceConfig = defaultServiceConfig; + realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + channelLogger.log( + ChannelLogLevel.INFO, + "Received no service config, using default service config"); + } else if (serviceConfigError != null) { + if (!serviceConfigUpdated) { + // First DNS lookup has invalid service config, and cannot fall back to default + channelLogger.log( + ChannelLogLevel.INFO, + "Fallback to error due to invalid first service config without default config"); + // This error could be an "inappropriate" control plane error that should not bleed + // through to client code using gRPC. We let them flow through here to the LB as + // we later check for these error codes when investigating pick results in + // GrpcUtil.getTransportFromPickResult(). 
+ onError(configOrError.getError()); + return configOrError.getError(); } else { - // Try to use config if returned from name resolver - // Otherwise, try to use the default config if available - if (validServiceConfig != null) { - effectiveServiceConfig = validServiceConfig; - if (resolvedConfigSelector != null) { - realChannel.updateConfigSelector(resolvedConfigSelector); - if (effectiveServiceConfig.getDefaultConfigSelector() != null) { - channelLogger.log( - ChannelLogLevel.DEBUG, - "Method configs in service config will be discarded due to presence of" - + "config-selector"); - } - } else { - realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - } - } else if (defaultServiceConfig != null) { - effectiveServiceConfig = defaultServiceConfig; - realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - channelLogger.log( - ChannelLogLevel.INFO, - "Received no service config, using default service config"); - } else if (serviceConfigError != null) { - if (!serviceConfigUpdated) { - // First DNS lookup has invalid service config, and cannot fall back to default - channelLogger.log( - ChannelLogLevel.INFO, - "Fallback to error due to invalid first service config without default config"); - // This error could be an "inappropriate" control plane error that should not bleed - // through to client code using gRPC. We let them flow through here to the LB as - // we later check for these error codes when investigating pick results in - // GrpcUtil.getTransportFromPickResult(). - onError(configOrError.getError()); - if (resolutionResultListener != null) { - resolutionResultListener.resolutionAttempted(configOrError.getError()); - } - return; - } else { - effectiveServiceConfig = lastServiceConfig; - } - } else { - effectiveServiceConfig = EMPTY_SERVICE_CONFIG; - realChannel.updateConfigSelector(null); - } - if (!effectiveServiceConfig.equals(lastServiceConfig)) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config changed{0}", - effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); - lastServiceConfig = effectiveServiceConfig; - transportProvider.throttle = effectiveServiceConfig.getRetryThrottling(); - } - - try { - // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS - // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, - // lbNeedAddress is not deterministic - serviceConfigUpdated = true; - } catch (RuntimeException re) { - logger.log( - Level.WARNING, - "[" + getLogId() + "] Unexpected exception from parsing service config", - re); - } + effectiveServiceConfig = lastServiceConfig; } + } else { + effectiveServiceConfig = EMPTY_SERVICE_CONFIG; + realChannel.updateConfigSelector(null); + } + if (!effectiveServiceConfig.equals(lastServiceConfig)) { + channelLogger.log( + ChannelLogLevel.INFO, + "Service config changed{0}", + effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); + lastServiceConfig = effectiveServiceConfig; + transportProvider.throttle = effectiveServiceConfig.getRetryThrottling(); + } - Attributes effectiveAttrs = resolutionResult.getAttributes(); - // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. 
- if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { - Attributes.Builder attrBuilder = - effectiveAttrs.toBuilder().discard(InternalConfigSelector.KEY); - Map healthCheckingConfig = - effectiveServiceConfig.getHealthCheckingConfig(); - if (healthCheckingConfig != null) { - attrBuilder - .set(LoadBalancer.ATTR_HEALTH_CHECKING_CONFIG, healthCheckingConfig) - .build(); - } - Attributes attributes = attrBuilder.build(); - - Status addressAcceptanceStatus = helper.lb.tryAcceptResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(attributes) - .setLoadBalancingPolicyConfig(effectiveServiceConfig.getLoadBalancingConfig()) - .build()); - // If a listener is provided, let it know if the addresses were accepted. - if (resolutionResultListener != null) { - resolutionResultListener.resolutionAttempted(addressAcceptanceStatus); - } - } + try { + // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS + // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, + // lbNeedAddress is not deterministic + serviceConfigUpdated = true; + } catch (RuntimeException re) { + logger.log( + Level.WARNING, + "[" + getLogId() + "] Unexpected exception from parsing service config", + re); } } - syncContext.execute(new NamesResolved()); + Attributes effectiveAttrs = resolutionResult.getAttributes(); + // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. + if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { + Attributes.Builder attrBuilder = + effectiveAttrs.toBuilder().discard(InternalConfigSelector.KEY); + Map healthCheckingConfig = + effectiveServiceConfig.getHealthCheckingConfig(); + if (healthCheckingConfig != null) { + attrBuilder + .set(LoadBalancer.ATTR_HEALTH_CHECKING_CONFIG, healthCheckingConfig) + .build(); + } + Attributes attributes = attrBuilder.build(); + + return helper.lb.tryAcceptResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(servers) + .setAttributes(attributes) + .setLoadBalancingPolicyConfig(effectiveServiceConfig.getLoadBalancingConfig()) + .build()); + } + return Status.OK; } @Override diff --git a/core/src/main/java/io/grpc/internal/RetryingNameResolver.java b/core/src/main/java/io/grpc/internal/RetryingNameResolver.java index 6d806e95944..6dcfcd3534a 100644 --- a/core/src/main/java/io/grpc/internal/RetryingNameResolver.java +++ b/core/src/main/java/io/grpc/internal/RetryingNameResolver.java @@ -95,12 +95,24 @@ public void onResult(ResolutionResult resolutionResult) { "RetryingNameResolver can only be used once to wrap a NameResolver"); } + // To have retry behavior for name resolvers that haven't migrated to onResult2. 
delegateListener.onResult(resolutionResult.toBuilder().setAttributes( resolutionResult.getAttributes().toBuilder() .set(RESOLUTION_RESULT_LISTENER_KEY, new ResolutionResultListener()).build()) .build()); } + @Override + public Status onResult2(ResolutionResult resolutionResult) { + Status status = delegateListener.onResult2(resolutionResult); + if (status.isOk()) { + retryScheduler.reset(); + } else { + retryScheduler.schedule(new DelayedNameResolverRefresh()); + } + return status; + } + @Override public void onError(Status error) { delegateListener.onError(error); diff --git a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java index 14d3fddd290..0512171f4e7 100644 --- a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java +++ b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java @@ -26,7 +26,6 @@ import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.isA; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -226,13 +225,7 @@ public void setUp() { System.getProperty(DnsNameResolver.NETWORKADDRESS_CACHE_TTL_PROPERTY); // By default the mock listener processes the result successfully. - doAnswer(invocation -> { - ResolutionResult result = invocation.getArgument(0); - syncContext.execute( - () -> result.getAttributes().get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY) - .resolutionAttempted(Status.OK)); - return null; - }).when(mockListener).onResult(isA(ResolutionResult.class)); + when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.OK); } @After @@ -319,13 +312,13 @@ private void resolveNeverCache(boolean isAndroid) throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult(resultCaptor.capture()); + verify(mockListener, times(2)).onResult2(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -347,7 +340,7 @@ public void testExecutor_default() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -389,7 +382,7 @@ public void execute(Runnable command) { resolver.start(mockListener); assertEquals(0, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -418,7 +411,7 @@ public void resolve_cacheForever() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + 
verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -452,7 +445,7 @@ public void resolve_usingCache() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -487,14 +480,14 @@ public void resolve_cacheExpired() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); fakeTicker.advance(ttl + 1, TimeUnit.SECONDS); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult(resultCaptor.capture()); + verify(mockListener, times(2)).onResult2(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -531,7 +524,7 @@ private void resolveDefaultValue() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -544,7 +537,7 @@ private void resolveDefaultValue() throws Exception { fakeTicker.advance(1, TimeUnit.SECONDS); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult(resultCaptor.capture()); + verify(mockListener, times(2)).onResult2(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -575,7 +568,7 @@ public List resolveAddress(String host) throws Exception { assertThat(fakeExecutor.runDueTasks()).isEqualTo(1); ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); - verify(mockListener).onResult(ac.capture()); + verify(mockListener).onResult2(ac.capture()); verifyNoMoreInteractions(mockListener); assertThat(ac.getValue().getAddresses()).isEmpty(); assertThat(ac.getValue().getServiceConfig()).isNull(); @@ -588,12 +581,7 @@ public List resolveAddress(String host) throws Exception { // Load balancer rejects the empty addresses. 
@Test public void resolve_emptyResult_notAccepted() throws Exception { - doAnswer(invocation -> { - ResolutionResult result = invocation.getArgument(0); - result.getAttributes().get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY) - .resolutionAttempted(Status.UNAVAILABLE); - return null; - }).when(mockListener).onResult(isA(ResolutionResult.class)); + when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.UNAVAILABLE); DnsNameResolver.enableTxt = true; RetryingNameResolver resolver = newResolver("dns:///addr.fake:1234", 443); @@ -614,7 +602,7 @@ public List resolveAddress(String host) throws Exception { syncContext.execute(() -> assertThat(fakeExecutor.runDueTasks()).isEqualTo(1)); ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); - verify(mockListener).onResult(ac.capture()); + verify(mockListener).onResult2(ac.capture()); verifyNoMoreInteractions(mockListener); assertThat(ac.getValue().getAddresses()).isEmpty(); assertThat(ac.getValue().getServiceConfig()).isNull(); @@ -640,7 +628,7 @@ public void resolve_nullResourceResolver() throws Exception { dnsResolver.setResourceResolver(null); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -712,7 +700,7 @@ public ConfigOrError parseServiceConfig(Map rawServiceConfig) { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -770,7 +758,7 @@ public void resolve_serviceConfigLookupFails_nullServiceConfig() throws Exceptio dnsResolver.setResourceResolver(mockResourceResolver); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -802,7 +790,7 @@ public void resolve_serviceConfigMalformed_serviceConfigError() throws Exception dnsResolver.setResourceResolver(mockResourceResolver); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -870,7 +858,7 @@ public HttpConnectProxiedSocketAddress proxyFor(SocketAddress targetAddress) { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); List result = resultCaptor.getValue().getAddresses(); assertThat(result).hasSize(1); EquivalentAddressGroup eag = result.get(0); diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java index 1d6492f791c..4d42056b689 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java +++ 
b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java @@ -1054,6 +1054,79 @@ public void noMoreCallbackAfterLoadBalancerShutdown() { verifyNoMoreInteractions(mockLoadBalancer); } + @Test + public void noMoreCallbackAfterLoadBalancerShutdown_configError() throws InterruptedException { + FakeNameResolverFactory nameResolverFactory = + new FakeNameResolverFactory.Builder(expectedUri) + .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) + .build(); + channelBuilder.nameResolverFactory(nameResolverFactory); + Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed"); + createChannel(); + + FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); + verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); + verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture()); + assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); + + SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class); + SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class); + Subchannel subchannel1 = + createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener1); + Subchannel subchannel2 = + createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener2); + requestConnectionSafely(helper, subchannel1); + requestConnectionSafely(helper, subchannel2); + verify(mockTransportFactory, times(2)) + .newClientTransport( + any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); + MockClientTransportInfo transportInfo1 = transports.poll(); + MockClientTransportInfo transportInfo2 = transports.poll(); + + // LoadBalancer receives all sorts of callbacks + transportInfo1.listener.transportReady(); + + verify(stateListener1, times(2)).onSubchannelState(stateInfoCaptor.capture()); + assertSame(CONNECTING, stateInfoCaptor.getAllValues().get(0).getState()); + assertSame(READY, stateInfoCaptor.getAllValues().get(1).getState()); + + verify(stateListener2).onSubchannelState(stateInfoCaptor.capture()); + assertSame(CONNECTING, stateInfoCaptor.getValue().getState()); + + resolver.listener.onError(resolutionError); + verify(mockLoadBalancer).handleNameResolutionError(resolutionError); + + verifyNoMoreInteractions(mockLoadBalancer); + + channel.shutdown(); + verify(mockLoadBalancer).shutdown(); + verifyNoMoreInteractions(stateListener1, stateListener2); + + // LoadBalancer will normally shutdown all subchannels + shutdownSafely(helper, subchannel1); + shutdownSafely(helper, subchannel2); + + // Since subchannels are shutdown, SubchannelStateListeners will only get SHUTDOWN regardless of + // the transport states. 
+ transportInfo1.listener.transportShutdown(Status.UNAVAILABLE); + transportInfo2.listener.transportReady(); + verify(stateListener1).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); + verify(stateListener2).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); + verifyNoMoreInteractions(stateListener1, stateListener2); + + // No more callback should be delivered to LoadBalancer after it's shut down + resolver.listener.onResult( + ResolutionResult.newBuilder() + .setAddresses(new ArrayList<>()) + .setServiceConfig( + ConfigOrError.fromError(Status.UNAVAILABLE.withDescription("Resolution failed"))) + .build()); + Thread.sleep(1100); + assertThat(timer.getPendingTasks()).isEmpty(); + resolver.resolved(); + verifyNoMoreInteractions(mockLoadBalancer); + } + @Test public void interceptor() throws Exception { final AtomicLong atomic = new AtomicLong(); @@ -3138,6 +3211,48 @@ public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exc assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); } + @Test + public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends_usesListener2onResult2() + throws Exception { + timer.forwardNanos(1234); + channelBuilder.maxTraceEvents(10); + List servers = new ArrayList<>(); + servers.add(new EquivalentAddressGroup(socketAddress)); + FakeNameResolverFactory nameResolverFactory = + new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); + channelBuilder.nameResolverFactory(nameResolverFactory); + createChannel(); + + int prevSize = getStats(channel).channelTrace.events.size(); + ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .build(); + + channel.syncContext.execute( + () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); + + prevSize = getStats(channel).channelTrace.events.size(); + nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + + prevSize = getStats(channel).channelTrace.events.size(); + nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); + + prevSize = getStats(channel).channelTrace.events.size(); + ResolutionResult resolutionResult2 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .build(); + channel.syncContext.execute( + () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult2)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + } + @Test public void channelTracing_serviceConfigChange() throws Exception { timer.forwardNanos(1234); @@ -3197,6 +3312,69 @@ public void channelTracing_serviceConfigChange() throws Exception { .build()); } + @Test + public void channelTracing_serviceConfigChange_usesListener2OnResult2() throws Exception { + timer.forwardNanos(1234); + channelBuilder.maxTraceEvents(10); + List servers = new ArrayList<>(); + servers.add(new EquivalentAddressGroup(socketAddress)); + FakeNameResolverFactory nameResolverFactory = + new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); + 
channelBuilder.nameResolverFactory(nameResolverFactory); + createChannel(); + + int prevSize = getStats(channel).channelTrace.events.size(); + ManagedChannelServiceConfig mcsc1 = createManagedChannelServiceConfig( + ImmutableMap.of(), + new PolicySelection( + mockLoadBalancerProvider, null)); + ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) + .build(); + + channel.syncContext.execute(() -> + nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + assertThat(getStats(channel).channelTrace.events.get(prevSize)) + .isEqualTo(new ChannelTrace.Event.Builder() + .setDescription("Service config changed") + .setSeverity(ChannelTrace.Event.Severity.CT_INFO) + .setTimestampNanos(timer.getTicker().read()) + .build()); + + prevSize = getStats(channel).channelTrace.events.size(); + ResolutionResult resolutionResult2 = ResolutionResult.newBuilder().setAddresses( + Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) + .build(); + channel.syncContext.execute(() -> + nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); + + prevSize = getStats(channel).channelTrace.events.size(); + timer.forwardNanos(1234); + ResolutionResult resolutionResult3 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty())) + .build(); + channel.syncContext.execute(() -> + nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + assertThat(getStats(channel).channelTrace.events.get(prevSize)) + .isEqualTo(new ChannelTrace.Event.Builder() + .setDescription("Service config changed") + .setSeverity(ChannelTrace.Event.Severity.CT_INFO) + .setTimestampNanos(timer.getTicker().read()) + .build()); + } + @Test public void channelTracing_stateChangeEvent() throws Exception { channelBuilder.maxTraceEvents(10); @@ -3857,6 +4035,120 @@ public ClientTransportFactory buildClientTransportFactory() { mychannel.shutdownNow(); } + @Test + public void badServiceConfigIsRecoverable_usesListener2OnResult2() throws Exception { + final List addresses = + ImmutableList.of(new EquivalentAddressGroup(new SocketAddress() {})); + final class FakeNameResolver extends NameResolver { + Listener2 listener; + private final SynchronizationContext syncContext; + + FakeNameResolver(Args args) { + this.syncContext = args.getSynchronizationContext(); + } + + @Override + public String getServiceAuthority() { + return "also fake"; + } + + @Override + public void start(Listener2 listener) { + this.listener = listener; + syncContext.execute(() -> + listener.onResult2( + ResolutionResult.newBuilder() + .setAddresses(addresses) + .setServiceConfig( + ConfigOrError.fromError( + Status.INTERNAL.withDescription("kaboom is invalid"))) + .build())); + } + + @Override + public void shutdown() {} + } + + final class FakeNameResolverFactory2 extends NameResolver.Factory { + 
FakeNameResolver resolver; + ManagedChannelImpl managedChannel; + SynchronizationContext syncContext; + + @Nullable + @Override + public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { + syncContext = args.getSynchronizationContext(); + return (resolver = new FakeNameResolver(args)); + } + + @Override + public String getDefaultScheme() { + return "fake"; + } + } + + FakeNameResolverFactory2 factory = new FakeNameResolverFactory2(); + + ManagedChannelImplBuilder customBuilder = new ManagedChannelImplBuilder(TARGET, + new ClientTransportFactoryBuilder() { + @Override + public ClientTransportFactory buildClientTransportFactory() { + return mockTransportFactory; + } + }, + null); + when(mockTransportFactory.getSupportedSocketAddressTypes()).thenReturn(Collections.singleton( + InetSocketAddress.class)); + customBuilder.executorPool = executorPool; + customBuilder.channelz = channelz; + ManagedChannel mychannel = customBuilder.nameResolverFactory(factory).build(); + + ClientCall call1 = + mychannel.newCall(TestMethodDescriptors.voidMethod(), CallOptions.DEFAULT); + ListenableFuture future1 = ClientCalls.futureUnaryCall(call1, null); + executor.runDueTasks(); + try { + future1.get(1, TimeUnit.SECONDS); + Assert.fail(); + } catch (ExecutionException e) { + assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("kaboom"); + } + + // ok the service config is bad, let's fix it. + Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); + Object fakeLbConfig = new Object(); + PolicySelection lbConfigs = + new PolicySelection( + mockLoadBalancerProvider, fakeLbConfig); + mockLoadBalancerProvider.parseLoadBalancingPolicyConfig(rawServiceConfig); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, lbConfigs); + factory.syncContext.execute(() -> + factory.resolver.listener.onResult2( + ResolutionResult.newBuilder() + .setAddresses(addresses) + .setServiceConfig(ConfigOrError.fromConfig(managedChannelServiceConfig)) + .build())); + + ClientCall call2 = mychannel.newCall( + TestMethodDescriptors.voidMethod(), + CallOptions.DEFAULT.withDeadlineAfter(5, TimeUnit.SECONDS)); + ListenableFuture future2 = ClientCalls.futureUnaryCall(call2, null); + + timer.forwardTime(1234, TimeUnit.SECONDS); + + executor.runDueTasks(); + try { + future2.get(); + Assert.fail(); + } catch (ExecutionException e) { + assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("deadline"); + } + + mychannel.shutdownNow(); + } + @Test public void nameResolverArgsPropagation() { final AtomicReference capturedArgs = new AtomicReference<>(); @@ -4518,7 +4810,7 @@ public NameResolver newNameResolver(final URI targetUri, NameResolver.Args args) } assertEquals(DEFAULT_PORT, args.getDefaultPort()); FakeNameResolverFactory.FakeNameResolver resolver = - new FakeNameResolverFactory.FakeNameResolver(targetUri, error); + new FakeNameResolverFactory.FakeNameResolver(targetUri, error, args); resolvers.add(resolver); return resolver; } @@ -4546,14 +4838,16 @@ void allResolved() { final class FakeNameResolver extends NameResolver { final URI targetUri; + final SynchronizationContext syncContext; Listener2 listener; boolean shutdown; int refreshCalled; Status error; - FakeNameResolver(URI targetUri, Status error) { + FakeNameResolver(URI targetUri, Status error, Args args) { this.targetUri = targetUri; this.error = error; + syncContext = args.getSynchronizationContext(); } @Override public String 
getServiceAuthority() {
@@ -4585,7 +4879,7 @@ void resolved() {
       if (configOrError != null) {
         builder.setServiceConfig(configOrError);
       }
-      listener.onResult(builder.build());
+      syncContext.execute(() -> listener.onResult(builder.build()));
     }

     @Override public void shutdown() {
diff --git a/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java b/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java
index 8801f540394..6347416f0ca 100644
--- a/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java
+++ b/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java
@@ -21,6 +21,7 @@ import static org.mockito.ArgumentMatchers.isA;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;

 import io.grpc.NameResolver;
 import io.grpc.NameResolver.Listener2;
@@ -79,7 +80,7 @@ public void startAndShutdown() {
   // Make sure the ResolutionResultListener callback is added to the ResolutionResult attributes,
   // and the retry scheduler is reset since the name resolution was successful.
   @Test
-  public void onResult_sucess() {
+  public void onResult_success() {
     retryingNameResolver.start(mockListener);
     verify(mockNameResolver).start(listenerCaptor.capture());
@@ -94,6 +95,18 @@ public void onResult_sucess() {
     verify(mockRetryScheduler).reset();
   }

+  @Test
+  public void onResult2_success() {
+    when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.OK);
+    retryingNameResolver.start(mockListener);
+    verify(mockNameResolver).start(listenerCaptor.capture());
+
+    assertThat(listenerCaptor.getValue().onResult2(ResolutionResult.newBuilder().build()))
+        .isEqualTo(Status.OK);
+
+    verify(mockRetryScheduler).reset();
+  }
+
   // Make sure the ResolutionResultListener callback is added to the ResolutionResult attributes,
   // and that a retry gets scheduled when the resolution results are rejected.
   @Test
@@ -112,6 +125,19 @@ public void onResult_failure() {
     verify(mockRetryScheduler).schedule(isA(Runnable.class));
   }

+  // Make sure that a retry gets scheduled when the resolution results are rejected.
+  @Test
+  public void onResult2_failure() {
+    when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.UNAVAILABLE);
+    retryingNameResolver.start(mockListener);
+    verify(mockNameResolver).start(listenerCaptor.capture());
+
+    assertThat(listenerCaptor.getValue().onResult2(ResolutionResult.newBuilder().build()))
+        .isEqualTo(Status.UNAVAILABLE);
+
+    verify(mockRetryScheduler).schedule(isA(Runnable.class));
+  }
+
   // Wrapping a NameResolver more than once is a misconfiguration.
@Test public void onResult_failure_doubleWrapped() { diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java index 3e2cf22605f..c195a78e6f4 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java @@ -152,7 +152,7 @@ public List resolveSrv(String host) throws Exception { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); assertThat(result.getAttributes()).isEqualTo(Attributes.EMPTY); @@ -192,7 +192,7 @@ public ConfigOrError answer(InvocationOnMock invocation) { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -225,7 +225,7 @@ public void resolve_nullResourceResolver() throws Exception { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()) .containsExactly( @@ -272,7 +272,7 @@ public void resolve_addressFailure_stillLookUpBalancersAndServiceConfig() throws resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); EquivalentAddressGroup resolvedBalancerAddr = @@ -306,7 +306,7 @@ public void resolveAll_balancerLookupFails_stillLookUpServiceConfig() throws Exc resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = From 96a788a3499bc5d709d136418f026526272fe94c Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Mon, 29 Jul 2024 12:18:18 -0400 Subject: [PATCH 13/53] xds: Envoy proto sync to 2024-07-06 (#11401) `envoyproxy/envoy`: Sync protos to the latest imported version https://github.com/envoyproxy/envoy/commit/ab911ac2ff971f805ec822ad4d4ff6b42a61cc7c (commit 2024-07-06, cl/651956889). Should be a noop, just a routine xDS proto update to make upcoming RLQS-related imports simpler. 
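
For anyone re-running the sync locally, a rough sketch follows. It assumes xds/third_party/envoy/import.sh takes no arguments and is run from the repository root (both are assumptions); only the commit pin and download URL are taken from the import.sh change in this patch.

    # Hypothetical re-run of the import. VERSION and DOWNLOAD_URL come from
    # xds/third_party/envoy/import.sh as updated below; the script is assumed to
    # fetch the envoy archive for that commit and re-copy the imported protos.
    ./xds/third_party/envoy/import.sh
    git status --short xds/third_party/envoy   # touched files should match the diffstat below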
--- buildscripts/data-plane-api-no-envoy.patch | 50 -------- repositories.bzl | 9 +- xds/third_party/envoy/import.sh | 2 +- .../envoy/config/accesslog/v3/accesslog.proto | 1 + .../envoy/config/bootstrap/v3/bootstrap.proto | 17 ++- .../envoy/config/cluster/v3/cluster.proto | 46 ++++++-- .../config/cluster/v3/outlier_detection.proto | 16 ++- .../proto/envoy/config/core/v3/base.proto | 107 +++++++++++++++++- .../envoy/config/core/v3/config_source.proto | 8 +- .../envoy/config/core/v3/grpc_service.proto | 19 +++- .../envoy/config/core/v3/health_check.proto | 20 +++- .../proto/envoy/config/core/v3/protocol.proto | 38 +++++-- .../envoy/config/endpoint/v3/endpoint.proto | 6 + .../endpoint/v3/endpoint_components.proto | 5 +- .../config/endpoint/v3/load_report.proto | 29 ++++- .../envoy/config/listener/v3/listener.proto | 5 +- .../config/listener/v3/quic_config.proto | 13 ++- .../proto/envoy/config/rbac/v3/rbac.proto | 6 +- .../config/route/v3/route_components.proto | 15 ++- .../envoy/config/trace/v3/dynamic_ot.proto | 10 +- .../proto/envoy/config/trace/v3/zipkin.proto | 7 +- .../envoy/data/accesslog/v3/accesslog.proto | 5 +- .../filters/http/rbac/v3/rbac.proto | 10 +- .../v3/http_connection_manager.proto | 6 +- .../least_request/v3/least_request.proto | 39 ++++++- .../transport_sockets/tls/v3/common.proto | 34 +++++- .../transport_sockets/tls/v3/tls.proto | 18 +-- .../proto/envoy/type/matcher/v3/string.proto | 8 +- 28 files changed, 419 insertions(+), 130 deletions(-) delete mode 100644 buildscripts/data-plane-api-no-envoy.patch diff --git a/buildscripts/data-plane-api-no-envoy.patch b/buildscripts/data-plane-api-no-envoy.patch deleted file mode 100644 index 0c1eec60f1c..00000000000 --- a/buildscripts/data-plane-api-no-envoy.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 786c93ccaae9891338f098a5aba60e9987d78bd3 Mon Sep 17 00:00:00 2001 -From: "update-envoy[bot]" - <135279899+update-envoy[bot]@users.noreply.github.com> -Date: Mon, 17 Jun 2024 02:25:24 +0000 -Subject: [PATCH] bazel: `@envoy_api` should not depend on `@envoy` (#34759) - -The extra dependency was introduced in 65273b2a9b. pgv.patch is only -used by envoy_api, so just moving the file avoids the dependency. 
- -Signed-off-by: Eric Anderson - -Mirrored from https://github.com/envoyproxy/envoy @ 9fde867399cc7fcf97815995f8466f62172b26f6 ---- - bazel/pgv.patch | 13 +++++++++++++ - bazel/repositories.bzl | 2 +- - 2 files changed, 14 insertions(+), 1 deletion(-) - create mode 100644 bazel/pgv.patch - -diff --git a/bazel/pgv.patch b/bazel/pgv.patch -new file mode 100644 -index 000000000..81e25abfe ---- /dev/null -+++ b/bazel/pgv.patch -@@ -0,0 +1,13 @@ -+--- a/templates/cc/register.go 2023-06-22 14:25:05.776175085 +0000 -++++ b/templates/cc/register.go 2023-06-22 14:26:33.008090583 +0000 -+@@ -116,6 +116,10 @@ -+ func (fns CCFuncs) methodName(name interface{}) string { -+ nameStr := fmt.Sprintf("%s", name) -+ switch nameStr { -++ case "concept": -++ return "concept_" -++ case "requires": -++ return "requires_" -+ case "const": -+ return "const_" -+ case "inline": -diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl -index 3e24566a9..7813b0abd 100644 ---- a/bazel/repositories.bzl -+++ b/bazel/repositories.bzl -@@ -19,7 +19,7 @@ def api_dependencies(): - external_http_archive( - name = "com_envoyproxy_protoc_gen_validate", - patch_args = ["-p1"], -- patches = ["@envoy//bazel:pgv.patch"], -+ patches = ["@envoy_api//bazel:pgv.patch"], - ) - external_http_archive( - name = "com_google_googleapis", diff --git a/repositories.bzl b/repositories.bzl index 12dca04862b..c2be72c476f 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -130,14 +130,11 @@ def grpc_java_repositories(bzlmod = False): if not native.existing_rule("envoy_api"): http_archive( name = "envoy_api", - sha256 = "c4c9c43903e413924b0cb08e9747f3c3a0727ad221a3c446a326db32def18c60", - strip_prefix = "data-plane-api-1611a7304794e13efe2d26f8480a2d2473a528c5", + sha256 = "cb7cd388eaa297320d392c872ceb82571dee71f4b6f1c4546b0c0a399636f523", + strip_prefix = "data-plane-api-874e3aa8c3aa5086b6bffa2166e0e0077bb32f71", urls = [ - "https://storage.googleapis.com/grpc-bazel-mirror/github.com/envoyproxy/data-plane-api/archive/1611a7304794e13efe2d26f8480a2d2473a528c5.tar.gz", - "https://github.com/envoyproxy/data-plane-api/archive/1611a7304794e13efe2d26f8480a2d2473a528c5.tar.gz", + "https://github.com/envoyproxy/data-plane-api/archive/874e3aa8c3aa5086b6bffa2166e0e0077bb32f71.tar.gz", ], - patch_args = ["-p1"], - patches = ["@io_grpc_grpc_java//:buildscripts/data-plane-api-no-envoy.patch"], ) def com_google_protobuf(): diff --git a/xds/third_party/envoy/import.sh b/xds/third_party/envoy/import.sh index adc1e5e9e65..3eeb46cf664 100755 --- a/xds/third_party/envoy/import.sh +++ b/xds/third_party/envoy/import.sh @@ -17,7 +17,7 @@ set -e # import VERSION from the google internal copybara_version.txt for Envoy -VERSION=147e6b9523d8d2ae0d9d2205254d6e633644c6fe +VERSION=ab911ac2ff971f805ec822ad4d4ff6b42a61cc7c DOWNLOAD_URL="https://github.com/envoyproxy/envoy/archive/${VERSION}.tar.gz" DOWNLOAD_BASE_DIR="envoy-${VERSION}" SOURCE_PROTO_BASE_DIR="${DOWNLOAD_BASE_DIR}/api" diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/accesslog/v3/accesslog.proto b/xds/third_party/envoy/src/main/proto/envoy/config/accesslog/v3/accesslog.proto index fe3ba2bc97c..5599f8082d3 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/accesslog/v3/accesslog.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/accesslog/v3/accesslog.proto @@ -256,6 +256,7 @@ message ResponseFlagFilter { in: "OM" in: "DF" in: "DO" + in: "DR" } } }]; diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/bootstrap/v3/bootstrap.proto 
b/xds/third_party/envoy/src/main/proto/envoy/config/bootstrap/v3/bootstrap.proto index b5f36f273bc..94868f13432 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/bootstrap/v3/bootstrap.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/bootstrap/v3/bootstrap.proto @@ -41,7 +41,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 41] +// [#next-free-field: 42] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -411,6 +411,10 @@ message Bootstrap { // Optional gRPC async manager config. GrpcAsyncClientManagerConfig grpc_async_client_manager_config = 40; + + // Optional configuration for memory allocation manager. + // Memory releasing is only supported for `tcmalloc allocator `_. + MemoryAllocatorManager memory_allocator_manager = 41; } // Administration interface :ref:`operations documentation @@ -734,3 +738,14 @@ message CustomInlineHeader { // The type of the header that is expected to be set as the inline header. InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; } + +message MemoryAllocatorManager { + // Configures tcmalloc to perform background release of free memory in amount of bytes per ``memory_release_interval`` interval. + // If equals to ``0``, no memory release will occur. Defaults to ``0``. + uint64 bytes_to_release = 1; + + // Interval in milliseconds for memory releasing. If specified, during every + // interval Envoy will try to release ``bytes_to_release`` of free memory back to operating system for reuse. + // Defaults to 1000 milliseconds. + google.protobuf.Duration memory_release_interval = 2; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/cluster.proto b/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/cluster.proto index 9b847a33126..0074e63dff6 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/cluster.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/cluster.proto @@ -168,7 +168,7 @@ message Cluster { // The name of the match, used in stats generation. string name = 1 [(validate.rules).string = {min_len: 1}]; - // Optional endpoint metadata match criteria. + // Optional metadata match criteria. // The connection to the endpoint with metadata matching what is set in this field // will use the transport socket configuration specified here. // The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match @@ -754,12 +754,14 @@ message Cluster { reserved "hosts", "tls_context", "extension_protocol_options"; - // Configuration to use different transport sockets for different endpoints. - // The entry of ``envoy.transport_socket_match`` in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match + // Configuration to use different transport sockets for different endpoints. The entry of + // ``envoy.transport_socket_match`` in the :ref:`LbEndpoint.Metadata + // ` is used to match against the + // transport sockets as they appear in the list. If a match is not found, the search continues in + // :ref:`LocalityLbEndpoints.Metadata + // `. The first :ref:`match + // ` is used. For example, with + // the following match // // .. 
code-block:: yaml // @@ -783,8 +785,9 @@ message Cluster { // socket match in case above. // // If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any - // ``TransportSocketMatch``, socket configuration fallbacks to use the ``tls_context`` or - // ``transport_socket`` specified in this cluster. + // ``TransportSocketMatch``, the locality metadata is then checked for a match. Barring any + // matches in the endpoint or locality metadata, the socket configuration fallbacks to use the + // ``tls_context`` or ``transport_socket`` specified in this cluster. // // This field allows gradual and flexible transport socket configuration changes. // @@ -1236,6 +1239,26 @@ message UpstreamConnectionOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.UpstreamConnectionOptions"; + enum FirstAddressFamilyVersion { + // respect the native ranking of destination ip addresses returned from dns + // resolution + DEFAULT = 0; + + V4 = 1; + + V6 = 2; + } + + message HappyEyeballsConfig { + // Specify the IP address family to attempt connection first in happy + // eyeballs algorithm according to RFC8305#section-4. + FirstAddressFamilyVersion first_address_family_version = 1; + + // Specify the number of addresses of the first_address_family_version being + // attempted for connection before the other address family. + google.protobuf.UInt32Value first_address_family_count = 2 [(validate.rules).uint32 = {gte: 1}]; + } + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v3.TcpKeepalive tcp_keepalive = 1; @@ -1243,6 +1266,11 @@ message UpstreamConnectionOptions { // This can be used by extensions during processing of requests. The association mechanism is // implementation specific. Defaults to false due to performance concerns. bool set_local_interface_name_on_upstream_connections = 2; + + // Configurations for happy eyeballs algorithm. + // Add configs for first_address_family_version and first_address_family_count + // when sorting destination ip addresses. + HappyEyeballsConfig happy_eyeballs_config = 3; } message TrackClusterStats { diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/outlier_detection.proto b/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/outlier_detection.proto index 11289e26b4f..822d81da850 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/outlier_detection.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/cluster/v3/outlier_detection.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.cluster.v3; +import "envoy/config/core/v3/extension.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -19,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // See the :ref:`architecture overview ` for // more information on outlier detection. -// [#next-free-field: 24] +// [#next-free-field: 26] message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.OutlierDetection"; @@ -40,8 +42,8 @@ message OutlierDetection { // Defaults to 30000ms or 30s. google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. + // The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% . 
+ // Will eject at least one host regardless of the value if :ref:`always_eject_one_host` is enabled. google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status @@ -167,4 +169,12 @@ message OutlierDetection { // To change this default behavior set this config to ``false`` where active health checking will not uneject the host. // Defaults to true. google.protobuf.BoolValue successful_active_health_check_uneject_host = 23; + + // Set of host's passive monitors. + // [#not-implemented-hide:] + repeated core.v3.TypedExtensionConfig monitors = 24; + + // If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent`. + // Defaults to false. + google.protobuf.BoolValue always_eject_one_host = 25; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/base.proto b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/base.proto index 97131e4b8c6..df91565d0a7 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/base.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/base.proto @@ -245,7 +245,8 @@ message Metadata { // :ref:`typed_filter_metadata ` // fields are present in the metadata with same keys, // only ``typed_filter_metadata`` field will be parsed. - map filter_metadata = 1; + map filter_metadata = 1 + [(validate.rules).map = {keys {string {min_len: 1}}}]; // Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` // namespace is reserved for Envoy's built-in filters. @@ -253,7 +254,8 @@ message Metadata { // If both :ref:`filter_metadata ` // and ``typed_filter_metadata`` fields are present in the metadata with same keys, // only ``typed_filter_metadata`` field will be parsed. - map typed_filter_metadata = 2; + map typed_filter_metadata = 2 + [(validate.rules).map = {keys {string {min_len: 1}}}]; } // Runtime derived uint32 with a default when not specified. @@ -301,6 +303,59 @@ message RuntimeFeatureFlag { string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } +message KeyValue { + // The key of the key/value pair. + string key = 1 [(validate.rules).string = {min_len: 1 max_bytes: 16384}]; + + // The value of the key/value pair. + bytes value = 2; +} + +// Key/value pair plus option to control append behavior. This is used to specify +// key/value pairs that should be appended to a set of existing key/value pairs. +message KeyValueAppend { + // Describes the supported actions types for key/value pair append action. + enum KeyValueAppendAction { + // If the key already exists, this action will result in the following behavior: + // + // - Comma-concatenated value if multiple values are not allowed. + // - New value added to the list of values if multiple values are allowed. + // + // If the key doesn't exist then this will add pair with specified key and value. + APPEND_IF_EXISTS_OR_ADD = 0; + + // This action will add the key/value pair if it doesn't already exist. If the + // key already exists then this will be a no-op. + ADD_IF_ABSENT = 1; + + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. If the key doesn't exist then this will add + // the pair with specified key and value. + OVERWRITE_IF_EXISTS_OR_ADD = 2; + + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. If the key doesn't exist then this will + // be no-op. 
+ OVERWRITE_IF_EXISTS = 3; + } + + // Key/value pair entry that this option to append or overwrite. + KeyValue entry = 1 [(validate.rules).message = {required: true}]; + + // Describes the action taken to append/overwrite the given value for an existing + // key or to only add this key if it's absent. + KeyValueAppendAction action = 2 [(validate.rules).enum = {defined_only: true}]; +} + +// Key/value pair to append or remove. +message KeyValueMutation { + // Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set. + KeyValueAppend append = 1; + + // Key to remove. Only one of ``append`` or ``remove`` can be set. + string remove = 2 [(validate.rules).string = {max_bytes: 16384}]; +} + // Query parameter name/value pair. message QueryParameter { // The key of the query parameter. Case sensitive. @@ -409,6 +464,7 @@ message WatchedDirectory { } // Data source consisting of a file, an inline value, or an environment variable. +// [#next-free-field: 6] message DataSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource"; @@ -427,12 +483,47 @@ message DataSource { // Environment variable data source. string environment_variable = 4 [(validate.rules).string = {min_len: 1}]; } + + // Watched directory that is watched for file changes. If this is set explicitly, the file + // specified in the ``filename`` field will be reloaded when relevant file move events occur. + // + // .. note:: + // This field only makes sense when the ``filename`` field is set. + // + // .. note:: + // Envoy only updates when the file is replaced by a file move, and not when the file is + // edited in place. + // + // .. note:: + // Not all use cases of ``DataSource`` support watching directories. It depends on the + // specific usage of the ``DataSource``. See the documentation of the parent message for + // details. + WatchedDirectory watched_directory = 5; } // The message specifies the retry policy of remote data source when fetching fails. +// [#next-free-field: 7] message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy"; + // See :ref:`RetryPriority `. + message RetryPriority { + string name = 1 [(validate.rules).string = {min_len: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 2; + } + } + + // See :ref:`RetryHostPredicate `. + message RetryHostPredicate { + string name = 1 [(validate.rules).string = {min_len: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 2; + } + } + // Specifies parameters that control :ref:`retry backoff strategy `. // This parameter is optional, in which case the default base interval is 1000 milliseconds. The // default maximum interval is 10 times the base interval. @@ -442,6 +533,18 @@ message RetryPolicy { // defaults to 1. google.protobuf.UInt32Value num_retries = 2 [(udpa.annotations.field_migrate).rename = "max_retries"]; + + // For details, see :ref:`retry_on `. + string retry_on = 3; + + // For details, see :ref:`retry_priority `. + RetryPriority retry_priority = 4; + + // For details, see :ref:`RetryHostPredicate `. + repeated RetryHostPredicate retry_host_predicate = 5; + + // For details, see :ref:`host_selection_retry_max_attempts `. + int64 host_selection_retry_max_attempts = 6; } // The message specifies how to fetch data from remote and how to verify it. 
diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/config_source.proto b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/config_source.proto index 70204bad9eb..f0effd99e45 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/config_source.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/config_source.proto @@ -28,12 +28,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - AUTO = 0 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; + // When not specified, we assume v3; it is the only supported version. + AUTO = 0; - // Use xDS v2 API. + // Use xDS v2 API. This is no longer supported. V2 = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; // Use xDS v3 API. diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/grpc_service.proto b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/grpc_service.proto index f266c7bce5b..5fd7921a806 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/grpc_service.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/grpc_service.proto @@ -25,10 +25,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC service configuration. This is used by :ref:`ApiConfigSource // ` and filter configurations. -// [#next-free-field: 6] +// [#next-free-field: 7] message GrpcService { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService"; + // [#next-free-field: 6] message EnvoyGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.EnvoyGrpc"; @@ -49,6 +50,18 @@ message GrpcService { // Currently only supported for xDS gRPC streams. // If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. RetryPolicy retry_policy = 3; + + // Maximum gRPC message size that is allowed to be received. + // If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error. + // This limit is applied to individual messages in the streaming response and not the total size of streaming response. + // Defaults to 0, which means unlimited. + google.protobuf.UInt32Value max_receive_message_length = 4; + + // This provides gRPC client level control over envoy generated headers. + // If false, the header will be sent but it can be overridden by per stream option. + // If true, the header will be removed and can not be overridden by per stream option. + // Default to false. + bool skip_envoy_headers = 5; } // [#next-free-field: 9] @@ -300,4 +313,8 @@ message GrpcService { // documentation on :ref:`custom request headers // `. repeated HeaderValue initial_metadata = 5; + + // Optional default retry policy for streams toward the service. + // If an async stream doesn't have retry policy configured in its stream options, this retry policy is used. 
+ RetryPolicy retry_policy = 6; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/health_check.proto b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/health_check.proto index 2ec258d8ac0..821f042bbe6 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/health_check.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/health_check.proto @@ -5,6 +5,7 @@ package envoy.config.core.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/event_service_config.proto"; import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -62,7 +63,7 @@ message HealthStatusSet { [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } -// [#next-free-field: 26] +// [#next-free-field: 27] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -95,12 +96,11 @@ message HealthCheck { // left empty (default value), the name of the cluster this health check is associated // with will be used. The host header can be customized for a specific endpoint by setting the // :ref:`hostname ` field. - string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE}]; // Specifies the HTTP path that will be requested during health checking. For example // ``/healthcheck``. - string path = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + string path = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -178,6 +178,13 @@ message HealthCheck { // payload block must be found, and in the order specified, but not // necessarily contiguous. repeated Payload receive = 2; + + // When setting this value, it tries to attempt health check request with ProxyProtocol. + // When ``send`` is presented, they are sent after preceding ProxyProtocol header. + // Only ProxyProtocol header is sent when ``send`` is not presented. + // It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes + // LOCAL command and doesn't include L3/L4. + ProxyProtocolConfig proxy_protocol_config = 3; } message RedisHealthCheck { @@ -392,6 +399,11 @@ message HealthCheck { // The default value is false. bool always_log_health_check_failures = 19; + // If set to true, health check success events will always be logged. If set to false, only host addition event will be logged + // if it is the first successful health check, or if the healthy threshold is reached. + // The default value is false. + bool always_log_health_check_success = 26; + // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/protocol.proto b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/protocol.proto index d128dc6d93d..e2c5863d784 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/protocol.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/core/v3/protocol.proto @@ -56,7 +56,7 @@ message QuicKeepAliveSettings { } // QUIC protocol options which apply to both downstream and upstream connections. 
-// [#next-free-field: 8] +// [#next-free-field: 9] message QuicProtocolOptions { // Maximum number of streams that the client can negotiate per connection. 100 // if not specified. @@ -64,7 +64,7 @@ message QuicProtocolOptions { // `Initial stream-level flow-control receive window // `_ size. Valid values range from - // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). + // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 16777216 (16 * 1024 * 1024). // // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. @@ -76,8 +76,8 @@ message QuicProtocolOptions { [(validate.rules).uint32 = {lte: 16777216 gte: 1}]; // Similar to ``initial_stream_window_size``, but for connection-level - // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as ``initial_stream_window_size``. + // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults + // to 25165824 (24 * 1024 * 1024). // // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default // window size now, so it's also the minimum. @@ -102,6 +102,15 @@ message QuicProtocolOptions { // A comma-separated list of strings representing QUIC client connection options defined in // `QUICHE `_ and to be sent by upstream connections. string client_connection_options = 7; + + // The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE + // default 600s will be applied. + // For internal corporate network, a long timeout is often fine. + // But for client facing network, 30s is usually a good choice. + google.protobuf.Duration idle_network_timeout = 8 [(validate.rules).duration = { + lte {seconds: 600} + gte {seconds: 1} + }]; } message UpstreamHttpProtocolOptions { @@ -477,10 +486,10 @@ message Http2ProtocolOptions { // Allows proxying Websocket and other upgrades over H2 connect. bool allow_connect = 5; - // [#not-implemented-hide:] Hiding until envoy has full metadata support. + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. // Still under implementation. DO NOT USE. // - // Allows metadata. See [metadata + // Allows sending and receiving HTTP/2 METADATA frames. See [metadata // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more // information. bool allow_metadata = 6; @@ -609,7 +618,7 @@ message GrpcProtocolOptions { } // A message which allows using HTTP/3. -// [#next-free-field: 6] +// [#next-free-field: 7] message Http3ProtocolOptions { QuicProtocolOptions quic_protocol_options = 1; @@ -628,12 +637,27 @@ message Http3ProtocolOptions { // `_ // Note that HTTP/3 CONNECT is not yet an RFC. bool allow_extended_connect = 5 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/3 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. 
+ bool allow_metadata = 6; } // A message to control transformations to the :scheme header message SchemeHeaderTransformation { oneof transformation { // Overwrite any Scheme header with the contents of this string. + // If set, takes precedence over match_upstream. string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}]; } + + // Set the Scheme header to match the upstream transport protocol. For example, should a + // request be sent to the upstream over TLS, the scheme header will be set to "https". Should the + // request be sent over plaintext, the scheme header will be set to "http". + // If scheme_to_overwrite is set, this field is not used. + bool match_upstream = 2; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint.proto b/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint.proto index 20939526eb5..894f68310a4 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint.proto @@ -77,6 +77,12 @@ message ClusterLoadAssignment { // // Envoy supports only one element and will NACK if more than one element is present. // Other xDS-capable data planes will not necessarily have this limitation. + // + // In Envoy, this ``drop_overloads`` config can be overridden by a runtime key + // "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to + // any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%. + // When both ``drop_overloads`` config and "load_balancing_policy.drop_overload_limit" + // setting are in place, the min of these two wins. repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto b/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto index ebd2bb4c332..6673691105e 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto @@ -147,7 +147,7 @@ message LedsClusterLocalityConfig { // A group of endpoints belonging to a Locality. // One can have multiple LocalityLbEndpoints for a locality, but only if // they have different priorities. -// [#next-free-field: 9] +// [#next-free-field: 10] message LocalityLbEndpoints { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LocalityLbEndpoints"; @@ -161,6 +161,9 @@ message LocalityLbEndpoints { // Identifies location of where the upstream hosts run. core.v3.Locality locality = 1; + // Metadata to provide additional information about the locality endpoints in aggregate. + core.v3.Metadata metadata = 9; + // The group of endpoints belonging to the locality specified. // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be // deprecated and replaced by ``load_balancer_endpoints``.] 
diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/load_report.proto b/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/load_report.proto index 832fe83dbb0..fbd1d36d5d0 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/load_report.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/load_report.proto @@ -8,6 +8,8 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -23,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // These are stats Envoy reports to the management server at a frequency defined by // :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. -// [#next-free-field: 9] +// [#next-free-field: 12] message UpstreamLocalityStats { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.UpstreamLocalityStats"; @@ -48,6 +50,31 @@ message UpstreamLocalityStats { // upstream endpoints in the locality. uint64 total_issued_requests = 8; + // The total number of connections in an established state at the time of the + // report. This field is aggregated over all the upstream endpoints in the + // locality. + // In Envoy, this information may be based on ``upstream_cx_active metric``. + // [#not-implemented-hide:] + uint64 total_active_connections = 9 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // The total number of connections opened since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on ``upstream_cx_total`` metric + // compared to itself between start and end of an interval, i.e. + // ``upstream_cx_total``(now) - ``upstream_cx_total``(now - + // load_report_interval). + // [#not-implemented-hide:] + uint64 total_new_connections = 10 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // The total number of connection failures since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on ``upstream_cx_connect_fail`` + // metric compared to itself between start and end of an interval, i.e. + // ``upstream_cx_connect_fail``(now) - ``upstream_cx_connect_fail``(now - + // load_report_interval). + // [#not-implemented-hide:] + uint64 total_fail_connections = 11 [(xds.annotations.v3.field_status).work_in_progress = true]; + // Stats for multi-dimensional load balancing. 
repeated EndpointLoadMetricStats load_metric_stats = 5; diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/listener.proto b/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/listener.proto index a1a3d82c1c8..9381d4eb7ac 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/listener.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/listener.proto @@ -53,7 +53,7 @@ message ListenerCollection { repeated xds.core.v3.CollectionEntry entries = 1; } -// [#next-free-field: 35] +// [#next-free-field: 36] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -387,6 +387,9 @@ message Listener { // Whether the listener should limit connections based upon the value of // :ref:`global_downstream_max_connections `. bool ignore_global_conn_limit = 31; + + // Whether the listener bypasses configured overload manager actions. + bool bypass_overload_manager = 35; } // A placeholder proto so that users can explicitly configure the standard diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/quic_config.proto b/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/quic_config.proto index 3a8ce2cd0a6..3ddebe900ef 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/quic_config.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/listener/v3/quic_config.proto @@ -24,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener config] // Configuration specific to the UDP QUIC listener. -// [#next-free-field: 10] +// [#next-free-field: 12] message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; @@ -72,9 +72,18 @@ message QuicProtocolOptions { core.v3.TypedExtensionConfig connection_id_generator_config = 8; // Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. - // The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with, and only if the client is also QUICHE-based. + // The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with. // If not specified, Envoy will not advertise any server's preferred address. // [#extension-category: envoy.quic.server_preferred_address] core.v3.TypedExtensionConfig server_preferred_address_config = 9 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // Configure the server to send transport parameter `disable_active_migration `_. + // Defaults to false (do not send this transport parameter). + google.protobuf.BoolValue send_disable_active_migration = 10; + + // Configure which implementation of ``quic::QuicConnectionDebugVisitor`` to be used for this listener. + // If not specified, no debug visitor will be attached to connections. 
+ // [#extension-category: envoy.quic.connection_debug_visitor] + core.v3.TypedExtensionConfig connection_debug_visitor_config = 11; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/rbac/v3/rbac.proto b/xds/third_party/envoy/src/main/proto/envoy/config/rbac/v3/rbac.proto index 3a9271c0015..8d98fd7155d 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/rbac/v3/rbac.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/rbac/v3/rbac.proto @@ -194,7 +194,7 @@ message Policy { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 13] +// [#next-free-field: 14] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; @@ -270,6 +270,10 @@ message Permission { // Extension for configuring custom matchers for RBAC. // [#extension-category: envoy.rbac.matchers] core.v3.TypedExtensionConfig matcher = 12; + + // URI template path matching. + // [#extension-category: envoy.path.match] + core.v3.TypedExtensionConfig uri_template = 13; } } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/route/v3/route_components.proto b/xds/third_party/envoy/src/main/proto/envoy/config/route/v3/route_components.proto index 1e2b486d288..7e2ff33da5c 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/route/v3/route_components.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/route/v3/route_components.proto @@ -673,7 +673,7 @@ message RouteMatch { // :ref:`CorsPolicy in filter extension ` // as as alternative. // -// [#next-free-field: 13] +// [#next-free-field: 14] message CorsPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy"; @@ -727,6 +727,10 @@ message CorsPolicy { // // More details refer to https://developer.chrome.com/blog/private-network-access-preflight. google.protobuf.BoolValue allow_private_network_access = 12; + + // Specifies if preflight requests not matching the configured allowed origin should be forwarded + // to the upstream. Default is true. + google.protobuf.BoolValue forward_not_matching_preflights = 13; } // [#next-free-field: 42] @@ -759,7 +763,8 @@ message RouteAction { // collected for the shadow cluster making this feature useful for testing. // // During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is - // useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. + // useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. This behavior can be + // disabled by setting ``disable_shadow_host_suffix_append`` to ``true``. // // .. note:: // @@ -768,7 +773,7 @@ message RouteAction { // .. note:: // // Shadowing doesn't support Http CONNECT and upgrades. - // [#next-free-field: 6] + // [#next-free-field: 7] message RequestMirrorPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.RequestMirrorPolicy"; @@ -814,6 +819,9 @@ message RouteAction { // Determines if the trace span should be sampled. Defaults to true. google.protobuf.BoolValue trace_sampled = 4; + + // Disables appending the ``-shadow`` suffix to the shadowed ``Host`` header. Defaults to ``false``. 
+ bool disable_shadow_host_suffix_append = 6; } // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer @@ -1211,7 +1219,6 @@ message RouteAction { // :ref:`host_rewrite_path_regex `) // causes the original value of the host header, if any, to be appended to the // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. - // This can be disabled by setting the runtime guard ``envoy_reloadable_features_append_xfh_idempotent`` to false. bool append_x_forwarded_host = 38; // Specifies the upstream timeout for the route. If not specified, the default is 15s. This diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto b/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto index 35971f30dfb..d2664ef717e 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto @@ -33,11 +33,15 @@ message DynamicOtConfig { string library = 1 [ deprecated = true, (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true ]; // The configuration to use when creating a tracer from the given dynamic // library. - google.protobuf.Struct config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + google.protobuf.Struct config = 2 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/zipkin.proto b/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/zipkin.proto index a9aefef0c6d..2d8f3195c31 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/zipkin.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/trace/v3/zipkin.proto @@ -82,5 +82,10 @@ message ZipkinConfig { // If this is set to true, then the // :ref:`start_child_span of router ` // SHOULD be set to true also to ensure the correctness of trace chain. - bool split_spans_for_request = 7; + // + // Both this field and ``start_child_span`` are deprecated by the + // :ref:`spawn_upstream_span `. + // Please use that ``spawn_upstream_span`` field to control the span creation. + bool split_spans_for_request = 7 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/data/accesslog/v3/accesslog.proto b/xds/third_party/envoy/src/main/proto/envoy/data/accesslog/v3/accesslog.proto index a247c08df30..2e02f1eb455 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/data/accesslog/v3/accesslog.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/data/accesslog/v3/accesslog.proto @@ -271,7 +271,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 28] +// [#next-free-field: 29] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -372,6 +372,9 @@ message ResponseFlags { // Indicates a DNS resolution failed. bool dns_resolution_failure = 27; + + // Indicates a downstream remote codec level reset was received on the stream + bool downstream_remote_reset = 28; } // Properties of a negotiated TLS connection. 
diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto index eeb505a17fb..649869a255d 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.filters.http.rbac] // RBAC filter config. -// [#next-free-field: 6] +// [#next-free-field: 8] message RBAC { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rbac.v2.RBAC"; @@ -34,6 +34,11 @@ message RBAC { config.rbac.v3.RBAC rules = 1 [(udpa.annotations.field_migrate).oneof_promotion = "rules_specifier"]; + // If specified, rules will emit stats with the given prefix. + // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with + // rules. + string rules_stat_prefix = 6; + // The match tree to use when resolving RBAC action for incoming requests. Requests do not // match any matcher will be denied. // If absent, no enforcing RBAC matcher will be applied. @@ -62,6 +67,9 @@ message RBAC { // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with // shadow rules. string shadow_rules_stat_prefix = 3; + + // If track_per_rule_stats is true, counters will be published for each rule and shadow rule. + bool track_per_rule_stats = 7; } message RBACPerRoute { diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 7a92259eb43..9e7274daa53 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -37,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 57] +// [#next-free-field: 58] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -887,6 +887,10 @@ message HttpConnectionManager { // will be ignored if the ``x-forwarded-port`` header has been set by any trusted proxy in front of Envoy. bool append_x_forwarded_port = 51; + // Append the :ref:`config_http_conn_man_headers_x-envoy-local-overloaded` HTTP header in the scenario where + // the Overload Manager has been triggered. + bool append_local_overload = 57; + // Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to ``true``. // This should be set to ``false`` in cases where Envoy's view of the downstream address may not correspond to the // actual client address, for example, if there's another proxy in front of the Envoy. 
diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index ebef61852e2..095f6075286 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -7,6 +7,7 @@ import "envoy/extensions/load_balancing_policies/common/v3/common.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -22,10 +23,34 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy // extension point. See the :ref:`load balancing architecture overview // ` for more information. -// [#next-free-field: 6] +// [#next-free-field: 7] message LeastRequest { + // Available methods for selecting the host set from which to return the host with the + // fewest active requests. + enum SelectionMethod { + // Return host with fewest requests from a set of ``choice_count`` randomly selected hosts. + // Best selection method for most scenarios. + N_CHOICES = 0; + + // Return host with fewest requests from all hosts. + // Useful in some niche use cases involving low request rates and one of: + // (example 1) low request limits on workloads, or (example 2) few hosts. + // + // Example 1: Consider a workload type that can only accept one connection at a time. + // If such workloads are deployed across many hosts, only a small percentage of those + // workloads have zero connections at any given time, and the rate of new connections is low, + // the ``FULL_SCAN`` method is more likely to select a suitable host than ``N_CHOICES``. + // + // Example 2: Consider a workload type that is only deployed on 2 hosts. With default settings, + // the ``N_CHOICES`` method will return the host with more active requests 25% of the time. + // If the request rate is sufficiently low, the behavior of always selecting the host with least + // requests as of the last metrics refresh may be preferable. + FULL_SCAN = 1; + } + // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + // Only applies to the ``N_CHOICES`` selection method. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; // The following formula is used to calculate the dynamic weights when hosts have different load @@ -61,8 +86,12 @@ message LeastRequest { common.v3.LocalityLbConfig locality_lb_config = 4; // [#not-implemented-hide:] - // Configuration for performing full scan on the list of hosts. - // If this configuration is set, when selecting the host a full scan on the list hosts will be - // used to select the one with least requests instead of using random choices. - google.protobuf.BoolValue enable_full_scan = 5; + // Unused. Replaced by the `selection_method` enum for better extensibility. + google.protobuf.BoolValue enable_full_scan = 5 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Method for selecting the host set from which to return the host with the fewest active requests. 
+ // + // Defaults to ``N_CHOICES``. + SelectionMethod selection_method = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto index d244adcdf54..c1a3f5b33b3 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -314,16 +314,32 @@ message SubjectAltNameMatcher { DNS = 2; URI = 3; IP_ADDRESS = 4; + OTHER_NAME = 5; } // Specification of type of SAN. Note that the default enum value is an invalid choice. SanType san_type = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; // Matcher for SAN value. + // + // The string matching for OTHER_NAME SAN values depends on their ASN.1 type: + // + // * OBJECT: Validated against its dotted numeric notation (e.g., "1.2.3.4") + // * BOOLEAN: Validated against strings "true" or "false" + // * INTEGER/ENUMERATED: Validated against a string containing the integer value + // * NULL: Validated against an empty string + // * Other types: Validated directly against the string value type.matcher.v3.StringMatcher matcher = 2 [(validate.rules).message = {required: true}]; + + // OID Value which is required if OTHER_NAME SAN type is used. + // For example, UPN OID is 1.3.6.1.4.1.311.20.2.3 + // (Reference: http://oid-info.com/get/1.3.6.1.4.1.311.20.2.3). + // + // If set for SAN types other than OTHER_NAME, it will be ignored. + string oid = 3; } -// [#next-free-field: 17] +// [#next-free-field: 18] message CertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CertificateValidationContext"; @@ -339,6 +355,9 @@ message CertificateValidationContext { ACCEPT_UNTRUSTED = 1; } + message SystemRootCerts { + } + reserved 4, 5; reserved "verify_subject_alt_name"; @@ -378,20 +397,23 @@ message CertificateValidationContext { // can be treated as trust anchor as well. It allows verification with building valid partial chain instead // of a full chain. // - // Only one of ``trusted_ca`` and ``ca_certificate_provider_instance`` may be specified. - // - // [#next-major-version: This field and watched_directory below should ideally be moved into a - // separate sub-message, since there's no point in specifying the latter field without this one.] + // If ``ca_certificate_provider_instance`` is set, it takes precedence over ``trusted_ca``. config.core.v3.DataSource trusted_ca = 1 [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; // Certificate provider instance for fetching TLS certificates. // - // Only one of ``trusted_ca`` and ``ca_certificate_provider_instance`` may be specified. + // If set, takes precedence over ``trusted_ca``. // [#not-implemented-hide:] CertificateProviderPluginInstance ca_certificate_provider_instance = 13 [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; + // Use system root certs for validation. + // If present, system root certs are used only if neither of the ``trusted_ca`` + // or ``ca_certificate_provider_instance`` fields are set. + // [#not-implemented-hide:] + SystemRootCerts system_root_certs = 17; + // If specified, updates of a file-based ``trusted_ca`` source will be triggered // by this watch. 
This allows explicit control over the path watched, by // default the parent directory of the filesystem path in ``trusted_ca`` is diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto index f94889cfad0..9d465c97321 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -248,11 +248,8 @@ message CommonTlsContext { // :ref:`Multiple TLS certificates ` can be associated with the // same context to allow both RSA and ECDSA certificates and support SNI-based selection. // - // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, - // and ``tls_certificate_provider_instance`` may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] + // If ``tls_certificate_provider_instance`` is set, this field is ignored. + // If this field is set, ``tls_certificate_sds_secret_configs`` is ignored. repeated TlsCertificate tls_certificates = 2; // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be @@ -261,17 +258,14 @@ message CommonTlsContext { // The same number and types of certificates as :ref:`tls_certificates ` // are valid in the the certificates fetched through this setting. // - // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, - // and ``tls_certificate_provider_instance`` may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] + // If ``tls_certificates`` or ``tls_certificate_provider_instance`` are set, this field + // is ignored. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6; // Certificate provider instance for fetching TLS certs. // - // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, - // and ``tls_certificate_provider_instance`` may be used. + // If this field is set, ``tls_certificates`` and ``tls_certificate_provider_instance`` + // are ignored. // [#not-implemented-hide:] CertificateProviderPluginInstance tls_certificate_provider_instance = 14; diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/string.proto b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/string.proto index 2df1bd37a6a..10033749acd 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/string.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/string.proto @@ -4,6 +4,8 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/regex.proto"; +import "xds/core/v3/extension.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -17,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: String matcher] // Specifies the way to match a string. 
-// [#next-free-field: 8] +// [#next-free-field: 9] message StringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; @@ -61,6 +63,10 @@ message StringMatcher { // // * ``abc`` matches the value ``xyz.abc.def`` string contains = 7 [(validate.rules).string = {min_len: 1}]; + + // Use an extension as the matcher type. + // [#extension-category: envoy.string_matcher] + xds.core.v3.TypedExtensionConfig custom = 8; } // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This From 448ec4f37e6ade0b9e547fd50bece6a22a317bf3 Mon Sep 17 00:00:00 2001 From: Jiajing LU Date: Tue, 30 Jul 2024 23:46:01 +0800 Subject: [PATCH 14/53] xds: XdsClient should unsubscribe on last resource (#11264) Otherwise, the server will continue sending updates and if we re-subscribe to the last resource, the server won't re-send it. Also completely remove the per-type state, as it could only add confusion. --- .../grpc/xds/client/ControlPlaneClient.java | 10 +++- .../io/grpc/xds/client/XdsClientImpl.java | 2 +- .../grpc/xds/GrpcXdsClientImplTestBase.java | 46 ++++++++++++++++++- .../io/grpc/xds/GrpcXdsClientImplV3Test.java | 5 +- 4 files changed, 57 insertions(+), 6 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/client/ControlPlaneClient.java b/xds/src/main/java/io/grpc/xds/client/ControlPlaneClient.java index 761c10ede6a..3074d1120ad 100644 --- a/xds/src/main/java/io/grpc/xds/client/ControlPlaneClient.java +++ b/xds/src/main/java/io/grpc/xds/client/ControlPlaneClient.java @@ -152,8 +152,14 @@ void adjustResourceSubscription(XdsResourceType resourceType) { startRpcStream(); } Collection resources = resourceStore.getSubscribedResources(serverInfo, resourceType); - if (resources != null) { - adsStream.sendDiscoveryRequest(resourceType, resources); + if (resources == null) { + resources = Collections.emptyList(); + } + adsStream.sendDiscoveryRequest(resourceType, resources); + if (resources.isEmpty()) { + // The resource type no longer has subscribing resources; clean up references to it + versions.remove(resourceType); + adsStream.respNonces.remove(resourceType); } } diff --git a/xds/src/main/java/io/grpc/xds/client/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/client/XdsClientImpl.java index 969660bf7d4..79147cd9862 100644 --- a/xds/src/main/java/io/grpc/xds/client/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/client/XdsClientImpl.java @@ -281,7 +281,7 @@ public void cancelXdsResourceWatch(XdsResourceType @SuppressWarnings("unchecked") public void run() { ResourceSubscriber subscriber = - (ResourceSubscriber) resourceSubscribers.get(type).get(resourceName);; + (ResourceSubscriber) resourceSubscribers.get(type).get(resourceName); subscriber.removeWatcher(watcher); if (!subscriber.isWatched()) { subscriber.cancelResourceWatch(); diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java index fd276a849ce..6b04edcb9b8 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java @@ -133,6 +133,7 @@ import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; import org.mockito.stubbing.Answer; +import org.mockito.verification.VerificationMode; /** * Tests for {@link XdsClientImpl}. 
@@ -2757,6 +2758,37 @@ public void edsResourceNotFound() { verifySubscribedResourcesMetadataSizes(0, 0, 0, 1); } + @Test + public void edsCleanupNonceAfterUnsubscription() { + Assume.assumeFalse(ignoreResourceDeletion()); + + // Suppose we have an EDS subscription A.1 + xdsClient.watchXdsResource(XdsEndpointResource.getInstance(), "A.1", edsResourceWatcher); + DiscoveryRpcCall call = resourceDiscoveryCalls.poll(); + assertThat(call).isNotNull(); + call.verifyRequest(EDS, "A.1", "", "", NODE); + + // EDS -> {A.1}, version 1 + List dropOverloads = ImmutableList.of(); + List endpointsV1 = ImmutableList.of(lbEndpointHealthy); + ImmutableMap resourcesV1 = ImmutableMap.of( + "A.1", Any.pack(mf.buildClusterLoadAssignment("A.1", endpointsV1, dropOverloads))); + call.sendResponse(EDS, resourcesV1.values().asList(), VERSION_1, "0000"); + // {A.1} -> ACK, version 1 + call.verifyRequest(EDS, "A.1", VERSION_1, "0000", NODE); + verify(edsResourceWatcher, times(1)).onChanged(any()); + + // trigger an EDS resource unsubscription. + xdsClient.cancelXdsResourceWatch(XdsEndpointResource.getInstance(), "A.1", edsResourceWatcher); + verifySubscribedResourcesMetadataSizes(0, 0, 0, 0); + call.verifyRequest(EDS, Arrays.asList(), VERSION_1, "0000", NODE); + + // When re-subscribing, the version and nonce were properly forgotten, so the request is the + // same as the initial request + xdsClient.watchXdsResource(XdsEndpointResource.getInstance(), "A.1", edsResourceWatcher); + call.verifyRequest(EDS, "A.1", "", "", NODE, Mockito.timeout(2000).times(2)); + } + @Test public void edsResponseErrorHandling_allResourcesFailedUnpack() { DiscoveryRpcCall call = startResourceWatcher(XdsEndpointResource.getInstance(), EDS_RESOURCE, @@ -3787,10 +3819,22 @@ protected abstract static class DiscoveryRpcCall { protected void verifyRequest( XdsResourceType type, List resources, String versionInfo, String nonce, - Node node) { + Node node, VerificationMode verificationMode) { throw new UnsupportedOperationException(); } + protected void verifyRequest( + XdsResourceType type, List resources, String versionInfo, String nonce, + Node node) { + verifyRequest(type, resources, versionInfo, nonce, node, Mockito.timeout(2000)); + } + + protected void verifyRequest( + XdsResourceType type, String resource, String versionInfo, String nonce, + Node node, VerificationMode verificationMode) { + verifyRequest(type, ImmutableList.of(resource), versionInfo, nonce, node, verificationMode); + } + protected void verifyRequest( XdsResourceType type, String resource, String versionInfo, String nonce, Node node) { verifyRequest(type, ImmutableList.of(resource), versionInfo, nonce, node); diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplV3Test.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplV3Test.java index 71d0895a252..2b2ce5cbd72 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplV3Test.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplV3Test.java @@ -118,6 +118,7 @@ import org.mockito.ArgumentMatcher; import org.mockito.InOrder; import org.mockito.Mockito; +import org.mockito.verification.VerificationMode; /** * Tests for {@link XdsClientImpl} with protocol version v3. 
@@ -205,8 +206,8 @@ private DiscoveryRpcCallV3(StreamObserver requestObserver, @Override protected void verifyRequest( XdsResourceType type, List resources, String versionInfo, String nonce, - EnvoyProtoData.Node node) { - verify(requestObserver, Mockito.timeout(2000)).onNext(argThat(new DiscoveryRequestMatcher( + EnvoyProtoData.Node node, VerificationMode verificationMode) { + verify(requestObserver, verificationMode).onNext(argThat(new DiscoveryRequestMatcher( node.toEnvoyProtoNode(), versionInfo, resources, type.typeUrl(), nonce, null, null))); } From 0017c98f6b6b800b8515aa7799567b54830f8aea Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Tue, 30 Jul 2024 15:17:49 -0400 Subject: [PATCH 15/53] xds: cncf/xds proto sync to 2024-07-24 (#11417) `cncf/xds`: Sync protos to the latest imported version cncf/xds@024c85f (commit 2024-07-23, cl/655545156). Should be a noop, just a routine xDS proto update to make upcoming RLQS-related imports simpler, see related #11401. Note that CEL is only added as a bazel dependency as now it's required to build cncf/xds. Actual third-party source import will be done in the follow up PR, where RLQS dependencies are added to the import scripts. --- MODULE.bazel | 2 ++ repositories.bzl | 15 ++++++++++++--- xds/third_party/xds/import.sh | 2 +- .../src/main/proto/udpa/annotations/migrate.proto | 2 +- .../main/proto/udpa/annotations/security.proto | 2 +- .../main/proto/udpa/annotations/sensitive.proto | 2 +- .../src/main/proto/udpa/annotations/status.proto | 2 +- .../main/proto/udpa/annotations/versioning.proto | 2 +- .../main/proto/xds/type/matcher/v3/string.proto | 7 ++++++- 9 files changed, 26 insertions(+), 10 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index b0c923c0c35..89ce334d270 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -47,6 +47,8 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ bazel_dep(name = "bazel_skylib", version = "1.7.1") bazel_dep(name = "googleapis", repo_name = "com_google_googleapis", version = "0.0.0-20240326-1c8d509c5") +# CEL Spec may be removed when cncf/xds MODULE is no longer using protobuf 27.x +bazel_dep(name = "cel-spec", repo_name = "dev_cel", version = "0.15.0") bazel_dep(name = "grpc", repo_name = "com_github_grpc_grpc", version = "1.56.3.bcr.1") bazel_dep(name = "grpc-proto", repo_name = "io_grpc_grpc_proto", version = "0.0.0-20240627-ec30f58") bazel_dep(name = "protobuf", repo_name = "com_google_protobuf", version = "23.1") diff --git a/repositories.bzl b/repositories.bzl index c2be72c476f..af3acc8ddcf 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -87,13 +87,22 @@ IO_GRPC_GRPC_JAVA_OVERRIDE_TARGETS = { def grpc_java_repositories(bzlmod = False): """Imports dependencies for grpc-java.""" + if not bzlmod and not native.existing_rule("dev_cel"): + http_archive( + name = "dev_cel", + strip_prefix = "cel-spec-0.15.0", + sha256 = "3ee09eb69dbe77722e9dee23dc48dc2cd9f765869fcf5ffb1226587c81791a0b", + urls = [ + "https://github.com/google/cel-spec/archive/refs/tags/v0.15.0.tar.gz", + ], + ) if not native.existing_rule("com_github_cncf_xds"): http_archive( name = "com_github_cncf_xds", - strip_prefix = "xds-e9ce68804cb4e64cab5a52e3c8baf840d4ff87b7", - sha256 = "0d33b83f8c6368954e72e7785539f0d272a8aba2f6e2e336ed15fd1514bc9899", + strip_prefix = "xds-024c85f92f20cab567a83acc50934c7f9711d124", + sha256 = "5f403aa681711500ca8e62387be3e37d971977db6e88616fc21862a406430649", urls = [ - "https://github.com/cncf/xds/archive/e9ce68804cb4e64cab5a52e3c8baf840d4ff87b7.tar.gz", + 
"https://github.com/cncf/xds/archive/024c85f92f20cab567a83acc50934c7f9711d124.tar.gz", ], ) if not bzlmod and not native.existing_rule("com_github_grpc_grpc"): diff --git a/xds/third_party/xds/import.sh b/xds/third_party/xds/import.sh index cda86d0368f..44f9ad12ed4 100755 --- a/xds/third_party/xds/import.sh +++ b/xds/third_party/xds/import.sh @@ -17,7 +17,7 @@ set -e # import VERSION from one of the google internal CLs -VERSION=e9ce68804cb4e64cab5a52e3c8baf840d4ff87b7 +VERSION=024c85f92f20cab567a83acc50934c7f9711d124 DOWNLOAD_URL="https://github.com/cncf/xds/archive/${VERSION}.tar.gz" DOWNLOAD_BASE_DIR="xds-${VERSION}" SOURCE_PROTO_BASE_DIR="${DOWNLOAD_BASE_DIR}" diff --git a/xds/third_party/xds/src/main/proto/udpa/annotations/migrate.proto b/xds/third_party/xds/src/main/proto/udpa/annotations/migrate.proto index 5289cb8a742..5f5f389b7d2 100644 --- a/xds/third_party/xds/src/main/proto/udpa/annotations/migrate.proto +++ b/xds/third_party/xds/src/main/proto/udpa/annotations/migrate.proto @@ -8,7 +8,7 @@ package udpa.annotations; import "google/protobuf/descriptor.proto"; -option go_package = "github.com/cncf/xds/go/annotations"; +option go_package = "github.com/cncf/xds/go/udpa/annotations"; // Magic number in this file derived from top 28bit of SHA256 digest of // "udpa.annotation.migrate". diff --git a/xds/third_party/xds/src/main/proto/udpa/annotations/security.proto b/xds/third_party/xds/src/main/proto/udpa/annotations/security.proto index 52801d30d1e..0ef919716da 100644 --- a/xds/third_party/xds/src/main/proto/udpa/annotations/security.proto +++ b/xds/third_party/xds/src/main/proto/udpa/annotations/security.proto @@ -10,7 +10,7 @@ import "udpa/annotations/status.proto"; import "google/protobuf/descriptor.proto"; -option go_package = "github.com/cncf/xds/go/annotations"; +option go_package = "github.com/cncf/xds/go/udpa/annotations"; // All annotations in this file are experimental and subject to change. Their // only consumer today is the Envoy APIs and SecuritAnnotationValidator protoc diff --git a/xds/third_party/xds/src/main/proto/udpa/annotations/sensitive.proto b/xds/third_party/xds/src/main/proto/udpa/annotations/sensitive.proto index ab822fb4884..c7d8af608be 100644 --- a/xds/third_party/xds/src/main/proto/udpa/annotations/sensitive.proto +++ b/xds/third_party/xds/src/main/proto/udpa/annotations/sensitive.proto @@ -8,7 +8,7 @@ package udpa.annotations; import "google/protobuf/descriptor.proto"; -option go_package = "github.com/cncf/xds/go/annotations"; +option go_package = "github.com/cncf/xds/go/udpa/annotations"; extend google.protobuf.FieldOptions { // Magic number is the 28 most significant bits in the sha256sum of "udpa.annotations.sensitive". diff --git a/xds/third_party/xds/src/main/proto/udpa/annotations/status.proto b/xds/third_party/xds/src/main/proto/udpa/annotations/status.proto index 76cfd4dcfef..5a90bde29c7 100644 --- a/xds/third_party/xds/src/main/proto/udpa/annotations/status.proto +++ b/xds/third_party/xds/src/main/proto/udpa/annotations/status.proto @@ -8,7 +8,7 @@ package udpa.annotations; import "google/protobuf/descriptor.proto"; -option go_package = "github.com/cncf/xds/go/annotations"; +option go_package = "github.com/cncf/xds/go/udpa/annotations"; // Magic number in this file derived from top 28bit of SHA256 digest of // "udpa.annotation.status". 
diff --git a/xds/third_party/xds/src/main/proto/udpa/annotations/versioning.proto b/xds/third_party/xds/src/main/proto/udpa/annotations/versioning.proto index dcb7c85fd4f..06df78d818f 100644 --- a/xds/third_party/xds/src/main/proto/udpa/annotations/versioning.proto +++ b/xds/third_party/xds/src/main/proto/udpa/annotations/versioning.proto @@ -8,7 +8,7 @@ package udpa.annotations; import "google/protobuf/descriptor.proto"; -option go_package = "github.com/cncf/xds/go/annotations"; +option go_package = "github.com/cncf/xds/go/udpa/annotations"; extend google.protobuf.MessageOptions { // Magic number derived from 0x78 ('x') 0x44 ('D') 0x53 ('S') diff --git a/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/string.proto b/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/string.proto index fdc598e174a..e58cb413e96 100644 --- a/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/string.proto +++ b/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/string.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package xds.type.matcher.v3; +import "xds/core/v3/extension.proto"; import "xds/type/matcher/v3/regex.proto"; import "validate/validate.proto"; @@ -14,7 +15,7 @@ option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; // [#protodoc-title: String matcher] // Specifies the way to match a string. -// [#next-free-field: 8] +// [#next-free-field: 9] message StringMatcher { oneof match_pattern { option (validate.required) = true; @@ -52,6 +53,10 @@ message StringMatcher { // // * *abc* matches the value *xyz.abc.def* string contains = 7 [(validate.rules).string = {min_len: 1}]; + + // Use an extension as the matcher type. + // [#extension-category: envoy.string_matcher] + xds.core.v3.TypedExtensionConfig custom = 8; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. 
This has no From 0090a526d7eafd493005bf579934a4689979d6e9 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Wed, 31 Jul 2024 11:13:59 +0530 Subject: [PATCH 16/53] Start 1.67.0 development cycle (#11416) --- MODULE.bazel | 2 +- build.gradle | 2 +- .../src/test/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- core/src/main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/android/clientcache/app/build.gradle | 10 +++++----- examples/android/helloworld/app/build.gradle | 8 ++++---- examples/android/routeguide/app/build.gradle | 8 ++++---- examples/android/strictmode/app/build.gradle | 8 ++++---- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-debug/build.gradle | 2 +- examples/example-debug/pom.xml | 4 ++-- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 ++-- examples/example-gcp-csm-observability/build.gradle | 2 +- examples/example-gcp-observability/build.gradle | 2 +- examples/example-hostname/build.gradle | 2 +- examples/example-hostname/pom.xml | 4 ++-- examples/example-jwt-auth/build.gradle | 2 +- examples/example-jwt-auth/pom.xml | 4 ++-- examples/example-oauth/build.gradle | 2 +- examples/example-oauth/pom.xml | 4 ++-- examples/example-opentelemetry/build.gradle | 2 +- examples/example-orca/build.gradle | 2 +- examples/example-reflection/build.gradle | 2 +- examples/example-servlet/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 ++-- examples/example-xds/build.gradle | 2 +- examples/pom.xml | 4 ++-- 31 files changed, 51 insertions(+), 51 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 89ce334d270..2b5d85490f3 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -2,7 +2,7 @@ module( name = "grpc-java", compatibility_level = 0, repo_name = "io_grpc_grpc_java", - version = "1.66.0-SNAPSHOT", # CURRENT_GRPC_VERSION + version = "1.67.0-SNAPSHOT", # CURRENT_GRPC_VERSION ) # GRPC_DEPS_START diff --git a/build.gradle b/build.gradle index 76449ec0107..74cfacb800a 100644 --- a/build.gradle +++ b/build.gradle @@ -21,7 +21,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.66.0-SNAPSHOT" // CURRENT_GRPC_VERSION + version = "1.67.0-SNAPSHOT" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index 5666abe8fda..75e9e0b47e0 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.66.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.67.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated @java.lang.Deprecated diff --git a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index 52e2a772414..3852b6ee547 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.66.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.67.0-SNAPSHOT)", comments = "Source: 
grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index fa488f30ef8..593bdbce13f 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -219,7 +219,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - public static final String IMPLEMENTATION_VERSION = "1.66.0-SNAPSHOT"; // CURRENT_GRPC_VERSION + public static final String IMPLEMENTATION_VERSION = "1.67.0-SNAPSHOT"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index 64e95de4738..0ca032fb0e4 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -34,7 +34,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -54,12 +54,12 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' testImplementation 'junit:junit:4.13.2' testImplementation 'com.google.truth:truth:1.1.5' - testImplementation 'io.grpc:grpc-testing:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index f9433f14010..0f1e8b4047b 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index 2431b473f29..c33135233ea 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index 699c8dd9d68..e8e2e8cac29 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -33,7 +33,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -53,8 +53,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/build.gradle b/examples/build.gradle index c9213cc6a21..076e0c4a25b 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
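The CURRENT_GRPC_VERSION marker is what makes a sweep like this patch mechanical: only lines tagged with the marker get rewritten, so unrelated version strings are left alone. The actual release tooling is not part of this series; the snippet below is only a hypothetical sketch of a marker-driven bump, with the file path and version strings as placeholders.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;

/** Hypothetical sketch: rewrite only the lines tagged with the CURRENT_GRPC_VERSION marker. */
public final class VersionBump {
  public static void main(String[] args) throws IOException {
    Path buildFile = Path.of("examples/build.gradle");  // placeholder path
    String oldVersion = "1.66.0-SNAPSHOT";
    String newVersion = "1.67.0-SNAPSHOT";

    List<String> lines = Files.readAllLines(buildFile, StandardCharsets.UTF_8);
    List<String> bumped = lines.stream()
        // Only touch lines carrying the marker, so other version strings stay untouched.
        .map(line -> line.contains("CURRENT_GRPC_VERSION")
            ? line.replace(oldVersion, newVersion)
            : line)
        .collect(Collectors.toList());
    Files.write(buildFile, bumped, StandardCharsets.UTF_8);
  }
}
```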
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 06b7ac501d0..3c998586bb6 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-debug/build.gradle b/examples/example-debug/build.gradle index 624483f663e..ca151a13c1a 100644 --- a/examples/example-debug/build.gradle +++ b/examples/example-debug/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' dependencies { diff --git a/examples/example-debug/pom.xml b/examples/example-debug/pom.xml index 5aa8065ad31..10ccf834d86 100644 --- a/examples/example-debug/pom.xml +++ b/examples/example-debug/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-debug https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index c43443c3860..40e72afad82 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index d91eeb15ded..1e58e21e975 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-gcp-csm-observability/build.gradle b/examples/example-gcp-csm-observability/build.gradle index a24490918b5..5de2b1995e2 100644 --- a/examples/example-gcp-csm-observability/build.gradle +++ b/examples/example-gcp-csm-observability/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-gcp-observability/build.gradle b/examples/example-gcp-observability/build.gradle index d6dd1aedc6e..0462c987f52 100644 --- a/examples/example-gcp-observability/build.gradle +++ b/examples/example-gcp-observability/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle index ee5e5cf5c70..ab45ee2dc5b 100644 --- a/examples/example-hostname/build.gradle +++ b/examples/example-hostname/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' dependencies { diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index 05131b89978..19b5f8b3c20 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-hostname https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-jwt-auth/build.gradle b/examples/example-jwt-auth/build.gradle index 2ad3c91f190..6fdd4498c7d 100644 --- a/examples/example-jwt-auth/build.gradle +++ b/examples/example-jwt-auth/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-jwt-auth/pom.xml b/examples/example-jwt-auth/pom.xml index 01cf0edce28..ad530e33aa7 100644 --- a/examples/example-jwt-auth/pom.xml +++ b/examples/example-jwt-auth/pom.xml @@ -7,13 +7,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-jwt-auth https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/examples/example-oauth/build.gradle b/examples/example-oauth/build.gradle index 23a6633e264..255633b4f9f 100644 --- a/examples/example-oauth/build.gradle +++ b/examples/example-oauth/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-oauth/pom.xml b/examples/example-oauth/pom.xml index afd45aecd39..2c38a05b3e4 100644 --- a/examples/example-oauth/pom.xml +++ b/examples/example-oauth/pom.xml @@ -7,13 +7,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-oauth https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/examples/example-opentelemetry/build.gradle b/examples/example-opentelemetry/build.gradle index 55d6685d771..00f7dc101bf 100644 --- a/examples/example-opentelemetry/build.gradle +++ b/examples/example-opentelemetry/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-orca/build.gradle b/examples/example-orca/build.gradle index f3eae10ace4..22feb8cae42 100644 --- a/examples/example-orca/build.gradle +++ b/examples/example-orca/build.gradle @@ -18,7 +18,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-reflection/build.gradle b/examples/example-reflection/build.gradle index 0b5c99898ed..78821391911 100644 --- a/examples/example-reflection/build.gradle +++ b/examples/example-reflection/build.gradle @@ -18,7 +18,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-servlet/build.gradle b/examples/example-servlet/build.gradle index b73d21fbc4c..9542ba0277f 100644 --- a/examples/example-servlet/build.gradle +++ b/examples/example-servlet/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 3791cc03271..94257af4758 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index 1263b347030..bc9c0a7a8ee 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-tls https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle index 9807b1f8b74..2554adb0033 100644 --- a/examples/example-xds/build.gradle +++ b/examples/example-xds/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/pom.xml b/examples/pom.xml index a71e9d449c3..2b25d13b50c 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT examples https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 3.25.3 From dc83446d982b53f3f64ad885db23571729cd7c9b Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 25 Jul 2024 13:09:13 -0700 Subject: [PATCH 17/53] xds: Stop extending RR in WRR They share very little code, and we really don't want RoundRobinLb to be public and non-final. 
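To make that concrete before the details below: the diff adds value-based equals() and hashCode() to FixedResultPicker, which lets a balancer skip redundant balancing-state updates whenever the new picker equals the current one. The following is a minimal, self-contained sketch with simplified stand-in names, not the actual grpc-java classes.

```java
/**
 * Illustrative sketch only; FixedResult and updateBalancingState are simplified stand-ins
 * for the FixedResultPicker and balancer code touched by this patch.
 */
public final class PickerDedupSketch {

  /** A picker that always returns the same fixed result, with value-based equality. */
  static final class FixedResult {
    private final String result;  // stands in for PickResult

    FixedResult(String result) {
      this.result = result;
    }

    @Override
    public boolean equals(Object o) {
      return o instanceof FixedResult && result.equals(((FixedResult) o).result);
    }

    @Override
    public int hashCode() {
      return result.hashCode();
    }
  }

  private String currentState = "IDLE";
  private FixedResult currentPicker = new FixedResult("no result");
  private int helperUpdates;  // counts notifications to the (hypothetical) helper

  /** Forwards an update only when the state or the picker actually changed. */
  void updateBalancingState(String newState, FixedResult newPicker) {
    if (!newState.equals(currentState) || !newPicker.equals(currentPicker)) {
      currentState = newState;
      currentPicker = newPicker;
      helperUpdates++;
    }
  }

  public static void main(String[] args) {
    PickerDedupSketch lb = new PickerDedupSketch();
    lb.updateBalancingState("CONNECTING", new FixedResult("no result"));
    lb.updateBalancingState("CONNECTING", new FixedResult("no result"));  // squelched by equals()
    System.out.println(lb.helperUpdates);  // prints 1
  }
}
```

The real guard of this shape appears in the updateBalancingState() methods in the RoundRobinLoadBalancer and WeightedRoundRobinLoadBalancer hunks below.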
Originally, WRR was expected to share much more code with RR, and even delegated to RR at times. The delegation was removed in 111ff60e. After dca89b25, most of the sharing has been moved out into general-purpose tools that can be used by any LB policy. FixedResultPicker now has equals() to make it an EmptyPicker replacement. RoundRobinLb still uses EmptyPicker because fixing its tests is a larger change. OutlierDetectionLbTest was changed because FixedResultPicker is used by PickFirstLeafLb, and now RoundRobinLb can squelch some of its updates for ready pickers. --- api/src/main/java/io/grpc/LoadBalancer.java | 14 ++++++ .../io/grpc/util/RoundRobinLoadBalancer.java | 8 ++-- .../OutlierDetectionLoadBalancerTest.java | 4 +- .../xds/WeightedRoundRobinLoadBalancer.java | 45 +++++++++++++++++-- .../WeightedRoundRobinLoadBalancerTest.java | 4 +- 5 files changed, 63 insertions(+), 12 deletions(-) diff --git a/api/src/main/java/io/grpc/LoadBalancer.java b/api/src/main/java/io/grpc/LoadBalancer.java index 80e3f8b89c7..15106a5ffc6 100644 --- a/api/src/main/java/io/grpc/LoadBalancer.java +++ b/api/src/main/java/io/grpc/LoadBalancer.java @@ -1526,5 +1526,19 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { public String toString() { return "FixedResultPicker(" + result + ")"; } + + @Override + public int hashCode() { + return result.hashCode(); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof FixedResultPicker)) { + return false; + } + FixedResultPicker that = (FixedResultPicker) o; + return this.result.equals(that.result); + } } } diff --git a/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java b/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java index a06bae545df..7c235bb3640 100644 --- a/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java @@ -27,7 +27,6 @@ import com.google.common.base.Preconditions; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; -import io.grpc.Internal; import io.grpc.LoadBalancer; import io.grpc.NameResolver; import java.util.ArrayList; @@ -41,10 +40,9 @@ * A {@link LoadBalancer} that provides round-robin load-balancing over the {@link * EquivalentAddressGroup}s from the {@link NameResolver}.
*/ -@Internal -public class RoundRobinLoadBalancer extends MultiChildLoadBalancer { +final class RoundRobinLoadBalancer extends MultiChildLoadBalancer { private final AtomicInteger sequence = new AtomicInteger(new Random().nextInt()); - protected SubchannelPicker currentPicker = new EmptyPicker(); + private SubchannelPicker currentPicker = new EmptyPicker(); public RoundRobinLoadBalancer(Helper helper) { super(helper); @@ -87,7 +85,7 @@ private void updateBalancingState(ConnectivityState state, SubchannelPicker pick } } - protected SubchannelPicker createReadyPicker(Collection children) { + private SubchannelPicker createReadyPicker(Collection children) { List pickerList = new ArrayList<>(); for (ChildLbState child : children) { SubchannelPicker picker = child.getCurrentPicker(); diff --git a/util/src/test/java/io/grpc/util/OutlierDetectionLoadBalancerTest.java b/util/src/test/java/io/grpc/util/OutlierDetectionLoadBalancerTest.java index 8af935d8134..1b0139affef 100644 --- a/util/src/test/java/io/grpc/util/OutlierDetectionLoadBalancerTest.java +++ b/util/src/test/java/io/grpc/util/OutlierDetectionLoadBalancerTest.java @@ -569,7 +569,7 @@ public void successRateOneOutlier_configChange() { loadBalancer.acceptResolvedAddresses(buildResolvedAddress(config, servers)); // The PickFirstLeafLB has an extra level of indirection because of health - int expectedStateChanges = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 16 : 12; + int expectedStateChanges = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 8 : 12; generateLoad(ImmutableMap.of(subchannel2, Status.DEADLINE_EXCEEDED), expectedStateChanges); // Move forward in time to a point where the detection timer has fired. @@ -604,7 +604,7 @@ public void successRateOneOutlier_unejected() { assertEjectedSubchannels(ImmutableSet.of(ImmutableSet.copyOf(servers.get(0).getAddresses()))); // Now we produce more load, but the subchannel has started working and is no longer an outlier. - int expectedStateChanges = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 16 : 12; + int expectedStateChanges = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 8 : 12; generateLoad(ImmutableMap.of(), expectedStateChanges); // Move forward in time to a point where the detection timer has fired. 
diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index c3383148079..115857d43ff 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -42,7 +42,7 @@ import io.grpc.services.MetricReport; import io.grpc.util.ForwardingLoadBalancerHelper; import io.grpc.util.ForwardingSubchannel; -import io.grpc.util.RoundRobinLoadBalancer; +import io.grpc.util.MultiChildLoadBalancer; import io.grpc.xds.orca.OrcaOobUtil; import io.grpc.xds.orca.OrcaOobUtil.OrcaOobReportListener; import io.grpc.xds.orca.OrcaPerRequestUtil; @@ -90,7 +90,7 @@ * See related documentation: https://cloud.google.com/service-mesh/legacy/load-balancing-apis/proxyless-configure-advanced-traffic-management#custom-lb-config */ @ExperimentalApi("https://github.com/grpc/grpc-java/issues/9885") -final class WeightedRoundRobinLoadBalancer extends RoundRobinLoadBalancer { +final class WeightedRoundRobinLoadBalancer extends MultiChildLoadBalancer { private static final LongCounterMetricInstrument RR_FALLBACK_COUNTER; private static final LongCounterMetricInstrument ENDPOINT_WEIGHT_NOT_YET_USEABLE_COUNTER; @@ -107,6 +107,7 @@ final class WeightedRoundRobinLoadBalancer extends RoundRobinLoadBalancer { private final long infTime; private final Ticker ticker; private String locality = ""; + private SubchannelPicker currentPicker = new FixedResultPicker(PickResult.withNoResult()); // The metric instruments are only registered once and shared by all instances of this LB. static { @@ -209,13 +210,51 @@ public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) { return acceptRetVal.status; } + /** + * Updates picker with the list of active subchannels (state == READY). + */ @Override - public SubchannelPicker createReadyPicker(Collection activeList) { + protected void updateOverallBalancingState() { + List activeList = getReadyChildren(); + if (activeList.isEmpty()) { + // No READY subchannels + + // MultiChildLB will request connection immediately on subchannel IDLE. 
+ boolean isConnecting = false; + for (ChildLbState childLbState : getChildLbStates()) { + ConnectivityState state = childLbState.getCurrentState(); + if (state == ConnectivityState.CONNECTING || state == ConnectivityState.IDLE) { + isConnecting = true; + break; + } + } + + if (isConnecting) { + updateBalancingState( + ConnectivityState.CONNECTING, new FixedResultPicker(PickResult.withNoResult())); + } else { + updateBalancingState( + ConnectivityState.TRANSIENT_FAILURE, createReadyPicker(getChildLbStates())); + } + } else { + updateBalancingState(ConnectivityState.READY, createReadyPicker(activeList)); + } + } + + private SubchannelPicker createReadyPicker(Collection activeList) { return new WeightedRoundRobinPicker(ImmutableList.copyOf(activeList), config.enableOobLoadReport, config.errorUtilizationPenalty, sequence, getHelper(), locality); } + private void updateBalancingState(ConnectivityState state, SubchannelPicker picker) { + if (state != currentConnectivityState || !picker.equals(currentPicker)) { + getHelper().updateBalancingState(state, picker); + currentConnectivityState = state; + currentPicker = picker; + } + } + @VisibleForTesting final class WeightedChildLbState extends ChildLbState { diff --git a/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java index a5b5651133b..dd98f1e1ae6 100644 --- a/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java @@ -536,8 +536,8 @@ public void emptyConfig() { verify(helper, times(3)).createSubchannel( any(CreateSubchannelArgs.class)); verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture()); - assertThat(pickerCaptor.getValue().getClass().getName()) - .isEqualTo("io.grpc.util.RoundRobinLoadBalancer$EmptyPicker"); + assertThat(pickerCaptor.getValue().pickSubchannel(mockArgs)) + .isEqualTo(PickResult.withNoResult()); int expectedCount = isEnabledHappyEyeballs() ? servers.size() + 1 : 1; assertThat(fakeClock.forwardTime(11, TimeUnit.SECONDS)).isEqualTo( expectedCount); } From ebffb0a6b2b628f584ce97f943b2739642f58270 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 31 Jul 2024 13:22:04 -0700 Subject: [PATCH 18/53] Revert "Introduce onResult2 in NameResolver Listener2 that returns Status (#11313)" This reverts commit 9ba2f9dec5c71a5d0afbba0f196331a47844bc07. It causes a channel panic due to unimplemented onResult2(). ``` java.lang.UnsupportedOperationException: Not implemented. 
at io.grpc.NameResolver$Listener2.onResult2(NameResolver.java:257) at io.grpc.internal.DnsNameResolver$Resolve.lambda$run$0(DnsNameResolver.java:334) at io.grpc.SynchronizationContext.drain(SynchronizationContext.java:94) at io.grpc.SynchronizationContext.execute(SynchronizationContext.java:126) at io.grpc.internal.DnsNameResolver$Resolve.run(DnsNameResolver.java:333) ``` b/356669977 --- api/src/main/java/io/grpc/NameResolver.java | 10 - .../io/grpc/internal/DnsNameResolver.java | 4 +- .../io/grpc/internal/ManagedChannelImpl.java | 255 ++++++++------- .../grpc/internal/RetryingNameResolver.java | 12 - .../io/grpc/internal/DnsNameResolverTest.java | 50 +-- .../grpc/internal/ManagedChannelImplTest.java | 300 +----------------- .../internal/RetryingNameResolverTest.java | 28 +- .../grpc/grpclb/GrpclbNameResolverTest.java | 10 +- 8 files changed, 168 insertions(+), 501 deletions(-) diff --git a/api/src/main/java/io/grpc/NameResolver.java b/api/src/main/java/io/grpc/NameResolver.java index 8af8112ffdb..a74512eb7e3 100644 --- a/api/src/main/java/io/grpc/NameResolver.java +++ b/api/src/main/java/io/grpc/NameResolver.java @@ -246,16 +246,6 @@ public final void onAddresses( */ @Override public abstract void onError(Status error); - - /** - * Handles updates on resolved addresses and attributes. - * - * @param resolutionResult the resolved server addresses, attributes, and Service Config. - * @since 1.66 - */ - public Status onResult2(ResolutionResult resolutionResult) { - throw new UnsupportedOperationException("Not implemented."); - } } /** diff --git a/core/src/main/java/io/grpc/internal/DnsNameResolver.java b/core/src/main/java/io/grpc/internal/DnsNameResolver.java index df51d6f2c5c..5ef6dd863c2 100644 --- a/core/src/main/java/io/grpc/internal/DnsNameResolver.java +++ b/core/src/main/java/io/grpc/internal/DnsNameResolver.java @@ -330,9 +330,7 @@ public void run() { resolutionResultBuilder.setAttributes(result.attributes); } } - syncContext.execute(() -> { - savedListener.onResult2(resolutionResultBuilder.build()); - }); + savedListener.onResult(resolutionResultBuilder.build()); } catch (IOException e) { savedListener.onError( Status.UNAVAILABLE.withDescription("Unable to resolve host " + host).withCause(e)); diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java index 7f45ca967ea..c5c7b66e15d 100644 --- a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java +++ b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java @@ -1673,147 +1673,146 @@ final class NameResolverListener extends NameResolver.Listener2 { public void onResult(final ResolutionResult resolutionResult) { final class NamesResolved implements Runnable { + @SuppressWarnings("ReferenceEquality") @Override public void run() { - Status status = onResult2(resolutionResult); - ResolutionResultListener resolutionResultListener = resolutionResult.getAttributes() - .get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY); - resolutionResultListener.resolutionAttempted(status); - } - } - - syncContext.execute(new NamesResolved()); - } + if (ManagedChannelImpl.this.nameResolver != resolver) { + return; + } - @SuppressWarnings("ReferenceEquality") - @Override - public Status onResult2(final ResolutionResult resolutionResult) { - syncContext.throwIfNotInThisSynchronizationContext(); - if (ManagedChannelImpl.this.nameResolver != resolver) { - return Status.OK; - } - - List servers = resolutionResult.getAddresses(); - channelLogger.log( - 
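The stack trace above reduces to one API detail: the reverted change gave Listener2 a new onResult2() whose default implementation throws, and made DnsNameResolver call it, so any listener that still only overrides onResult() fails at runtime. Below is a minimal sketch of that failure mode using simplified names, not the real io.grpc.NameResolver API.

```java
/**
 * Illustrative sketch of the failure mode behind this revert, using simplified names
 * rather than the real io.grpc.NameResolver API.
 */
public final class OnResult2Sketch {

  abstract static class Listener {
    abstract void onResult(String result);

    // The new method ships with a throwing default, mirroring the reverted onResult2().
    String onResult2(String result) {
      throw new UnsupportedOperationException("Not implemented.");
    }
  }

  /** A listener written against the old API surface; it never overrides onResult2(). */
  static final class LegacyListener extends Listener {
    @Override
    void onResult(String result) {
      System.out.println("resolved: " + result);
    }
  }

  public static void main(String[] args) {
    Listener listener = new LegacyListener();
    listener.onResult("10.0.0.1");  // fine
    try {
      listener.onResult2("10.0.0.1");
    } catch (UnsupportedOperationException e) {
      System.out.println("panic: " + e.getMessage());  // prints "panic: Not implemented."
    }
  }
}
```

Once the resolver calls the new entry point unconditionally, as DnsNameResolver did in the reverted commit, every listener of this shape hits the exception shown above.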
ChannelLogLevel.DEBUG, - "Resolved address: {0}, config={1}", - servers, - resolutionResult.getAttributes()); - - if (lastResolutionState != ResolutionState.SUCCESS) { - channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); - lastResolutionState = ResolutionState.SUCCESS; - } - - ConfigOrError configOrError = resolutionResult.getServiceConfig(); - InternalConfigSelector resolvedConfigSelector = - resolutionResult.getAttributes().get(InternalConfigSelector.KEY); - ManagedChannelServiceConfig validServiceConfig = - configOrError != null && configOrError.getConfig() != null - ? (ManagedChannelServiceConfig) configOrError.getConfig() - : null; - Status serviceConfigError = configOrError != null ? configOrError.getError() : null; - - ManagedChannelServiceConfig effectiveServiceConfig; - if (!lookUpServiceConfig) { - if (validServiceConfig != null) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config from name resolver discarded by channel settings"); - } - effectiveServiceConfig = - defaultServiceConfig == null ? EMPTY_SERVICE_CONFIG : defaultServiceConfig; - if (resolvedConfigSelector != null) { + List servers = resolutionResult.getAddresses(); channelLogger.log( - ChannelLogLevel.INFO, - "Config selector from name resolver discarded by channel settings"); - } - realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - } else { - // Try to use config if returned from name resolver - // Otherwise, try to use the default config if available - if (validServiceConfig != null) { - effectiveServiceConfig = validServiceConfig; - if (resolvedConfigSelector != null) { - realChannel.updateConfigSelector(resolvedConfigSelector); - if (effectiveServiceConfig.getDefaultConfigSelector() != null) { + ChannelLogLevel.DEBUG, + "Resolved address: {0}, config={1}", + servers, + resolutionResult.getAttributes()); + + if (lastResolutionState != ResolutionState.SUCCESS) { + channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); + lastResolutionState = ResolutionState.SUCCESS; + } + + ConfigOrError configOrError = resolutionResult.getServiceConfig(); + ResolutionResultListener resolutionResultListener = resolutionResult.getAttributes() + .get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY); + InternalConfigSelector resolvedConfigSelector = + resolutionResult.getAttributes().get(InternalConfigSelector.KEY); + ManagedChannelServiceConfig validServiceConfig = + configOrError != null && configOrError.getConfig() != null + ? (ManagedChannelServiceConfig) configOrError.getConfig() + : null; + Status serviceConfigError = configOrError != null ? configOrError.getError() : null; + + ManagedChannelServiceConfig effectiveServiceConfig; + if (!lookUpServiceConfig) { + if (validServiceConfig != null) { channelLogger.log( - ChannelLogLevel.DEBUG, - "Method configs in service config will be discarded due to presence of" - + "config-selector"); + ChannelLogLevel.INFO, + "Service config from name resolver discarded by channel settings"); + } + effectiveServiceConfig = + defaultServiceConfig == null ? 
EMPTY_SERVICE_CONFIG : defaultServiceConfig; + if (resolvedConfigSelector != null) { + channelLogger.log( + ChannelLogLevel.INFO, + "Config selector from name resolver discarded by channel settings"); } - } else { realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - } - } else if (defaultServiceConfig != null) { - effectiveServiceConfig = defaultServiceConfig; - realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - channelLogger.log( - ChannelLogLevel.INFO, - "Received no service config, using default service config"); - } else if (serviceConfigError != null) { - if (!serviceConfigUpdated) { - // First DNS lookup has invalid service config, and cannot fall back to default - channelLogger.log( - ChannelLogLevel.INFO, - "Fallback to error due to invalid first service config without default config"); - // This error could be an "inappropriate" control plane error that should not bleed - // through to client code using gRPC. We let them flow through here to the LB as - // we later check for these error codes when investigating pick results in - // GrpcUtil.getTransportFromPickResult(). - onError(configOrError.getError()); - return configOrError.getError(); } else { - effectiveServiceConfig = lastServiceConfig; + // Try to use config if returned from name resolver + // Otherwise, try to use the default config if available + if (validServiceConfig != null) { + effectiveServiceConfig = validServiceConfig; + if (resolvedConfigSelector != null) { + realChannel.updateConfigSelector(resolvedConfigSelector); + if (effectiveServiceConfig.getDefaultConfigSelector() != null) { + channelLogger.log( + ChannelLogLevel.DEBUG, + "Method configs in service config will be discarded due to presence of" + + "config-selector"); + } + } else { + realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + } + } else if (defaultServiceConfig != null) { + effectiveServiceConfig = defaultServiceConfig; + realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + channelLogger.log( + ChannelLogLevel.INFO, + "Received no service config, using default service config"); + } else if (serviceConfigError != null) { + if (!serviceConfigUpdated) { + // First DNS lookup has invalid service config, and cannot fall back to default + channelLogger.log( + ChannelLogLevel.INFO, + "Fallback to error due to invalid first service config without default config"); + // This error could be an "inappropriate" control plane error that should not bleed + // through to client code using gRPC. We let them flow through here to the LB as + // we later check for these error codes when investigating pick results in + // GrpcUtil.getTransportFromPickResult(). + onError(configOrError.getError()); + if (resolutionResultListener != null) { + resolutionResultListener.resolutionAttempted(configOrError.getError()); + } + return; + } else { + effectiveServiceConfig = lastServiceConfig; + } + } else { + effectiveServiceConfig = EMPTY_SERVICE_CONFIG; + realChannel.updateConfigSelector(null); + } + if (!effectiveServiceConfig.equals(lastServiceConfig)) { + channelLogger.log( + ChannelLogLevel.INFO, + "Service config changed{0}", + effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? 
" to empty" : ""); + lastServiceConfig = effectiveServiceConfig; + transportProvider.throttle = effectiveServiceConfig.getRetryThrottling(); + } + + try { + // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS + // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, + // lbNeedAddress is not deterministic + serviceConfigUpdated = true; + } catch (RuntimeException re) { + logger.log( + Level.WARNING, + "[" + getLogId() + "] Unexpected exception from parsing service config", + re); + } } - } else { - effectiveServiceConfig = EMPTY_SERVICE_CONFIG; - realChannel.updateConfigSelector(null); - } - if (!effectiveServiceConfig.equals(lastServiceConfig)) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config changed{0}", - effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); - lastServiceConfig = effectiveServiceConfig; - transportProvider.throttle = effectiveServiceConfig.getRetryThrottling(); - } - try { - // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS - // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, - // lbNeedAddress is not deterministic - serviceConfigUpdated = true; - } catch (RuntimeException re) { - logger.log( - Level.WARNING, - "[" + getLogId() + "] Unexpected exception from parsing service config", - re); + Attributes effectiveAttrs = resolutionResult.getAttributes(); + // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. + if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { + Attributes.Builder attrBuilder = + effectiveAttrs.toBuilder().discard(InternalConfigSelector.KEY); + Map healthCheckingConfig = + effectiveServiceConfig.getHealthCheckingConfig(); + if (healthCheckingConfig != null) { + attrBuilder + .set(LoadBalancer.ATTR_HEALTH_CHECKING_CONFIG, healthCheckingConfig) + .build(); + } + Attributes attributes = attrBuilder.build(); + + Status addressAcceptanceStatus = helper.lb.tryAcceptResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(servers) + .setAttributes(attributes) + .setLoadBalancingPolicyConfig(effectiveServiceConfig.getLoadBalancingConfig()) + .build()); + // If a listener is provided, let it know if the addresses were accepted. + if (resolutionResultListener != null) { + resolutionResultListener.resolutionAttempted(addressAcceptanceStatus); + } + } } } - Attributes effectiveAttrs = resolutionResult.getAttributes(); - // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. 
- if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { - Attributes.Builder attrBuilder = - effectiveAttrs.toBuilder().discard(InternalConfigSelector.KEY); - Map healthCheckingConfig = - effectiveServiceConfig.getHealthCheckingConfig(); - if (healthCheckingConfig != null) { - attrBuilder - .set(LoadBalancer.ATTR_HEALTH_CHECKING_CONFIG, healthCheckingConfig) - .build(); - } - Attributes attributes = attrBuilder.build(); - - return helper.lb.tryAcceptResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(attributes) - .setLoadBalancingPolicyConfig(effectiveServiceConfig.getLoadBalancingConfig()) - .build()); - } - return Status.OK; + syncContext.execute(new NamesResolved()); } @Override diff --git a/core/src/main/java/io/grpc/internal/RetryingNameResolver.java b/core/src/main/java/io/grpc/internal/RetryingNameResolver.java index 6dcfcd3534a..6d806e95944 100644 --- a/core/src/main/java/io/grpc/internal/RetryingNameResolver.java +++ b/core/src/main/java/io/grpc/internal/RetryingNameResolver.java @@ -95,24 +95,12 @@ public void onResult(ResolutionResult resolutionResult) { "RetryingNameResolver can only be used once to wrap a NameResolver"); } - // To have retry behavior for name resolvers that haven't migrated to onResult2. delegateListener.onResult(resolutionResult.toBuilder().setAttributes( resolutionResult.getAttributes().toBuilder() .set(RESOLUTION_RESULT_LISTENER_KEY, new ResolutionResultListener()).build()) .build()); } - @Override - public Status onResult2(ResolutionResult resolutionResult) { - Status status = delegateListener.onResult2(resolutionResult); - if (status.isOk()) { - retryScheduler.reset(); - } else { - retryScheduler.schedule(new DelayedNameResolverRefresh()); - } - return status; - } - @Override public void onError(Status error) { delegateListener.onError(error); diff --git a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java index 0512171f4e7..14d3fddd290 100644 --- a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java +++ b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java @@ -26,6 +26,7 @@ import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.isA; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -225,7 +226,13 @@ public void setUp() { System.getProperty(DnsNameResolver.NETWORKADDRESS_CACHE_TTL_PROPERTY); // By default the mock listener processes the result successfully. 
- when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.OK); + doAnswer(invocation -> { + ResolutionResult result = invocation.getArgument(0); + syncContext.execute( + () -> result.getAttributes().get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY) + .resolutionAttempted(Status.OK)); + return null; + }).when(mockListener).onResult(isA(ResolutionResult.class)); } @After @@ -312,13 +319,13 @@ private void resolveNeverCache(boolean isAndroid) throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult2(resultCaptor.capture()); + verify(mockListener, times(2)).onResult(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -340,7 +347,7 @@ public void testExecutor_default() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -382,7 +389,7 @@ public void execute(Runnable command) { resolver.start(mockListener); assertEquals(0, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -411,7 +418,7 @@ public void resolve_cacheForever() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -445,7 +452,7 @@ public void resolve_usingCache() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -480,14 +487,14 @@ public void resolve_cacheExpired() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); fakeTicker.advance(ttl + 1, TimeUnit.SECONDS); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult2(resultCaptor.capture()); + verify(mockListener, times(2)).onResult(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -524,7 +531,7 @@ private void resolveDefaultValue() throws Exception { 
resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -537,7 +544,7 @@ private void resolveDefaultValue() throws Exception { fakeTicker.advance(1, TimeUnit.SECONDS); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult2(resultCaptor.capture()); + verify(mockListener, times(2)).onResult(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -568,7 +575,7 @@ public List resolveAddress(String host) throws Exception { assertThat(fakeExecutor.runDueTasks()).isEqualTo(1); ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); - verify(mockListener).onResult2(ac.capture()); + verify(mockListener).onResult(ac.capture()); verifyNoMoreInteractions(mockListener); assertThat(ac.getValue().getAddresses()).isEmpty(); assertThat(ac.getValue().getServiceConfig()).isNull(); @@ -581,7 +588,12 @@ public List resolveAddress(String host) throws Exception { // Load balancer rejects the empty addresses. @Test public void resolve_emptyResult_notAccepted() throws Exception { - when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.UNAVAILABLE); + doAnswer(invocation -> { + ResolutionResult result = invocation.getArgument(0); + result.getAttributes().get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY) + .resolutionAttempted(Status.UNAVAILABLE); + return null; + }).when(mockListener).onResult(isA(ResolutionResult.class)); DnsNameResolver.enableTxt = true; RetryingNameResolver resolver = newResolver("dns:///addr.fake:1234", 443); @@ -602,7 +614,7 @@ public List resolveAddress(String host) throws Exception { syncContext.execute(() -> assertThat(fakeExecutor.runDueTasks()).isEqualTo(1)); ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); - verify(mockListener).onResult2(ac.capture()); + verify(mockListener).onResult(ac.capture()); verifyNoMoreInteractions(mockListener); assertThat(ac.getValue().getAddresses()).isEmpty(); assertThat(ac.getValue().getServiceConfig()).isNull(); @@ -628,7 +640,7 @@ public void resolve_nullResourceResolver() throws Exception { dnsResolver.setResourceResolver(null); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -700,7 +712,7 @@ public ConfigOrError parseServiceConfig(Map rawServiceConfig) { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -758,7 +770,7 @@ public void resolve_serviceConfigLookupFails_nullServiceConfig() throws Exceptio dnsResolver.setResourceResolver(mockResourceResolver); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + 
verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -790,7 +802,7 @@ public void resolve_serviceConfigMalformed_serviceConfigError() throws Exception dnsResolver.setResourceResolver(mockResourceResolver); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -858,7 +870,7 @@ public HttpConnectProxiedSocketAddress proxyFor(SocketAddress targetAddress) { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); List result = resultCaptor.getValue().getAddresses(); assertThat(result).hasSize(1); EquivalentAddressGroup eag = result.get(0); diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java index 4d42056b689..1d6492f791c 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java +++ b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java @@ -1054,79 +1054,6 @@ public void noMoreCallbackAfterLoadBalancerShutdown() { verifyNoMoreInteractions(mockLoadBalancer); } - @Test - public void noMoreCallbackAfterLoadBalancerShutdown_configError() throws InterruptedException { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed"); - createChannel(); - - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture()); - assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); - - SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class); - SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class); - Subchannel subchannel1 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener1); - Subchannel subchannel2 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener2); - requestConnectionSafely(helper, subchannel1); - requestConnectionSafely(helper, subchannel2); - verify(mockTransportFactory, times(2)) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo1 = transports.poll(); - MockClientTransportInfo transportInfo2 = transports.poll(); - - // LoadBalancer receives all sorts of callbacks - transportInfo1.listener.transportReady(); - - verify(stateListener1, times(2)).onSubchannelState(stateInfoCaptor.capture()); - assertSame(CONNECTING, stateInfoCaptor.getAllValues().get(0).getState()); - assertSame(READY, stateInfoCaptor.getAllValues().get(1).getState()); - - verify(stateListener2).onSubchannelState(stateInfoCaptor.capture()); - assertSame(CONNECTING, 
stateInfoCaptor.getValue().getState()); - - resolver.listener.onError(resolutionError); - verify(mockLoadBalancer).handleNameResolutionError(resolutionError); - - verifyNoMoreInteractions(mockLoadBalancer); - - channel.shutdown(); - verify(mockLoadBalancer).shutdown(); - verifyNoMoreInteractions(stateListener1, stateListener2); - - // LoadBalancer will normally shutdown all subchannels - shutdownSafely(helper, subchannel1); - shutdownSafely(helper, subchannel2); - - // Since subchannels are shutdown, SubchannelStateListeners will only get SHUTDOWN regardless of - // the transport states. - transportInfo1.listener.transportShutdown(Status.UNAVAILABLE); - transportInfo2.listener.transportReady(); - verify(stateListener1).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); - verify(stateListener2).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); - verifyNoMoreInteractions(stateListener1, stateListener2); - - // No more callback should be delivered to LoadBalancer after it's shut down - resolver.listener.onResult( - ResolutionResult.newBuilder() - .setAddresses(new ArrayList<>()) - .setServiceConfig( - ConfigOrError.fromError(Status.UNAVAILABLE.withDescription("Resolution failed"))) - .build()); - Thread.sleep(1100); - assertThat(timer.getPendingTasks()).isEmpty(); - resolver.resolved(); - verifyNoMoreInteractions(mockLoadBalancer); - } - @Test public void interceptor() throws Exception { final AtomicLong atomic = new AtomicLong(); @@ -3211,48 +3138,6 @@ public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exc assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); } - @Test - public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends_usesListener2onResult2() - throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - List servers = new ArrayList<>(); - servers.add(new EquivalentAddressGroup(socketAddress)); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - int prevSize = getStats(channel).channelTrace.events.size(); - ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .build(); - - channel.syncContext.execute( - () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1)); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); - - prevSize = getStats(channel).channelTrace.events.size(); - nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - - prevSize = getStats(channel).channelTrace.events.size(); - nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); - - prevSize = getStats(channel).channelTrace.events.size(); - ResolutionResult resolutionResult2 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .build(); - channel.syncContext.execute( - () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult2)); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - } - @Test public void 
channelTracing_serviceConfigChange() throws Exception { timer.forwardNanos(1234); @@ -3312,69 +3197,6 @@ public void channelTracing_serviceConfigChange() throws Exception { .build()); } - @Test - public void channelTracing_serviceConfigChange_usesListener2OnResult2() throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - List servers = new ArrayList<>(); - servers.add(new EquivalentAddressGroup(socketAddress)); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - int prevSize = getStats(channel).channelTrace.events.size(); - ManagedChannelServiceConfig mcsc1 = createManagedChannelServiceConfig( - ImmutableMap.of(), - new PolicySelection( - mockLoadBalancerProvider, null)); - ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) - .build(); - - channel.syncContext.execute(() -> - nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1)); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - assertThat(getStats(channel).channelTrace.events.get(prevSize)) - .isEqualTo(new ChannelTrace.Event.Builder() - .setDescription("Service config changed") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - - prevSize = getStats(channel).channelTrace.events.size(); - ResolutionResult resolutionResult2 = ResolutionResult.newBuilder().setAddresses( - Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) - .build(); - channel.syncContext.execute(() -> - nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2)); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); - - prevSize = getStats(channel).channelTrace.events.size(); - timer.forwardNanos(1234); - ResolutionResult resolutionResult3 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty())) - .build(); - channel.syncContext.execute(() -> - nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3)); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - assertThat(getStats(channel).channelTrace.events.get(prevSize)) - .isEqualTo(new ChannelTrace.Event.Builder() - .setDescription("Service config changed") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - @Test public void channelTracing_stateChangeEvent() throws Exception { channelBuilder.maxTraceEvents(10); @@ -4035,120 +3857,6 @@ public ClientTransportFactory buildClientTransportFactory() { mychannel.shutdownNow(); } - @Test - public void badServiceConfigIsRecoverable_usesListener2OnResult2() throws Exception { - final List addresses = - ImmutableList.of(new EquivalentAddressGroup(new SocketAddress() {})); - final class FakeNameResolver extends NameResolver { - Listener2 listener; - private final SynchronizationContext syncContext; - - FakeNameResolver(Args args) { 
- this.syncContext = args.getSynchronizationContext(); - } - - @Override - public String getServiceAuthority() { - return "also fake"; - } - - @Override - public void start(Listener2 listener) { - this.listener = listener; - syncContext.execute(() -> - listener.onResult2( - ResolutionResult.newBuilder() - .setAddresses(addresses) - .setServiceConfig( - ConfigOrError.fromError( - Status.INTERNAL.withDescription("kaboom is invalid"))) - .build())); - } - - @Override - public void shutdown() {} - } - - final class FakeNameResolverFactory2 extends NameResolver.Factory { - FakeNameResolver resolver; - ManagedChannelImpl managedChannel; - SynchronizationContext syncContext; - - @Nullable - @Override - public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { - syncContext = args.getSynchronizationContext(); - return (resolver = new FakeNameResolver(args)); - } - - @Override - public String getDefaultScheme() { - return "fake"; - } - } - - FakeNameResolverFactory2 factory = new FakeNameResolverFactory2(); - - ManagedChannelImplBuilder customBuilder = new ManagedChannelImplBuilder(TARGET, - new ClientTransportFactoryBuilder() { - @Override - public ClientTransportFactory buildClientTransportFactory() { - return mockTransportFactory; - } - }, - null); - when(mockTransportFactory.getSupportedSocketAddressTypes()).thenReturn(Collections.singleton( - InetSocketAddress.class)); - customBuilder.executorPool = executorPool; - customBuilder.channelz = channelz; - ManagedChannel mychannel = customBuilder.nameResolverFactory(factory).build(); - - ClientCall call1 = - mychannel.newCall(TestMethodDescriptors.voidMethod(), CallOptions.DEFAULT); - ListenableFuture future1 = ClientCalls.futureUnaryCall(call1, null); - executor.runDueTasks(); - try { - future1.get(1, TimeUnit.SECONDS); - Assert.fail(); - } catch (ExecutionException e) { - assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("kaboom"); - } - - // ok the service config is bad, let's fix it. 
- Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); - Object fakeLbConfig = new Object(); - PolicySelection lbConfigs = - new PolicySelection( - mockLoadBalancerProvider, fakeLbConfig); - mockLoadBalancerProvider.parseLoadBalancingPolicyConfig(rawServiceConfig); - ManagedChannelServiceConfig managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, lbConfigs); - factory.syncContext.execute(() -> - factory.resolver.listener.onResult2( - ResolutionResult.newBuilder() - .setAddresses(addresses) - .setServiceConfig(ConfigOrError.fromConfig(managedChannelServiceConfig)) - .build())); - - ClientCall call2 = mychannel.newCall( - TestMethodDescriptors.voidMethod(), - CallOptions.DEFAULT.withDeadlineAfter(5, TimeUnit.SECONDS)); - ListenableFuture future2 = ClientCalls.futureUnaryCall(call2, null); - - timer.forwardTime(1234, TimeUnit.SECONDS); - - executor.runDueTasks(); - try { - future2.get(); - Assert.fail(); - } catch (ExecutionException e) { - assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("deadline"); - } - - mychannel.shutdownNow(); - } - @Test public void nameResolverArgsPropagation() { final AtomicReference capturedArgs = new AtomicReference<>(); @@ -4810,7 +4518,7 @@ public NameResolver newNameResolver(final URI targetUri, NameResolver.Args args) } assertEquals(DEFAULT_PORT, args.getDefaultPort()); FakeNameResolverFactory.FakeNameResolver resolver = - new FakeNameResolverFactory.FakeNameResolver(targetUri, error, args); + new FakeNameResolverFactory.FakeNameResolver(targetUri, error); resolvers.add(resolver); return resolver; } @@ -4838,16 +4546,14 @@ void allResolved() { final class FakeNameResolver extends NameResolver { final URI targetUri; - final SynchronizationContext syncContext; Listener2 listener; boolean shutdown; int refreshCalled; Status error; - FakeNameResolver(URI targetUri, Status error, Args args) { + FakeNameResolver(URI targetUri, Status error) { this.targetUri = targetUri; this.error = error; - syncContext = args.getSynchronizationContext(); } @Override public String getServiceAuthority() { @@ -4879,7 +4585,7 @@ void resolved() { if (configOrError != null) { builder.setServiceConfig(configOrError); } - syncContext.execute(() -> listener.onResult(builder.build())); + listener.onResult(builder.build()); } @Override public void shutdown() { diff --git a/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java b/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java index 6347416f0ca..8801f540394 100644 --- a/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java +++ b/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java @@ -21,7 +21,6 @@ import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import io.grpc.NameResolver; import io.grpc.NameResolver.Listener2; @@ -80,7 +79,7 @@ public void startAndShutdown() { // Make sure the ResolutionResultListener callback is added to the ResolutionResult attributes, // and the retry scheduler is reset since the name resolution was successful. 
@Test - public void onResult_success() { + public void onResult_sucess() { retryingNameResolver.start(mockListener); verify(mockNameResolver).start(listenerCaptor.capture()); @@ -95,18 +94,6 @@ public void onResult_success() { verify(mockRetryScheduler).reset(); } - @Test - public void onResult2_sucesss() { - when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.OK); - retryingNameResolver.start(mockListener); - verify(mockNameResolver).start(listenerCaptor.capture()); - - assertThat(listenerCaptor.getValue().onResult2(ResolutionResult.newBuilder().build())) - .isEqualTo(Status.OK); - - verify(mockRetryScheduler).reset(); - } - // Make sure the ResolutionResultListener callback is added to the ResolutionResult attributes, // and that a retry gets scheduled when the resolution results are rejected. @Test @@ -125,19 +112,6 @@ public void onResult_failure() { verify(mockRetryScheduler).schedule(isA(Runnable.class)); } - // Make sure that a retry gets scheduled when the resolution results are rejected. - @Test - public void onResult2_failure() { - when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.UNAVAILABLE); - retryingNameResolver.start(mockListener); - verify(mockNameResolver).start(listenerCaptor.capture()); - - assertThat(listenerCaptor.getValue().onResult2(ResolutionResult.newBuilder().build())) - .isEqualTo(Status.UNAVAILABLE); - - verify(mockRetryScheduler).schedule(isA(Runnable.class)); - } - // Wrapping a NameResolver more than once is a misconfiguration. @Test public void onResult_failure_doubleWrapped() { diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java index c195a78e6f4..3e2cf22605f 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java @@ -152,7 +152,7 @@ public List resolveSrv(String host) throws Exception { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); assertThat(result.getAttributes()).isEqualTo(Attributes.EMPTY); @@ -192,7 +192,7 @@ public ConfigOrError answer(InvocationOnMock invocation) { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -225,7 +225,7 @@ public void resolve_nullResourceResolver() throws Exception { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()) .containsExactly( @@ -272,7 +272,7 @@ public void resolve_addressFailure_stillLookUpBalancersAndServiceConfig() throws resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); EquivalentAddressGroup 
resolvedBalancerAddr = @@ -306,7 +306,7 @@ public void resolveAll_balancerLookupFails_stillLookUpServiceConfig() throws Exc resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult2(resultCaptor.capture()); + verify(mockListener).onResult(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = From 941a1c37a3cf9078da633da38ceaa15009975ad4 Mon Sep 17 00:00:00 2001 From: Larry Safran Date: Wed, 31 Jul 2024 14:56:07 -0700 Subject: [PATCH 19/53] Add dualstack interop test kokoro config (#11422) --- buildscripts/kokoro/psm-dualstack.cfg | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 buildscripts/kokoro/psm-dualstack.cfg diff --git a/buildscripts/kokoro/psm-dualstack.cfg b/buildscripts/kokoro/psm-dualstack.cfg new file mode 100644 index 00000000000..55c906bc4ec --- /dev/null +++ b/buildscripts/kokoro/psm-dualstack.cfg @@ -0,0 +1,17 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. +build_file: "grpc-java/buildscripts/kokoro/psm-interop-test-java.sh" +timeout_mins: 120 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} +env_vars { + key: "PSM_TEST_SUITE" + value: "dualstack" +} From cc1cbe987191cf1e72e25cd3fd7bef392910a0d6 Mon Sep 17 00:00:00 2001 From: Larry Safran Date: Thu, 1 Aug 2024 11:30:36 -0700 Subject: [PATCH 20/53] Revert "Enable new PickFirst LB (#11348)" (#11425) This reverts commit ccfd351a2e9e3a1dc6d3d0130a6cfa561fba7321. --- .../java/io/grpc/internal/PickFirstLoadBalancerProvider.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/io/grpc/internal/PickFirstLoadBalancerProvider.java b/core/src/main/java/io/grpc/internal/PickFirstLoadBalancerProvider.java index 6591a4a0a7d..92178ccae24 100644 --- a/core/src/main/java/io/grpc/internal/PickFirstLoadBalancerProvider.java +++ b/core/src/main/java/io/grpc/internal/PickFirstLoadBalancerProvider.java @@ -37,7 +37,7 @@ public final class PickFirstLoadBalancerProvider extends LoadBalancerProvider { private static final String SHUFFLE_ADDRESS_LIST_KEY = "shuffleAddressList"; private static boolean enableNewPickFirst = - GrpcUtil.getFlag("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true); + GrpcUtil.getFlag("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false); public static boolean isEnabledHappyEyeballs() { From 9bc1a93f6eddff76ad76e28f561d191d2eeb455c Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 1 Aug 2024 16:30:02 -0700 Subject: [PATCH 21/53] xds: Add test that uses real DnsNR with ClusterResolverLB This can detect failures like the UnsupportedOperationException from ebffb0a6. 
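For readers unfamiliar with LOGICAL_DNS clusters: the new test installs a cluster whose single endpoint names a host, and the cluster_resolver LB policy resolves that host through a real DNS name resolver rather than taking endpoints from EDS. A condensed sketch of such a cluster config, built with the same envoy proto builders the test uses, follows; the cluster name, host, and port here are placeholders, not values taken from the test.

    import io.envoyproxy.envoy.config.cluster.v3.Cluster;
    import io.envoyproxy.envoy.config.core.v3.Address;
    import io.envoyproxy.envoy.config.core.v3.SocketAddress;
    import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment;
    import io.envoyproxy.envoy.config.endpoint.v3.Endpoint;
    import io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint;
    import io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints;

    final class LogicalDnsClusterSketch {
      // Builds a LOGICAL_DNS cluster whose one endpoint is resolved via DNS by
      // the channel's name resolver rather than supplied through EDS.
      static Cluster logicalDnsCluster(String host, int port) {
        return Cluster.newBuilder()
            .setName("example_cluster")  // placeholder name
            .setType(Cluster.DiscoveryType.LOGICAL_DNS)
            .setLoadAssignment(ClusterLoadAssignment.newBuilder()
                .addEndpoints(LocalityLbEndpoints.newBuilder()
                    .addLbEndpoints(LbEndpoint.newBuilder()
                        .setEndpoint(Endpoint.newBuilder()
                            .setAddress(Address.newBuilder()
                                .setSocketAddress(SocketAddress.newBuilder()
                                    .setAddress(host)
                                    .setPortValue(port)))))))
            .build();
      }
    }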
--- .../FakeControlPlaneXdsIntegrationTest.java | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/xds/src/test/java/io/grpc/xds/FakeControlPlaneXdsIntegrationTest.java b/xds/src/test/java/io/grpc/xds/FakeControlPlaneXdsIntegrationTest.java index 16e6d22631f..30c2403396e 100644 --- a/xds/src/test/java/io/grpc/xds/FakeControlPlaneXdsIntegrationTest.java +++ b/xds/src/test/java/io/grpc/xds/FakeControlPlaneXdsIntegrationTest.java @@ -24,10 +24,17 @@ import com.google.protobuf.Any; import com.google.protobuf.Struct; import com.google.protobuf.Value; +import io.envoyproxy.envoy.config.cluster.v3.Cluster; import io.envoyproxy.envoy.config.cluster.v3.Cluster.LbPolicy; import io.envoyproxy.envoy.config.cluster.v3.LoadBalancingPolicy; import io.envoyproxy.envoy.config.cluster.v3.LoadBalancingPolicy.Policy; +import io.envoyproxy.envoy.config.core.v3.Address; +import io.envoyproxy.envoy.config.core.v3.SocketAddress; import io.envoyproxy.envoy.config.core.v3.TypedExtensionConfig; +import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment; +import io.envoyproxy.envoy.config.endpoint.v3.Endpoint; +import io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint; +import io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints; import io.envoyproxy.envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality; import io.grpc.CallOptions; import io.grpc.Channel; @@ -42,6 +49,7 @@ import io.grpc.testing.protobuf.SimpleRequest; import io.grpc.testing.protobuf.SimpleResponse; import io.grpc.testing.protobuf.SimpleServiceGrpc; +import java.net.InetSocketAddress; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; @@ -179,4 +187,34 @@ public void pingPong_ringHash() { .build(); assertEquals(goldenResponse, blockingStub.unaryRpc(request)); } + + @Test + public void pingPong_logicalDns() { + InetSocketAddress serverAddress = + (InetSocketAddress) dataPlane.getServer().getListenSockets().get(0); + controlPlane.setCdsConfig( + ControlPlaneRule.buildCluster().toBuilder() + .setType(Cluster.DiscoveryType.LOGICAL_DNS) + .setLoadAssignment( + ClusterLoadAssignment.newBuilder().addEndpoints( + LocalityLbEndpoints.newBuilder().addLbEndpoints( + LbEndpoint.newBuilder().setEndpoint( + Endpoint.newBuilder().setAddress( + Address.newBuilder().setSocketAddress( + SocketAddress.newBuilder() + .setAddress("localhost") + .setPortValue(serverAddress.getPort())))))) + .build()) + .build()); + + ManagedChannel channel = dataPlane.getManagedChannel(); + SimpleServiceGrpc.SimpleServiceBlockingStub blockingStub = SimpleServiceGrpc.newBlockingStub( + channel); + SimpleRequest request = SimpleRequest.newBuilder() + .build(); + SimpleResponse goldenResponse = SimpleResponse.newBuilder() + .setResponseMessage("Hi, xDS!") + .build(); + assertEquals(goldenResponse, blockingStub.unaryRpc(request)); + } } From 90d0fabb1f100231ab6544cf8e352c623771dee1 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Fri, 2 Aug 2024 20:40:31 +0530 Subject: [PATCH 22/53] Introduce onResult2 in NameResolver Listener2 that returns Status Lets the Name Resolver receive the status of the acceptance of the name resolution by the load balancer. 
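To illustrate the new contract (this sketch is not part of the patch): a resolver delivers results through onResult2 on the synchronization context and gets back the Status with which the load balancer accepted or rejected the addresses; the authority and address below are made up.

    import io.grpc.EquivalentAddressGroup;
    import io.grpc.NameResolver;
    import io.grpc.Status;
    import io.grpc.SynchronizationContext;
    import java.net.InetSocketAddress;
    import java.util.Collections;

    final class ExampleResolver extends NameResolver {
      private final SynchronizationContext syncContext;
      private Listener2 listener;

      ExampleResolver(Args args) {
        this.syncContext = args.getSynchronizationContext();
      }

      @Override public String getServiceAuthority() {
        return "example.invalid";  // made-up authority
      }

      @Override public void start(Listener2 listener) {
        this.listener = listener;
        ResolutionResult result = ResolutionResult.newBuilder()
            .setAddresses(Collections.singletonList(
                new EquivalentAddressGroup(new InetSocketAddress("127.0.0.1", 50051))))
            .build();
        // onResult2 must run on the sync context; the returned Status says
        // whether the LB accepted the addresses, which RetryingNameResolver
        // uses to reset its backoff or schedule a retry.
        syncContext.execute(() -> {
          Status accepted = listener.onResult2(result);
          if (!accepted.isOk()) {
            // The load balancer rejected the update; a real resolver would
            // typically schedule a refresh here.
          }
        });
      }

      @Override public void shutdown() {}
    }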
--- api/src/main/java/io/grpc/NameResolver.java | 11 + .../io/grpc/internal/DnsNameResolver.java | 4 +- .../io/grpc/internal/ManagedChannelImpl.java | 255 +++++++-------- .../grpc/internal/RetryingNameResolver.java | 12 + .../io/grpc/internal/DnsNameResolverTest.java | 50 ++- .../grpc/internal/ManagedChannelImplTest.java | 300 +++++++++++++++++- .../internal/RetryingNameResolverTest.java | 28 +- .../grpc/grpclb/GrpclbNameResolverTest.java | 10 +- 8 files changed, 502 insertions(+), 168 deletions(-) diff --git a/api/src/main/java/io/grpc/NameResolver.java b/api/src/main/java/io/grpc/NameResolver.java index a74512eb7e3..bfb9c2a43a1 100644 --- a/api/src/main/java/io/grpc/NameResolver.java +++ b/api/src/main/java/io/grpc/NameResolver.java @@ -246,6 +246,17 @@ public final void onAddresses( */ @Override public abstract void onError(Status error); + + /** + * Handles updates on resolved addresses and attributes. + * + * @param resolutionResult the resolved server addresses, attributes, and Service Config. + * @since 1.66 + */ + public Status onResult2(ResolutionResult resolutionResult) { + onResult(resolutionResult); + return Status.OK; + } } /** diff --git a/core/src/main/java/io/grpc/internal/DnsNameResolver.java b/core/src/main/java/io/grpc/internal/DnsNameResolver.java index 5ef6dd863c2..df51d6f2c5c 100644 --- a/core/src/main/java/io/grpc/internal/DnsNameResolver.java +++ b/core/src/main/java/io/grpc/internal/DnsNameResolver.java @@ -330,7 +330,9 @@ public void run() { resolutionResultBuilder.setAttributes(result.attributes); } } - savedListener.onResult(resolutionResultBuilder.build()); + syncContext.execute(() -> { + savedListener.onResult2(resolutionResultBuilder.build()); + }); } catch (IOException e) { savedListener.onError( Status.UNAVAILABLE.withDescription("Unable to resolve host " + host).withCause(e)); diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java index c5c7b66e15d..7f45ca967ea 100644 --- a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java +++ b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java @@ -1673,146 +1673,147 @@ final class NameResolverListener extends NameResolver.Listener2 { public void onResult(final ResolutionResult resolutionResult) { final class NamesResolved implements Runnable { - @SuppressWarnings("ReferenceEquality") @Override public void run() { - if (ManagedChannelImpl.this.nameResolver != resolver) { - return; - } - - List servers = resolutionResult.getAddresses(); - channelLogger.log( - ChannelLogLevel.DEBUG, - "Resolved address: {0}, config={1}", - servers, - resolutionResult.getAttributes()); - - if (lastResolutionState != ResolutionState.SUCCESS) { - channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); - lastResolutionState = ResolutionState.SUCCESS; - } - - ConfigOrError configOrError = resolutionResult.getServiceConfig(); + Status status = onResult2(resolutionResult); ResolutionResultListener resolutionResultListener = resolutionResult.getAttributes() .get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY); - InternalConfigSelector resolvedConfigSelector = - resolutionResult.getAttributes().get(InternalConfigSelector.KEY); - ManagedChannelServiceConfig validServiceConfig = - configOrError != null && configOrError.getConfig() != null - ? (ManagedChannelServiceConfig) configOrError.getConfig() - : null; - Status serviceConfigError = configOrError != null ? 
configOrError.getError() : null; - - ManagedChannelServiceConfig effectiveServiceConfig; - if (!lookUpServiceConfig) { - if (validServiceConfig != null) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config from name resolver discarded by channel settings"); - } - effectiveServiceConfig = - defaultServiceConfig == null ? EMPTY_SERVICE_CONFIG : defaultServiceConfig; - if (resolvedConfigSelector != null) { + resolutionResultListener.resolutionAttempted(status); + } + } + + syncContext.execute(new NamesResolved()); + } + + @SuppressWarnings("ReferenceEquality") + @Override + public Status onResult2(final ResolutionResult resolutionResult) { + syncContext.throwIfNotInThisSynchronizationContext(); + if (ManagedChannelImpl.this.nameResolver != resolver) { + return Status.OK; + } + + List servers = resolutionResult.getAddresses(); + channelLogger.log( + ChannelLogLevel.DEBUG, + "Resolved address: {0}, config={1}", + servers, + resolutionResult.getAttributes()); + + if (lastResolutionState != ResolutionState.SUCCESS) { + channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); + lastResolutionState = ResolutionState.SUCCESS; + } + + ConfigOrError configOrError = resolutionResult.getServiceConfig(); + InternalConfigSelector resolvedConfigSelector = + resolutionResult.getAttributes().get(InternalConfigSelector.KEY); + ManagedChannelServiceConfig validServiceConfig = + configOrError != null && configOrError.getConfig() != null + ? (ManagedChannelServiceConfig) configOrError.getConfig() + : null; + Status serviceConfigError = configOrError != null ? configOrError.getError() : null; + + ManagedChannelServiceConfig effectiveServiceConfig; + if (!lookUpServiceConfig) { + if (validServiceConfig != null) { + channelLogger.log( + ChannelLogLevel.INFO, + "Service config from name resolver discarded by channel settings"); + } + effectiveServiceConfig = + defaultServiceConfig == null ? 
EMPTY_SERVICE_CONFIG : defaultServiceConfig; + if (resolvedConfigSelector != null) { + channelLogger.log( + ChannelLogLevel.INFO, + "Config selector from name resolver discarded by channel settings"); + } + realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + } else { + // Try to use config if returned from name resolver + // Otherwise, try to use the default config if available + if (validServiceConfig != null) { + effectiveServiceConfig = validServiceConfig; + if (resolvedConfigSelector != null) { + realChannel.updateConfigSelector(resolvedConfigSelector); + if (effectiveServiceConfig.getDefaultConfigSelector() != null) { channelLogger.log( - ChannelLogLevel.INFO, - "Config selector from name resolver discarded by channel settings"); + ChannelLogLevel.DEBUG, + "Method configs in service config will be discarded due to presence of" + + "config-selector"); } + } else { realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + } + } else if (defaultServiceConfig != null) { + effectiveServiceConfig = defaultServiceConfig; + realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); + channelLogger.log( + ChannelLogLevel.INFO, + "Received no service config, using default service config"); + } else if (serviceConfigError != null) { + if (!serviceConfigUpdated) { + // First DNS lookup has invalid service config, and cannot fall back to default + channelLogger.log( + ChannelLogLevel.INFO, + "Fallback to error due to invalid first service config without default config"); + // This error could be an "inappropriate" control plane error that should not bleed + // through to client code using gRPC. We let them flow through here to the LB as + // we later check for these error codes when investigating pick results in + // GrpcUtil.getTransportFromPickResult(). + onError(configOrError.getError()); + return configOrError.getError(); } else { - // Try to use config if returned from name resolver - // Otherwise, try to use the default config if available - if (validServiceConfig != null) { - effectiveServiceConfig = validServiceConfig; - if (resolvedConfigSelector != null) { - realChannel.updateConfigSelector(resolvedConfigSelector); - if (effectiveServiceConfig.getDefaultConfigSelector() != null) { - channelLogger.log( - ChannelLogLevel.DEBUG, - "Method configs in service config will be discarded due to presence of" - + "config-selector"); - } - } else { - realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - } - } else if (defaultServiceConfig != null) { - effectiveServiceConfig = defaultServiceConfig; - realChannel.updateConfigSelector(effectiveServiceConfig.getDefaultConfigSelector()); - channelLogger.log( - ChannelLogLevel.INFO, - "Received no service config, using default service config"); - } else if (serviceConfigError != null) { - if (!serviceConfigUpdated) { - // First DNS lookup has invalid service config, and cannot fall back to default - channelLogger.log( - ChannelLogLevel.INFO, - "Fallback to error due to invalid first service config without default config"); - // This error could be an "inappropriate" control plane error that should not bleed - // through to client code using gRPC. We let them flow through here to the LB as - // we later check for these error codes when investigating pick results in - // GrpcUtil.getTransportFromPickResult(). 
- onError(configOrError.getError()); - if (resolutionResultListener != null) { - resolutionResultListener.resolutionAttempted(configOrError.getError()); - } - return; - } else { - effectiveServiceConfig = lastServiceConfig; - } - } else { - effectiveServiceConfig = EMPTY_SERVICE_CONFIG; - realChannel.updateConfigSelector(null); - } - if (!effectiveServiceConfig.equals(lastServiceConfig)) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config changed{0}", - effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); - lastServiceConfig = effectiveServiceConfig; - transportProvider.throttle = effectiveServiceConfig.getRetryThrottling(); - } - - try { - // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS - // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, - // lbNeedAddress is not deterministic - serviceConfigUpdated = true; - } catch (RuntimeException re) { - logger.log( - Level.WARNING, - "[" + getLogId() + "] Unexpected exception from parsing service config", - re); - } + effectiveServiceConfig = lastServiceConfig; } + } else { + effectiveServiceConfig = EMPTY_SERVICE_CONFIG; + realChannel.updateConfigSelector(null); + } + if (!effectiveServiceConfig.equals(lastServiceConfig)) { + channelLogger.log( + ChannelLogLevel.INFO, + "Service config changed{0}", + effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); + lastServiceConfig = effectiveServiceConfig; + transportProvider.throttle = effectiveServiceConfig.getRetryThrottling(); + } - Attributes effectiveAttrs = resolutionResult.getAttributes(); - // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. - if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { - Attributes.Builder attrBuilder = - effectiveAttrs.toBuilder().discard(InternalConfigSelector.KEY); - Map healthCheckingConfig = - effectiveServiceConfig.getHealthCheckingConfig(); - if (healthCheckingConfig != null) { - attrBuilder - .set(LoadBalancer.ATTR_HEALTH_CHECKING_CONFIG, healthCheckingConfig) - .build(); - } - Attributes attributes = attrBuilder.build(); - - Status addressAcceptanceStatus = helper.lb.tryAcceptResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(attributes) - .setLoadBalancingPolicyConfig(effectiveServiceConfig.getLoadBalancingConfig()) - .build()); - // If a listener is provided, let it know if the addresses were accepted. - if (resolutionResultListener != null) { - resolutionResultListener.resolutionAttempted(addressAcceptanceStatus); - } - } + try { + // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS + // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, + // lbNeedAddress is not deterministic + serviceConfigUpdated = true; + } catch (RuntimeException re) { + logger.log( + Level.WARNING, + "[" + getLogId() + "] Unexpected exception from parsing service config", + re); } } - syncContext.execute(new NamesResolved()); + Attributes effectiveAttrs = resolutionResult.getAttributes(); + // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. 
+ if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { + Attributes.Builder attrBuilder = + effectiveAttrs.toBuilder().discard(InternalConfigSelector.KEY); + Map healthCheckingConfig = + effectiveServiceConfig.getHealthCheckingConfig(); + if (healthCheckingConfig != null) { + attrBuilder + .set(LoadBalancer.ATTR_HEALTH_CHECKING_CONFIG, healthCheckingConfig) + .build(); + } + Attributes attributes = attrBuilder.build(); + + return helper.lb.tryAcceptResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(servers) + .setAttributes(attributes) + .setLoadBalancingPolicyConfig(effectiveServiceConfig.getLoadBalancingConfig()) + .build()); + } + return Status.OK; } @Override diff --git a/core/src/main/java/io/grpc/internal/RetryingNameResolver.java b/core/src/main/java/io/grpc/internal/RetryingNameResolver.java index 6d806e95944..6dcfcd3534a 100644 --- a/core/src/main/java/io/grpc/internal/RetryingNameResolver.java +++ b/core/src/main/java/io/grpc/internal/RetryingNameResolver.java @@ -95,12 +95,24 @@ public void onResult(ResolutionResult resolutionResult) { "RetryingNameResolver can only be used once to wrap a NameResolver"); } + // To have retry behavior for name resolvers that haven't migrated to onResult2. delegateListener.onResult(resolutionResult.toBuilder().setAttributes( resolutionResult.getAttributes().toBuilder() .set(RESOLUTION_RESULT_LISTENER_KEY, new ResolutionResultListener()).build()) .build()); } + @Override + public Status onResult2(ResolutionResult resolutionResult) { + Status status = delegateListener.onResult2(resolutionResult); + if (status.isOk()) { + retryScheduler.reset(); + } else { + retryScheduler.schedule(new DelayedNameResolverRefresh()); + } + return status; + } + @Override public void onError(Status error) { delegateListener.onError(error); diff --git a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java index 14d3fddd290..0512171f4e7 100644 --- a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java +++ b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java @@ -26,7 +26,6 @@ import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.isA; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -226,13 +225,7 @@ public void setUp() { System.getProperty(DnsNameResolver.NETWORKADDRESS_CACHE_TTL_PROPERTY); // By default the mock listener processes the result successfully. 
- doAnswer(invocation -> { - ResolutionResult result = invocation.getArgument(0); - syncContext.execute( - () -> result.getAttributes().get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY) - .resolutionAttempted(Status.OK)); - return null; - }).when(mockListener).onResult(isA(ResolutionResult.class)); + when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.OK); } @After @@ -319,13 +312,13 @@ private void resolveNeverCache(boolean isAndroid) throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult(resultCaptor.capture()); + verify(mockListener, times(2)).onResult2(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -347,7 +340,7 @@ public void testExecutor_default() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -389,7 +382,7 @@ public void execute(Runnable command) { resolver.start(mockListener); assertEquals(0, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -418,7 +411,7 @@ public void resolve_cacheForever() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -452,7 +445,7 @@ public void resolve_usingCache() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -487,14 +480,14 @@ public void resolve_cacheExpired() throws Exception { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); fakeTicker.advance(ttl + 1, TimeUnit.SECONDS); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult(resultCaptor.capture()); + verify(mockListener, times(2)).onResult2(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -531,7 +524,7 @@ private void resolveDefaultValue() throws Exception { 
resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); assertAnswerMatches(answer1, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); @@ -544,7 +537,7 @@ private void resolveDefaultValue() throws Exception { fakeTicker.advance(1, TimeUnit.SECONDS); resolver.refresh(); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener, times(2)).onResult(resultCaptor.capture()); + verify(mockListener, times(2)).onResult2(resultCaptor.capture()); assertAnswerMatches(answer2, 81, resultCaptor.getValue()); assertEquals(0, fakeClock.numPendingTasks()); assertEquals(0, fakeExecutor.numPendingTasks()); @@ -575,7 +568,7 @@ public List resolveAddress(String host) throws Exception { assertThat(fakeExecutor.runDueTasks()).isEqualTo(1); ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); - verify(mockListener).onResult(ac.capture()); + verify(mockListener).onResult2(ac.capture()); verifyNoMoreInteractions(mockListener); assertThat(ac.getValue().getAddresses()).isEmpty(); assertThat(ac.getValue().getServiceConfig()).isNull(); @@ -588,12 +581,7 @@ public List resolveAddress(String host) throws Exception { // Load balancer rejects the empty addresses. @Test public void resolve_emptyResult_notAccepted() throws Exception { - doAnswer(invocation -> { - ResolutionResult result = invocation.getArgument(0); - result.getAttributes().get(RetryingNameResolver.RESOLUTION_RESULT_LISTENER_KEY) - .resolutionAttempted(Status.UNAVAILABLE); - return null; - }).when(mockListener).onResult(isA(ResolutionResult.class)); + when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.UNAVAILABLE); DnsNameResolver.enableTxt = true; RetryingNameResolver resolver = newResolver("dns:///addr.fake:1234", 443); @@ -614,7 +602,7 @@ public List resolveAddress(String host) throws Exception { syncContext.execute(() -> assertThat(fakeExecutor.runDueTasks()).isEqualTo(1)); ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); - verify(mockListener).onResult(ac.capture()); + verify(mockListener).onResult2(ac.capture()); verifyNoMoreInteractions(mockListener); assertThat(ac.getValue().getAddresses()).isEmpty(); assertThat(ac.getValue().getServiceConfig()).isNull(); @@ -640,7 +628,7 @@ public void resolve_nullResourceResolver() throws Exception { dnsResolver.setResourceResolver(null); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -712,7 +700,7 @@ public ConfigOrError parseServiceConfig(Map rawServiceConfig) { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -770,7 +758,7 @@ public void resolve_serviceConfigLookupFails_nullServiceConfig() throws Exceptio dnsResolver.setResourceResolver(mockResourceResolver); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + 
verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -802,7 +790,7 @@ public void resolve_serviceConfigMalformed_serviceConfigError() throws Exception dnsResolver.setResourceResolver(mockResourceResolver); resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -870,7 +858,7 @@ public HttpConnectProxiedSocketAddress proxyFor(SocketAddress targetAddress) { resolver.start(mockListener); assertEquals(1, fakeExecutor.runDueTasks()); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); List result = resultCaptor.getValue().getAddresses(); assertThat(result).hasSize(1); EquivalentAddressGroup eag = result.get(0); diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java index 1d6492f791c..4d42056b689 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java +++ b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java @@ -1054,6 +1054,79 @@ public void noMoreCallbackAfterLoadBalancerShutdown() { verifyNoMoreInteractions(mockLoadBalancer); } + @Test + public void noMoreCallbackAfterLoadBalancerShutdown_configError() throws InterruptedException { + FakeNameResolverFactory nameResolverFactory = + new FakeNameResolverFactory.Builder(expectedUri) + .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) + .build(); + channelBuilder.nameResolverFactory(nameResolverFactory); + Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed"); + createChannel(); + + FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); + verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); + verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture()); + assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); + + SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class); + SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class); + Subchannel subchannel1 = + createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener1); + Subchannel subchannel2 = + createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener2); + requestConnectionSafely(helper, subchannel1); + requestConnectionSafely(helper, subchannel2); + verify(mockTransportFactory, times(2)) + .newClientTransport( + any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); + MockClientTransportInfo transportInfo1 = transports.poll(); + MockClientTransportInfo transportInfo2 = transports.poll(); + + // LoadBalancer receives all sorts of callbacks + transportInfo1.listener.transportReady(); + + verify(stateListener1, times(2)).onSubchannelState(stateInfoCaptor.capture()); + assertSame(CONNECTING, stateInfoCaptor.getAllValues().get(0).getState()); + assertSame(READY, stateInfoCaptor.getAllValues().get(1).getState()); + + verify(stateListener2).onSubchannelState(stateInfoCaptor.capture()); + assertSame(CONNECTING, 
stateInfoCaptor.getValue().getState()); + + resolver.listener.onError(resolutionError); + verify(mockLoadBalancer).handleNameResolutionError(resolutionError); + + verifyNoMoreInteractions(mockLoadBalancer); + + channel.shutdown(); + verify(mockLoadBalancer).shutdown(); + verifyNoMoreInteractions(stateListener1, stateListener2); + + // LoadBalancer will normally shutdown all subchannels + shutdownSafely(helper, subchannel1); + shutdownSafely(helper, subchannel2); + + // Since subchannels are shutdown, SubchannelStateListeners will only get SHUTDOWN regardless of + // the transport states. + transportInfo1.listener.transportShutdown(Status.UNAVAILABLE); + transportInfo2.listener.transportReady(); + verify(stateListener1).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); + verify(stateListener2).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); + verifyNoMoreInteractions(stateListener1, stateListener2); + + // No more callback should be delivered to LoadBalancer after it's shut down + resolver.listener.onResult( + ResolutionResult.newBuilder() + .setAddresses(new ArrayList<>()) + .setServiceConfig( + ConfigOrError.fromError(Status.UNAVAILABLE.withDescription("Resolution failed"))) + .build()); + Thread.sleep(1100); + assertThat(timer.getPendingTasks()).isEmpty(); + resolver.resolved(); + verifyNoMoreInteractions(mockLoadBalancer); + } + @Test public void interceptor() throws Exception { final AtomicLong atomic = new AtomicLong(); @@ -3138,6 +3211,48 @@ public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exc assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); } + @Test + public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends_usesListener2onResult2() + throws Exception { + timer.forwardNanos(1234); + channelBuilder.maxTraceEvents(10); + List servers = new ArrayList<>(); + servers.add(new EquivalentAddressGroup(socketAddress)); + FakeNameResolverFactory nameResolverFactory = + new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); + channelBuilder.nameResolverFactory(nameResolverFactory); + createChannel(); + + int prevSize = getStats(channel).channelTrace.events.size(); + ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .build(); + + channel.syncContext.execute( + () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); + + prevSize = getStats(channel).channelTrace.events.size(); + nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + + prevSize = getStats(channel).channelTrace.events.size(); + nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); + + prevSize = getStats(channel).channelTrace.events.size(); + ResolutionResult resolutionResult2 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .build(); + channel.syncContext.execute( + () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult2)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + } + @Test public void 
channelTracing_serviceConfigChange() throws Exception { timer.forwardNanos(1234); @@ -3197,6 +3312,69 @@ public void channelTracing_serviceConfigChange() throws Exception { .build()); } + @Test + public void channelTracing_serviceConfigChange_usesListener2OnResult2() throws Exception { + timer.forwardNanos(1234); + channelBuilder.maxTraceEvents(10); + List servers = new ArrayList<>(); + servers.add(new EquivalentAddressGroup(socketAddress)); + FakeNameResolverFactory nameResolverFactory = + new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); + channelBuilder.nameResolverFactory(nameResolverFactory); + createChannel(); + + int prevSize = getStats(channel).channelTrace.events.size(); + ManagedChannelServiceConfig mcsc1 = createManagedChannelServiceConfig( + ImmutableMap.of(), + new PolicySelection( + mockLoadBalancerProvider, null)); + ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) + .build(); + + channel.syncContext.execute(() -> + nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + assertThat(getStats(channel).channelTrace.events.get(prevSize)) + .isEqualTo(new ChannelTrace.Event.Builder() + .setDescription("Service config changed") + .setSeverity(ChannelTrace.Event.Severity.CT_INFO) + .setTimestampNanos(timer.getTicker().read()) + .build()); + + prevSize = getStats(channel).channelTrace.events.size(); + ResolutionResult resolutionResult2 = ResolutionResult.newBuilder().setAddresses( + Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) + .build(); + channel.syncContext.execute(() -> + nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); + + prevSize = getStats(channel).channelTrace.events.size(); + timer.forwardNanos(1234); + ResolutionResult resolutionResult3 = ResolutionResult.newBuilder() + .setAddresses(Collections.singletonList( + new EquivalentAddressGroup( + Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) + .setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty())) + .build(); + channel.syncContext.execute(() -> + nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3)); + assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); + assertThat(getStats(channel).channelTrace.events.get(prevSize)) + .isEqualTo(new ChannelTrace.Event.Builder() + .setDescription("Service config changed") + .setSeverity(ChannelTrace.Event.Severity.CT_INFO) + .setTimestampNanos(timer.getTicker().read()) + .build()); + } + @Test public void channelTracing_stateChangeEvent() throws Exception { channelBuilder.maxTraceEvents(10); @@ -3857,6 +4035,120 @@ public ClientTransportFactory buildClientTransportFactory() { mychannel.shutdownNow(); } + @Test + public void badServiceConfigIsRecoverable_usesListener2OnResult2() throws Exception { + final List addresses = + ImmutableList.of(new EquivalentAddressGroup(new SocketAddress() {})); + final class FakeNameResolver extends NameResolver { + Listener2 listener; + private final SynchronizationContext syncContext; + + FakeNameResolver(Args args) { 
+ this.syncContext = args.getSynchronizationContext(); + } + + @Override + public String getServiceAuthority() { + return "also fake"; + } + + @Override + public void start(Listener2 listener) { + this.listener = listener; + syncContext.execute(() -> + listener.onResult2( + ResolutionResult.newBuilder() + .setAddresses(addresses) + .setServiceConfig( + ConfigOrError.fromError( + Status.INTERNAL.withDescription("kaboom is invalid"))) + .build())); + } + + @Override + public void shutdown() {} + } + + final class FakeNameResolverFactory2 extends NameResolver.Factory { + FakeNameResolver resolver; + ManagedChannelImpl managedChannel; + SynchronizationContext syncContext; + + @Nullable + @Override + public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { + syncContext = args.getSynchronizationContext(); + return (resolver = new FakeNameResolver(args)); + } + + @Override + public String getDefaultScheme() { + return "fake"; + } + } + + FakeNameResolverFactory2 factory = new FakeNameResolverFactory2(); + + ManagedChannelImplBuilder customBuilder = new ManagedChannelImplBuilder(TARGET, + new ClientTransportFactoryBuilder() { + @Override + public ClientTransportFactory buildClientTransportFactory() { + return mockTransportFactory; + } + }, + null); + when(mockTransportFactory.getSupportedSocketAddressTypes()).thenReturn(Collections.singleton( + InetSocketAddress.class)); + customBuilder.executorPool = executorPool; + customBuilder.channelz = channelz; + ManagedChannel mychannel = customBuilder.nameResolverFactory(factory).build(); + + ClientCall call1 = + mychannel.newCall(TestMethodDescriptors.voidMethod(), CallOptions.DEFAULT); + ListenableFuture future1 = ClientCalls.futureUnaryCall(call1, null); + executor.runDueTasks(); + try { + future1.get(1, TimeUnit.SECONDS); + Assert.fail(); + } catch (ExecutionException e) { + assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("kaboom"); + } + + // ok the service config is bad, let's fix it. 
+ Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); + Object fakeLbConfig = new Object(); + PolicySelection lbConfigs = + new PolicySelection( + mockLoadBalancerProvider, fakeLbConfig); + mockLoadBalancerProvider.parseLoadBalancingPolicyConfig(rawServiceConfig); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, lbConfigs); + factory.syncContext.execute(() -> + factory.resolver.listener.onResult2( + ResolutionResult.newBuilder() + .setAddresses(addresses) + .setServiceConfig(ConfigOrError.fromConfig(managedChannelServiceConfig)) + .build())); + + ClientCall call2 = mychannel.newCall( + TestMethodDescriptors.voidMethod(), + CallOptions.DEFAULT.withDeadlineAfter(5, TimeUnit.SECONDS)); + ListenableFuture future2 = ClientCalls.futureUnaryCall(call2, null); + + timer.forwardTime(1234, TimeUnit.SECONDS); + + executor.runDueTasks(); + try { + future2.get(); + Assert.fail(); + } catch (ExecutionException e) { + assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("deadline"); + } + + mychannel.shutdownNow(); + } + @Test public void nameResolverArgsPropagation() { final AtomicReference capturedArgs = new AtomicReference<>(); @@ -4518,7 +4810,7 @@ public NameResolver newNameResolver(final URI targetUri, NameResolver.Args args) } assertEquals(DEFAULT_PORT, args.getDefaultPort()); FakeNameResolverFactory.FakeNameResolver resolver = - new FakeNameResolverFactory.FakeNameResolver(targetUri, error); + new FakeNameResolverFactory.FakeNameResolver(targetUri, error, args); resolvers.add(resolver); return resolver; } @@ -4546,14 +4838,16 @@ void allResolved() { final class FakeNameResolver extends NameResolver { final URI targetUri; + final SynchronizationContext syncContext; Listener2 listener; boolean shutdown; int refreshCalled; Status error; - FakeNameResolver(URI targetUri, Status error) { + FakeNameResolver(URI targetUri, Status error, Args args) { this.targetUri = targetUri; this.error = error; + syncContext = args.getSynchronizationContext(); } @Override public String getServiceAuthority() { @@ -4585,7 +4879,7 @@ void resolved() { if (configOrError != null) { builder.setServiceConfig(configOrError); } - listener.onResult(builder.build()); + syncContext.execute(() -> listener.onResult(builder.build())); } @Override public void shutdown() { diff --git a/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java b/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java index 8801f540394..6347416f0ca 100644 --- a/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java +++ b/core/src/test/java/io/grpc/internal/RetryingNameResolverTest.java @@ -21,6 +21,7 @@ import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import io.grpc.NameResolver; import io.grpc.NameResolver.Listener2; @@ -79,7 +80,7 @@ public void startAndShutdown() { // Make sure the ResolutionResultListener callback is added to the ResolutionResult attributes, // and the retry scheduler is reset since the name resolution was successful. 
@Test - public void onResult_sucess() { + public void onResult_success() { retryingNameResolver.start(mockListener); verify(mockNameResolver).start(listenerCaptor.capture()); @@ -94,6 +95,18 @@ public void onResult_sucess() { verify(mockRetryScheduler).reset(); } + @Test + public void onResult2_sucesss() { + when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.OK); + retryingNameResolver.start(mockListener); + verify(mockNameResolver).start(listenerCaptor.capture()); + + assertThat(listenerCaptor.getValue().onResult2(ResolutionResult.newBuilder().build())) + .isEqualTo(Status.OK); + + verify(mockRetryScheduler).reset(); + } + // Make sure the ResolutionResultListener callback is added to the ResolutionResult attributes, // and that a retry gets scheduled when the resolution results are rejected. @Test @@ -112,6 +125,19 @@ public void onResult_failure() { verify(mockRetryScheduler).schedule(isA(Runnable.class)); } + // Make sure that a retry gets scheduled when the resolution results are rejected. + @Test + public void onResult2_failure() { + when(mockListener.onResult2(isA(ResolutionResult.class))).thenReturn(Status.UNAVAILABLE); + retryingNameResolver.start(mockListener); + verify(mockNameResolver).start(listenerCaptor.capture()); + + assertThat(listenerCaptor.getValue().onResult2(ResolutionResult.newBuilder().build())) + .isEqualTo(Status.UNAVAILABLE); + + verify(mockRetryScheduler).schedule(isA(Runnable.class)); + } + // Wrapping a NameResolver more than once is a misconfiguration. @Test public void onResult_failure_doubleWrapped() { diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java index 3e2cf22605f..c195a78e6f4 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbNameResolverTest.java @@ -152,7 +152,7 @@ public List resolveSrv(String host) throws Exception { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); assertThat(result.getAttributes()).isEqualTo(Attributes.EMPTY); @@ -192,7 +192,7 @@ public ConfigOrError answer(InvocationOnMock invocation) { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = (InetSocketAddress) Iterables.getOnlyElement( @@ -225,7 +225,7 @@ public void resolve_nullResourceResolver() throws Exception { resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()) .containsExactly( @@ -272,7 +272,7 @@ public void resolve_addressFailure_stillLookUpBalancersAndServiceConfig() throws resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); EquivalentAddressGroup 
resolvedBalancerAddr = @@ -306,7 +306,7 @@ public void resolveAll_balancerLookupFails_stillLookUpServiceConfig() throws Exc resolver.start(mockListener); assertThat(fakeClock.runDueTasks()).isEqualTo(1); - verify(mockListener).onResult(resultCaptor.capture()); + verify(mockListener).onResult2(resultCaptor.capture()); ResolutionResult result = resultCaptor.getValue(); InetSocketAddress resolvedBackendAddr = From b8e3ae9a4b486c6dfc322cf533178a1ace853985 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 18 Jul 2023 10:22:28 -0700 Subject: [PATCH 23/53] android-interop-testing: Enable -Xlint:deprecation --- android-interop-testing/build.gradle | 1 - 1 file changed, 1 deletion(-) diff --git a/android-interop-testing/build.gradle b/android-interop-testing/build.gradle index 4d96adbd0dc..22aa5f2288d 100644 --- a/android-interop-testing/build.gradle +++ b/android-interop-testing/build.gradle @@ -116,7 +116,6 @@ import net.ltgt.gradle.errorprone.CheckSeverity tasks.withType(JavaCompile).configureEach { options.compilerArgs += [ "-Xlint:-cast", - "-Xlint:-deprecation", // https://github.com/grpc/grpc-java/issues/10298 ] appendToProperty(it.options.errorprone.excludedPaths, ".*/R.java", "|") appendToProperty( From 780e4ba086265a33ed67c6de8b7f3d60c86f15a6 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 2 Apr 2024 10:42:25 -0700 Subject: [PATCH 24/53] api: Move ClientStreamTracerTest from core to api It uses nothing from core and tests an api class. --- {core => api}/src/test/java/io/grpc/ClientStreamTracerTest.java | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {core => api}/src/test/java/io/grpc/ClientStreamTracerTest.java (100%) diff --git a/core/src/test/java/io/grpc/ClientStreamTracerTest.java b/api/src/test/java/io/grpc/ClientStreamTracerTest.java similarity index 100% rename from core/src/test/java/io/grpc/ClientStreamTracerTest.java rename to api/src/test/java/io/grpc/ClientStreamTracerTest.java From 1f9d5022618c8b63babfd352dbb469f5cbf7ac54 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 29 May 2024 15:48:12 -0700 Subject: [PATCH 25/53] interop-testing: Remove unused implementation deps googleapis and rls can still be used at runtime. --- interop-testing/build.gradle | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/interop-testing/build.gradle b/interop-testing/build.gradle index d6155761c04..a19efb00155 100644 --- a/interop-testing/build.gradle +++ b/interop-testing/build.gradle @@ -13,12 +13,9 @@ dependencies { implementation project(path: ':grpc-alts', configuration: 'shadow'), project(':grpc-auth'), project(':grpc-census'), - project(':grpc-core'), project(':grpc-gcp-csm-observability'), - project(':grpc-googleapis'), project(':grpc-netty'), project(':grpc-okhttp'), - project(':grpc-rls'), project(':grpc-services'), project(':grpc-testing'), project(':grpc-protobuf-lite'), @@ -45,6 +42,7 @@ dependencies { libraries.netty.tcnative, libraries.netty.tcnative.classes, libraries.opentelemetry.exporter.prometheus, // For xds interop client + project(':grpc-googleapis'), project(':grpc-grpclb'), project(':grpc-rls') testImplementation testFixtures(project(':grpc-api')), From 00136096ed2180abc55ad2742d16a1df1a45fb3a Mon Sep 17 00:00:00 2001 From: Kurt Alfred Kluever Date: Fri, 2 Aug 2024 07:42:09 -0700 Subject: [PATCH 26/53] Migrate from the deprecated Charsets constants (in Guava) to the StandardCharsets constants (in the JDK). 
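The change is mechanical; a minimal before/after sketch of the pattern applied in each file (the class and field names are illustrative only):

    import java.nio.charset.StandardCharsets;

    class CharsetsMigrationSketch {
      // Before: "foo".getBytes(com.google.common.base.Charsets.UTF_8)  // deprecated Guava constant
      // After: the JDK constant from java.nio.charset (available since Java 7)
      // behaves identically and avoids the deprecation warning.
      byte[] utf8Bytes = "foo".getBytes(StandardCharsets.UTF_8);
    }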
cl/658546708 --- .../Http2ClientStreamTransportState.java | 6 ++--- .../io/grpc/internal/MessageDeframerTest.java | 22 +++++++++---------- .../ObservabilityConfigImpl.java | 4 ++-- .../ObservabilityConfigImplTest.java | 5 +++-- .../GoogleCloudToProdNameResolver.java | 4 ++-- .../java/io/grpc/okhttp/AsyncSinkTest.java | 14 ++++++------ .../grpc/protobuf/services/BinlogHelper.java | 4 ++-- .../services/ChannelzProtoUtilTest.java | 14 ++++++------ .../io/grpc/util/CertificateUtilsTest.java | 6 ++--- 9 files changed, 40 insertions(+), 39 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/Http2ClientStreamTransportState.java b/core/src/main/java/io/grpc/internal/Http2ClientStreamTransportState.java index cc36ed3c0bb..e92bb7a4af1 100644 --- a/core/src/main/java/io/grpc/internal/Http2ClientStreamTransportState.java +++ b/core/src/main/java/io/grpc/internal/Http2ClientStreamTransportState.java @@ -16,7 +16,6 @@ package io.grpc.internal; -import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import io.grpc.CallOptions; import io.grpc.InternalMetadata; @@ -24,6 +23,7 @@ import io.grpc.Metadata; import io.grpc.Status; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import javax.annotation.Nullable; /** @@ -62,7 +62,7 @@ public Integer parseAsciiString(byte[] serialized) { /** When non-{@code null}, {@link #transportErrorMetadata} must also be non-{@code null}. */ private Status transportError; private Metadata transportErrorMetadata; - private Charset errorCharset = Charsets.UTF_8; + private Charset errorCharset = StandardCharsets.UTF_8; private boolean headersReceived; protected Http2ClientStreamTransportState( @@ -241,7 +241,7 @@ private static Charset extractCharset(Metadata headers) { // Ignore and assume UTF-8 } } - return Charsets.UTF_8; + return StandardCharsets.UTF_8; } /** diff --git a/core/src/test/java/io/grpc/internal/MessageDeframerTest.java b/core/src/test/java/io/grpc/internal/MessageDeframerTest.java index 98ed0691458..1ec1ccb2082 100644 --- a/core/src/test/java/io/grpc/internal/MessageDeframerTest.java +++ b/core/src/test/java/io/grpc/internal/MessageDeframerTest.java @@ -31,7 +31,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; -import com.google.common.base.Charsets; import com.google.common.io.ByteStreams; import com.google.common.primitives.Bytes; import io.grpc.Codec; @@ -46,6 +45,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -347,7 +347,7 @@ public static class SizeEnforcingInputStreamTests { @Test public void sizeEnforcingInputStream_readByteBelowLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 4, statsTraceCtx); @@ -360,7 +360,7 @@ public void sizeEnforcingInputStream_readByteBelowLimit() throws IOException { @Test public void sizeEnforcingInputStream_readByteAtLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 
3, statsTraceCtx); @@ -373,7 +373,7 @@ public void sizeEnforcingInputStream_readByteAtLimit() throws IOException { @Test public void sizeEnforcingInputStream_readByteAboveLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 2, statsTraceCtx); @@ -390,7 +390,7 @@ public void sizeEnforcingInputStream_readByteAboveLimit() throws IOException { @Test public void sizeEnforcingInputStream_readBelowLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 4, statsTraceCtx); byte[] buf = new byte[10]; @@ -404,7 +404,7 @@ public void sizeEnforcingInputStream_readBelowLimit() throws IOException { @Test public void sizeEnforcingInputStream_readAtLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 3, statsTraceCtx); byte[] buf = new byte[10]; @@ -418,7 +418,7 @@ public void sizeEnforcingInputStream_readAtLimit() throws IOException { @Test public void sizeEnforcingInputStream_readAboveLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 2, statsTraceCtx); byte[] buf = new byte[10]; @@ -435,7 +435,7 @@ public void sizeEnforcingInputStream_readAboveLimit() throws IOException { @Test public void sizeEnforcingInputStream_skipBelowLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 4, statsTraceCtx); @@ -449,7 +449,7 @@ public void sizeEnforcingInputStream_skipBelowLimit() throws IOException { @Test public void sizeEnforcingInputStream_skipAtLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 3, statsTraceCtx); @@ -462,7 +462,7 @@ public void sizeEnforcingInputStream_skipAtLimit() throws IOException { @Test public void sizeEnforcingInputStream_skipAboveLimit() throws IOException { - ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 2, statsTraceCtx); @@ -478,7 +478,7 @@ public void sizeEnforcingInputStream_skipAboveLimit() throws IOException { @Test public void sizeEnforcingInputStream_markReset() throws IOException { - ByteArrayInputStream in = new 
ByteArrayInputStream("foo".getBytes(Charsets.UTF_8)); + ByteArrayInputStream in = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); SizeEnforcingInputStream stream = new MessageDeframer.SizeEnforcingInputStream(in, 3, statsTraceCtx); // stream currently looks like: |foo diff --git a/gcp-observability/src/main/java/io/grpc/gcp/observability/ObservabilityConfigImpl.java b/gcp-observability/src/main/java/io/grpc/gcp/observability/ObservabilityConfigImpl.java index 2b0a44473d0..ae74bf10c43 100644 --- a/gcp-observability/src/main/java/io/grpc/gcp/observability/ObservabilityConfigImpl.java +++ b/gcp-observability/src/main/java/io/grpc/gcp/observability/ObservabilityConfigImpl.java @@ -19,7 +19,6 @@ import static com.google.common.base.Preconditions.checkArgument; import com.google.cloud.ServiceOptions; -import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -28,6 +27,7 @@ import io.opencensus.trace.Sampler; import io.opencensus.trace.samplers.Samplers; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.Collections; @@ -75,7 +75,7 @@ static ObservabilityConfigImpl getInstance() throws IOException { void parseFile(String configFile) throws IOException { String configFileContent = - new String(Files.readAllBytes(Paths.get(configFile)), Charsets.UTF_8); + new String(Files.readAllBytes(Paths.get(configFile)), StandardCharsets.UTF_8); checkArgument(!configFileContent.isEmpty(), CONFIG_FILE_ENV_VAR_NAME + " is empty!"); parse(configFileContent); } diff --git a/gcp-observability/src/test/java/io/grpc/gcp/observability/ObservabilityConfigImplTest.java b/gcp-observability/src/test/java/io/grpc/gcp/observability/ObservabilityConfigImplTest.java index d6f23fbcc9a..a9e0d6e2235 100644 --- a/gcp-observability/src/test/java/io/grpc/gcp/observability/ObservabilityConfigImplTest.java +++ b/gcp-observability/src/test/java/io/grpc/gcp/observability/ObservabilityConfigImplTest.java @@ -21,12 +21,12 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import com.google.common.base.Charsets; import io.grpc.gcp.observability.ObservabilityConfig.LogFilter; import io.opencensus.trace.Sampler; import io.opencensus.trace.samplers.Samplers; import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.Collections; @@ -401,7 +401,8 @@ public void badProbabilisticSampler_error() throws IOException { public void configFileLogFilters() throws Exception { File configFile = tempFolder.newFile(); Files.write( - Paths.get(configFile.getAbsolutePath()), CLIENT_LOG_FILTERS.getBytes(Charsets.US_ASCII)); + Paths.get(configFile.getAbsolutePath()), + CLIENT_LOG_FILTERS.getBytes(StandardCharsets.US_ASCII)); observabilityConfig.parseFile(configFile.getAbsolutePath()); assertTrue(observabilityConfig.isEnableCloudLogging()); assertThat(observabilityConfig.getProjectId()).isEqualTo("grpc-testing"); diff --git a/googleapis/src/main/java/io/grpc/googleapis/GoogleCloudToProdNameResolver.java b/googleapis/src/main/java/io/grpc/googleapis/GoogleCloudToProdNameResolver.java index 64c2e0f9c86..ebc7dd05ea4 100644 --- a/googleapis/src/main/java/io/grpc/googleapis/GoogleCloudToProdNameResolver.java +++ b/googleapis/src/main/java/io/grpc/googleapis/GoogleCloudToProdNameResolver.java @@ 
-19,7 +19,6 @@ import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; @@ -41,6 +40,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Random; import java.util.concurrent.Executor; @@ -263,7 +263,7 @@ private String queryZoneMetadata(String url) throws IOException { if (con.getResponseCode() != 200) { return ""; } - try (Reader reader = new InputStreamReader(con.getInputStream(), Charsets.UTF_8)) { + try (Reader reader = new InputStreamReader(con.getInputStream(), StandardCharsets.UTF_8)) { respBody = CharStreams.toString(reader); } } finally { diff --git a/okhttp/src/test/java/io/grpc/okhttp/AsyncSinkTest.java b/okhttp/src/test/java/io/grpc/okhttp/AsyncSinkTest.java index 46011588b16..478e18d0a2b 100644 --- a/okhttp/src/test/java/io/grpc/okhttp/AsyncSinkTest.java +++ b/okhttp/src/test/java/io/grpc/okhttp/AsyncSinkTest.java @@ -30,11 +30,11 @@ import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; -import com.google.common.base.Charsets; import io.grpc.internal.SerializingExecutor; import io.grpc.okhttp.ExceptionHandlingFrameWriter.TransportExceptionHandler; import java.io.IOException; import java.net.Socket; +import java.nio.charset.StandardCharsets; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.Executor; @@ -73,8 +73,8 @@ public void noCoalesceRequired() throws IOException { @Test public void flushCoalescing_shouldNotMergeTwoDistinctFlushes() throws IOException { - byte[] firstData = "a string".getBytes(Charsets.UTF_8); - byte[] secondData = "a longer string".getBytes(Charsets.UTF_8); + byte[] firstData = "a string".getBytes(StandardCharsets.UTF_8); + byte[] secondData = "a longer string".getBytes(StandardCharsets.UTF_8); sink.becomeConnected(mockedSink, socket); Buffer buffer = new Buffer(); @@ -95,8 +95,8 @@ public void flushCoalescing_shouldNotMergeTwoDistinctFlushes() throws IOExceptio @Test public void flushCoalescing_shouldMergeTwoQueuedFlushesAndWrites() throws IOException { - byte[] firstData = "a string".getBytes(Charsets.UTF_8); - byte[] secondData = "a longer string".getBytes(Charsets.UTF_8); + byte[] firstData = "a string".getBytes(StandardCharsets.UTF_8); + byte[] secondData = "a longer string".getBytes(StandardCharsets.UTF_8); Buffer buffer = new Buffer().write(firstData); sink.becomeConnected(mockedSink, socket); sink.write(buffer, buffer.size()); @@ -115,8 +115,8 @@ public void flushCoalescing_shouldMergeTwoQueuedFlushesAndWrites() throws IOExce @Test public void flushCoalescing_shouldMergeWrites() throws IOException { - byte[] firstData = "a string".getBytes(Charsets.UTF_8); - byte[] secondData = "a longer string".getBytes(Charsets.UTF_8); + byte[] firstData = "a string".getBytes(StandardCharsets.UTF_8); + byte[] secondData = "a longer string".getBytes(StandardCharsets.UTF_8); Buffer buffer = new Buffer(); sink.becomeConnected(mockedSink, socket); sink.write(buffer.write(firstData), buffer.size()); diff --git a/services/src/main/java/io/grpc/protobuf/services/BinlogHelper.java b/services/src/main/java/io/grpc/protobuf/services/BinlogHelper.java index 845ec1036ad..e810c983beb 100644 --- 
a/services/src/main/java/io/grpc/protobuf/services/BinlogHelper.java +++ b/services/src/main/java/io/grpc/protobuf/services/BinlogHelper.java @@ -22,7 +22,6 @@ import static io.grpc.protobuf.services.BinaryLogProvider.BYTEARRAY_MARSHALLER; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.google.protobuf.ByteString; @@ -59,6 +58,7 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -841,7 +841,7 @@ static MaybeTruncated createMetadataProto if (serialized != null) { int curBytes = 0; for (int i = 0; i < serialized.length; i += 2) { - String key = new String(serialized[i], Charsets.UTF_8); + String key = new String(serialized[i], StandardCharsets.UTF_8); byte[] value = serialized[i + 1]; if (NEVER_INCLUDED_METADATA.contains(key)) { continue; diff --git a/services/src/test/java/io/grpc/protobuf/services/ChannelzProtoUtilTest.java b/services/src/test/java/io/grpc/protobuf/services/ChannelzProtoUtilTest.java index 4098885fd0d..0d2e6063d5e 100644 --- a/services/src/test/java/io/grpc/protobuf/services/ChannelzProtoUtilTest.java +++ b/services/src/test/java/io/grpc/protobuf/services/ChannelzProtoUtilTest.java @@ -22,7 +22,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.protobuf.Any; @@ -82,6 +81,7 @@ import java.net.Inet4Address; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; import java.security.cert.Certificate; import java.util.Arrays; import java.util.Collections; @@ -437,8 +437,8 @@ public void toSocketData() throws Exception { public void socketSecurityTls() throws Exception { Certificate local = mock(Certificate.class); Certificate remote = mock(Certificate.class); - when(local.getEncoded()).thenReturn("localcert".getBytes(Charsets.UTF_8)); - when(remote.getEncoded()).thenReturn("remotecert".getBytes(Charsets.UTF_8)); + when(local.getEncoded()).thenReturn("localcert".getBytes(StandardCharsets.UTF_8)); + when(remote.getEncoded()).thenReturn("remotecert".getBytes(StandardCharsets.UTF_8)); socket.security = new InternalChannelz.Security( new InternalChannelz.Tls("TLS_NULL_WITH_NULL_NULL", local, remote)); @@ -446,8 +446,8 @@ public void socketSecurityTls() throws Exception { Security.newBuilder().setTls( Tls.newBuilder() .setStandardName("TLS_NULL_WITH_NULL_NULL") - .setLocalCertificate(ByteString.copyFrom("localcert", Charsets.UTF_8)) - .setRemoteCertificate(ByteString.copyFrom("remotecert", Charsets.UTF_8))) + .setLocalCertificate(ByteString.copyFrom("localcert", StandardCharsets.UTF_8)) + .setRemoteCertificate(ByteString.copyFrom("remotecert", StandardCharsets.UTF_8))) .build(), ChannelzProtoUtil.toSocket(socket).getSecurity()); @@ -457,7 +457,7 @@ public void socketSecurityTls() throws Exception { Security.newBuilder().setTls( Tls.newBuilder() .setStandardName("TLS_NULL_WITH_NULL_NULL") - .setRemoteCertificate(ByteString.copyFrom("remotecert", Charsets.UTF_8))) + .setRemoteCertificate(ByteString.copyFrom("remotecert", StandardCharsets.UTF_8))) .build(), ChannelzProtoUtil.toSocket(socket).getSecurity()); @@ -467,7 +467,7 @@ public 
void socketSecurityTls() throws Exception { Security.newBuilder().setTls( Tls.newBuilder() .setStandardName("TLS_NULL_WITH_NULL_NULL") - .setLocalCertificate(ByteString.copyFrom("localcert", Charsets.UTF_8))) + .setLocalCertificate(ByteString.copyFrom("localcert", StandardCharsets.UTF_8))) .build(), ChannelzProtoUtil.toSocket(socket).getSecurity()); } diff --git a/util/src/test/java/io/grpc/util/CertificateUtilsTest.java b/util/src/test/java/io/grpc/util/CertificateUtilsTest.java index aef99c0f378..dbddd35bca3 100644 --- a/util/src/test/java/io/grpc/util/CertificateUtilsTest.java +++ b/util/src/test/java/io/grpc/util/CertificateUtilsTest.java @@ -18,12 +18,12 @@ import static com.google.common.truth.Truth.assertThat; -import com.google.common.base.Charsets; import io.grpc.internal.testing.TestUtils; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.math.BigInteger; +import java.nio.charset.StandardCharsets; import java.security.PrivateKey; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; @@ -80,7 +80,7 @@ public void readCaPemFile() throws CertificateException, IOException { @Test public void readBadFormatKeyFile() throws Exception { - InputStream in = new ByteArrayInputStream(BAD_PEM_FORMAT.getBytes(Charsets.UTF_8)); + InputStream in = new ByteArrayInputStream(BAD_PEM_FORMAT.getBytes(StandardCharsets.UTF_8)); try { CertificateUtils.getPrivateKey(in); Assert.fail("no exception thrown"); @@ -92,7 +92,7 @@ public void readBadFormatKeyFile() throws Exception { @Test public void readBadContentKeyFile() { - InputStream in = new ByteArrayInputStream(BAD_PEM_CONTENT.getBytes(Charsets.UTF_8)); + InputStream in = new ByteArrayInputStream(BAD_PEM_CONTENT.getBytes(StandardCharsets.UTF_8)); try { CertificateUtils.getPrivateKey(in); Assert.fail("no exception thrown"); From c29763d88671247dee608d0108de658b893c58f7 Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Fri, 2 Aug 2024 13:35:29 -0400 Subject: [PATCH 27/53] xds: Import RLQS protos (#11418) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Imports the protos of Rate Limiting Quota Service (RLQS) and Rate Limit Quota HTTP Filter. Note: the list below only shows the new top-level protos, and excludes their direct and transitional dependencies (those from import statements). 
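For orientation, the imported service is exposed to Java callers through the generated RateLimitQuotaServiceGrpc class added in this patch, whose single method is a bidirectional stream. A hedged sketch of how a client could open that stream (the class name, target address, and the empty usage report are placeholders, not part of this change):

```java
import io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse;
import io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaServiceGrpc;
import io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;
import java.util.concurrent.TimeUnit;

public final class RlqsClientSketch {
  public static void main(String[] args) throws InterruptedException {
    // Placeholder RLQS server address.
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:18000").usePlaintext().build();

    // Bidirectional stream: usage reports flow up, quota assignments flow back.
    StreamObserver<RateLimitQuotaUsageReports> reports =
        RateLimitQuotaServiceGrpc.newStub(channel)
            .streamRateLimitQuotas(
                new StreamObserver<RateLimitQuotaResponse>() {
                  @Override
                  public void onNext(RateLimitQuotaResponse response) {
                    System.out.println("assignment: " + response);
                  }

                  @Override
                  public void onError(Throwable t) {
                    t.printStackTrace();
                  }

                  @Override
                  public void onCompleted() {}
                });

    // An empty report just to show the call shape; real reports carry bucket usage.
    reports.onNext(RateLimitQuotaUsageReports.getDefaultInstance());
    reports.onCompleted();

    channel.shutdown();
    channel.awaitTermination(5, TimeUnit.SECONDS);
  }
}
```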
#### RLQS Imports - Service — envoy/service/rate_limit_quota/v3/rlqs.proto (Service): 7b8a304 - HTTP Filter — envoy/extensions/filters/http/rate_limit_quota/v3/rate_limit_quota.proto: 49c77c4 #### CEL Imports - Initial third-party repo setup: 99a64bd - Parsed CEL Expression: cel/expr/syntax.proto: 99a64bd - Parsed and type-checked CEL Expression: cel/expr/checked.proto: 99a64bd #### Required typed_config extensions ##### `bucket_matchers` predicate input - `HttpAttributesCelMatchInput` — xds/type/matcher/v3/http_inputs.proto: 54924e0 - `HttpRequestHeaderMatchInput` — envoy/type/matcher/v3/http_inputs.proto: 49c77c4 ##### `bucket_matchers` predicate custom_match - `CelMatcher` — xds/type/matcher/v3/cel.proto: 54924e0 --- xds/build.gradle | 3 + .../v3/RateLimitQuotaServiceGrpc.java | 303 +++++++++++++ xds/third_party/cel-spec/LICENSE | 202 +++++++++ xds/third_party/cel-spec/import.sh | 59 +++ .../src/main/proto/cel/expr/checked.proto | 344 ++++++++++++++ .../src/main/proto/cel/expr/syntax.proto | 393 ++++++++++++++++ xds/third_party/envoy/import.sh | 7 + .../v3/rate_limit_quota.proto | 423 ++++++++++++++++++ .../service/rate_limit_quota/v3/rlqs.proto | 258 +++++++++++ .../envoy/type/matcher/v3/http_inputs.proto | 71 +++ .../proto/envoy/type/v3/http_status.proto | 143 ++++++ .../envoy/type/v3/ratelimit_strategy.proto | 79 ++++ .../proto/envoy/type/v3/ratelimit_unit.proto | 37 ++ .../proto/envoy/type/v3/token_bucket.proto | 39 ++ xds/third_party/xds/import.sh | 3 + .../main/proto/xds/type/matcher/v3/cel.proto | 42 ++ .../xds/type/matcher/v3/http_inputs.proto | 27 ++ .../xds/src/main/proto/xds/type/v3/cel.proto | 70 +++ 18 files changed, 2503 insertions(+) create mode 100644 xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/rate_limit_quota/v3/RateLimitQuotaServiceGrpc.java create mode 100644 xds/third_party/cel-spec/LICENSE create mode 100755 xds/third_party/cel-spec/import.sh create mode 100644 xds/third_party/cel-spec/src/main/proto/cel/expr/checked.proto create mode 100644 xds/third_party/cel-spec/src/main/proto/cel/expr/syntax.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rate_limit_quota/v3/rate_limit_quota.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/service/rate_limit_quota/v3/rlqs.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/http_inputs.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/v3/http_status.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_strategy.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_unit.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/v3/token_bucket.proto create mode 100644 xds/third_party/xds/src/main/proto/xds/type/matcher/v3/cel.proto create mode 100644 xds/third_party/xds/src/main/proto/xds/type/matcher/v3/http_inputs.proto create mode 100644 xds/third_party/xds/src/main/proto/xds/type/v3/cel.proto diff --git a/xds/build.gradle b/xds/build.gradle index cb3046db183..a1d5aa753cb 100644 --- a/xds/build.gradle +++ b/xds/build.gradle @@ -20,6 +20,7 @@ sourceSets { srcDir 'third_party/envoy/src/main/proto' srcDir 'third_party/protoc-gen-validate/src/main/proto' srcDir 'third_party/xds/src/main/proto' + srcDir 'third_party/cel-spec/src/main/proto' srcDir 'third_party/googleapis/src/main/proto' srcDir 'third_party/istio/src/main/proto' } @@ -185,6 +186,7 @@ tasks.named("shadowJar").configure { relocate 'com.google.api.expr', 
"${prefixName}.shaded.com.google.api.expr" relocate 'com.google.security', "${prefixName}.shaded.com.google.security" // TODO: missing java_package option in .proto + relocate 'dev.cel.expr', "${prefixName}.shaded.dev.cel.expr" relocate 'envoy.annotations', "${prefixName}.shaded.envoy.annotations" relocate 'io.envoyproxy', "${prefixName}.shaded.io.envoyproxy" relocate 'io.grpc.netty', 'io.grpc.netty.shaded.io.grpc.netty' @@ -212,6 +214,7 @@ tasks.named("jacocoTestReport").configure { '**/com/github/xds/**', '**/com/google/api/expr/**', '**/com/google/security/**', + '**/cel/expr/**', '**/envoy/annotations/**', '**/io/envoyproxy/**', '**/udpa/annotations/**', diff --git a/xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/rate_limit_quota/v3/RateLimitQuotaServiceGrpc.java b/xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/rate_limit_quota/v3/RateLimitQuotaServiceGrpc.java new file mode 100644 index 00000000000..2cbb7536d4c --- /dev/null +++ b/xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/rate_limit_quota/v3/RateLimitQuotaServiceGrpc.java @@ -0,0 +1,303 @@ +package io.envoyproxy.envoy.service.rate_limit_quota.v3; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + *

+ * Defines the Rate Limit Quota Service (RLQS).
+ * </pre>
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: envoy/service/rate_limit_quota/v3/rlqs.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class RateLimitQuotaServiceGrpc { + + private RateLimitQuotaServiceGrpc() {} + + public static final java.lang.String SERVICE_NAME = "envoy.service.rate_limit_quota.v3.RateLimitQuotaService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor getStreamRateLimitQuotasMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StreamRateLimitQuotas", + requestType = io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports.class, + responseType = io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor getStreamRateLimitQuotasMethod() { + io.grpc.MethodDescriptor getStreamRateLimitQuotasMethod; + if ((getStreamRateLimitQuotasMethod = RateLimitQuotaServiceGrpc.getStreamRateLimitQuotasMethod) == null) { + synchronized (RateLimitQuotaServiceGrpc.class) { + if ((getStreamRateLimitQuotasMethod = RateLimitQuotaServiceGrpc.getStreamRateLimitQuotasMethod) == null) { + RateLimitQuotaServiceGrpc.getStreamRateLimitQuotasMethod = getStreamRateLimitQuotasMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamRateLimitQuotas")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse.getDefaultInstance())) + .setSchemaDescriptor(new RateLimitQuotaServiceMethodDescriptorSupplier("StreamRateLimitQuotas")) + .build(); + } + } + } + return getStreamRateLimitQuotasMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static RateLimitQuotaServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public RateLimitQuotaServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new RateLimitQuotaServiceStub(channel, callOptions); + } + }; + return RateLimitQuotaServiceStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static RateLimitQuotaServiceBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public RateLimitQuotaServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new RateLimitQuotaServiceBlockingStub(channel, callOptions); + } + }; + return RateLimitQuotaServiceBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static RateLimitQuotaServiceFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public 
RateLimitQuotaServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new RateLimitQuotaServiceFutureStub(channel, callOptions); + } + }; + return RateLimitQuotaServiceFutureStub.newStub(factory, channel); + } + + /** + *
+   * Defines the Rate Limit Quota Service (RLQS).
+   * </pre>
+ */ + public interface AsyncService { + + /** + *
+     * Main communication channel: the data plane sends usage reports to the RLQS server,
+     * and the server asynchronously responding with the assignments.
+     * </pre>
+ */ + default io.grpc.stub.StreamObserver streamRateLimitQuotas( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getStreamRateLimitQuotasMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service RateLimitQuotaService. + *
+   * Defines the Rate Limit Quota Service (RLQS).
+   * </pre>
+ */ + public static abstract class RateLimitQuotaServiceImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return RateLimitQuotaServiceGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service RateLimitQuotaService. + *
+   * Defines the Rate Limit Quota Service (RLQS).
+   * </pre>
+ */ + public static final class RateLimitQuotaServiceStub + extends io.grpc.stub.AbstractAsyncStub { + private RateLimitQuotaServiceStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected RateLimitQuotaServiceStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new RateLimitQuotaServiceStub(channel, callOptions); + } + + /** + *
+     * Main communication channel: the data plane sends usage reports to the RLQS server,
+     * and the server asynchronously responding with the assignments.
+     * </pre>
+ */ + public io.grpc.stub.StreamObserver streamRateLimitQuotas( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getStreamRateLimitQuotasMethod(), getCallOptions()), responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service RateLimitQuotaService. + *
+   * Defines the Rate Limit Quota Service (RLQS).
+   * </pre>
+ */ + public static final class RateLimitQuotaServiceBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private RateLimitQuotaServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected RateLimitQuotaServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new RateLimitQuotaServiceBlockingStub(channel, callOptions); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service RateLimitQuotaService. + *
+   * Defines the Rate Limit Quota Service (RLQS).
+   * </pre>
+ */ + public static final class RateLimitQuotaServiceFutureStub + extends io.grpc.stub.AbstractFutureStub { + private RateLimitQuotaServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected RateLimitQuotaServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new RateLimitQuotaServiceFutureStub(channel, callOptions); + } + } + + private static final int METHODID_STREAM_RATE_LIMIT_QUOTAS = 0; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_STREAM_RATE_LIMIT_QUOTAS: + return (io.grpc.stub.StreamObserver) serviceImpl.streamRateLimitQuotas( + (io.grpc.stub.StreamObserver) responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getStreamRateLimitQuotasMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaUsageReports, + io.envoyproxy.envoy.service.rate_limit_quota.v3.RateLimitQuotaResponse>( + service, METHODID_STREAM_RATE_LIMIT_QUOTAS))) + .build(); + } + + private static abstract class RateLimitQuotaServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + RateLimitQuotaServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return io.envoyproxy.envoy.service.rate_limit_quota.v3.RlqsProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("RateLimitQuotaService"); + } + } + + private static final class RateLimitQuotaServiceFileDescriptorSupplier + extends RateLimitQuotaServiceBaseDescriptorSupplier { + RateLimitQuotaServiceFileDescriptorSupplier() {} + } + + private static final class RateLimitQuotaServiceMethodDescriptorSupplier + extends RateLimitQuotaServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + RateLimitQuotaServiceMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor 
getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (RateLimitQuotaServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new RateLimitQuotaServiceFileDescriptorSupplier()) + .addMethod(getStreamRateLimitQuotasMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/xds/third_party/cel-spec/LICENSE b/xds/third_party/cel-spec/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/xds/third_party/cel-spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/xds/third_party/cel-spec/import.sh b/xds/third_party/cel-spec/import.sh new file mode 100755 index 00000000000..bba8214fdfb --- /dev/null +++ b/xds/third_party/cel-spec/import.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# Copyright 2024 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Update VERSION then execute this script + +set -e +VERSION="v0.15.0" +DOWNLOAD_URL="https://github.com/google/cel-spec/archive/refs/tags/${VERSION}.tar.gz" +DOWNLOAD_BASE_DIR="cel-spec-${VERSION#v}" +SOURCE_PROTO_BASE_DIR="${DOWNLOAD_BASE_DIR}/proto" +TARGET_PROTO_BASE_DIR="src/main/proto" +# Sorted alphabetically. +FILES=( +cel/expr/checked.proto +cel/expr/syntax.proto +) + +pushd `git rev-parse --show-toplevel`/xds/third_party/cel-spec > /dev/null + +# put the repo in a tmp directory +tmpdir="$(mktemp -d)" +trap "rm -rf ${tmpdir}" EXIT +curl -Ls "${DOWNLOAD_URL}" | tar xz -C "${tmpdir}" + +cp -p "${tmpdir}/${DOWNLOAD_BASE_DIR}/LICENSE" LICENSE + +rm -rf "${TARGET_PROTO_BASE_DIR}" +mkdir -p "${TARGET_PROTO_BASE_DIR}" +pushd "${TARGET_PROTO_BASE_DIR}" > /dev/null + +# copy proto files to project directory +TOTAL=${#FILES[@]} +COPIED=0 +for file in "${FILES[@]}" +do + mkdir -p "$(dirname "${file}")" + cp -p "${tmpdir}/${SOURCE_PROTO_BASE_DIR}/${file}" "${file}" && (( ++COPIED )) +done +popd > /dev/null + +popd > /dev/null + +echo "Imported ${COPIED} files." +if (( COPIED != TOTAL )); then + echo "Failed importing $(( TOTAL - COPIED )) files." 
1>&2 + exit 1 +fi diff --git a/xds/third_party/cel-spec/src/main/proto/cel/expr/checked.proto b/xds/third_party/cel-spec/src/main/proto/cel/expr/checked.proto new file mode 100644 index 00000000000..e327db9b225 --- /dev/null +++ b/xds/third_party/cel-spec/src/main/proto/cel/expr/checked.proto @@ -0,0 +1,344 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package cel.expr; + +import "cel/expr/syntax.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; + +option cc_enable_arenas = true; +option go_package = "cel.dev/expr"; +option java_multiple_files = true; +option java_outer_classname = "DeclProto"; +option java_package = "dev.cel.expr"; + +// Protos for representing CEL declarations and typed checked expressions. + +// A CEL expression which has been successfully type checked. +message CheckedExpr { + // A map from expression ids to resolved references. + // + // The following entries are in this table: + // + // - An Ident or Select expression is represented here if it resolves to a + // declaration. For instance, if `a.b.c` is represented by + // `select(select(id(a), b), c)`, and `a.b` resolves to a declaration, + // while `c` is a field selection, then the reference is attached to the + // nested select expression (but not to the id or or the outer select). + // In turn, if `a` resolves to a declaration and `b.c` are field selections, + // the reference is attached to the ident expression. + // - Every Call expression has an entry here, identifying the function being + // called. + // - Every CreateStruct expression for a message has an entry, identifying + // the message. + map reference_map = 2; + + // A map from expression ids to types. + // + // Every expression node which has a type different than DYN has a mapping + // here. If an expression has type DYN, it is omitted from this map to save + // space. + map type_map = 3; + + // The source info derived from input that generated the parsed `expr` and + // any optimizations made during the type-checking pass. + SourceInfo source_info = 5; + + // The expr version indicates the major / minor version number of the `expr` + // representation. + // + // The most common reason for a version change will be to indicate to the CEL + // runtimes that transformations have been performed on the expr during static + // analysis. In some cases, this will save the runtime the work of applying + // the same or similar transformations prior to evaluation. + string expr_version = 6; + + // The checked expression. Semantically equivalent to the parsed `expr`, but + // may have structural differences. + Expr expr = 4; +} + +// Represents a CEL type. +message Type { + // List type with typed elements, e.g. `list`. + message ListType { + // The element type. + Type elem_type = 1; + } + + // Map type with parameterized key and value types, e.g. `map`. + message MapType { + // The type of the key. + Type key_type = 1; + + // The type of the value. 
+ Type value_type = 2; + } + + // Function type with result and arg types. + message FunctionType { + // Result type of the function. + Type result_type = 1; + + // Argument types of the function. + repeated Type arg_types = 2; + } + + // Application defined abstract type. + message AbstractType { + // The fully qualified name of this abstract type. + string name = 1; + + // Parameter types for this abstract type. + repeated Type parameter_types = 2; + } + + // CEL primitive types. + enum PrimitiveType { + // Unspecified type. + PRIMITIVE_TYPE_UNSPECIFIED = 0; + + // Boolean type. + BOOL = 1; + + // Int64 type. + // + // 32-bit integer values are widened to int64. + INT64 = 2; + + // Uint64 type. + // + // 32-bit unsigned integer values are widened to uint64. + UINT64 = 3; + + // Double type. + // + // 32-bit float values are widened to double values. + DOUBLE = 4; + + // String type. + STRING = 5; + + // Bytes type. + BYTES = 6; + } + + // Well-known protobuf types treated with first-class support in CEL. + enum WellKnownType { + // Unspecified type. + WELL_KNOWN_TYPE_UNSPECIFIED = 0; + + // Well-known protobuf.Any type. + // + // Any types are a polymorphic message type. During type-checking they are + // treated like `DYN` types, but at runtime they are resolved to a specific + // message type specified at evaluation time. + ANY = 1; + + // Well-known protobuf.Timestamp type, internally referenced as `timestamp`. + TIMESTAMP = 2; + + // Well-known protobuf.Duration type, internally referenced as `duration`. + DURATION = 3; + } + + // The kind of type. + oneof type_kind { + // Dynamic type. + google.protobuf.Empty dyn = 1; + + // Null value. + google.protobuf.NullValue null = 2; + + // Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`. + PrimitiveType primitive = 3; + + // Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`. + PrimitiveType wrapper = 4; + + // Well-known protobuf type such as `google.protobuf.Timestamp`. + WellKnownType well_known = 5; + + // Parameterized list with elements of `list_type`, e.g. `list`. + ListType list_type = 6; + + // Parameterized map with typed keys and values. + MapType map_type = 7; + + // Function type. + FunctionType function = 8; + + // Protocol buffer message type. + // + // The `message_type` string specifies the qualified message type name. For + // example, `google.type.PhoneNumber`. + string message_type = 9; + + // Type param type. + // + // The `type_param` string specifies the type parameter name, e.g. `list` + // would be a `list_type` whose element type was a `type_param` type + // named `E`. + string type_param = 10; + + // Type type. + // + // The `type` value specifies the target type. e.g. int is type with a + // target type of `Primitive.INT64`. + Type type = 11; + + // Error type. + // + // During type-checking if an expression is an error, its type is propagated + // as the `ERROR` type. This permits the type-checker to discover other + // errors present in the expression. + google.protobuf.Empty error = 12; + + // Abstract, application defined type. + // + // An abstract type has no accessible field names, and it can only be + // inspected via helper / member functions. + AbstractType abstract_type = 14; + } +} + +// Represents a declaration of a named value or function. +// +// A declaration is part of the contract between the expression, the agent +// evaluating that expression, and the caller requesting evaluation. 
+message Decl { + // Identifier declaration which specifies its type and optional `Expr` value. + // + // An identifier without a value is a declaration that must be provided at + // evaluation time. An identifier with a value should resolve to a constant, + // but may be used in conjunction with other identifiers bound at evaluation + // time. + message IdentDecl { + // Required. The type of the identifier. + Type type = 1; + + // The constant value of the identifier. If not specified, the identifier + // must be supplied at evaluation time. + Constant value = 2; + + // Documentation string for the identifier. + string doc = 3; + } + + // Function declaration specifies one or more overloads which indicate the + // function's parameter types and return type. + // + // Functions have no observable side-effects (there may be side-effects like + // logging which are not observable from CEL). + message FunctionDecl { + // An overload indicates a function's parameter types and return type, and + // may optionally include a function body described in terms of + // [Expr][cel.expr.Expr] values. + // + // Functions overloads are declared in either a function or method + // call-style. For methods, the `params[0]` is the expected type of the + // target receiver. + // + // Overloads must have non-overlapping argument types after erasure of all + // parameterized type variables (similar as type erasure in Java). + message Overload { + // Required. Globally unique overload name of the function which reflects + // the function name and argument types. + // + // This will be used by a [Reference][cel.expr.Reference] to + // indicate the `overload_id` that was resolved for the function `name`. + string overload_id = 1; + + // List of function parameter [Type][cel.expr.Type] values. + // + // Param types are disjoint after generic type parameters have been + // replaced with the type `DYN`. Since the `DYN` type is compatible with + // any other type, this means that if `A` is a type parameter, the + // function types `int` and `int` are not disjoint. Likewise, + // `map` is not disjoint from `map`. + // + // When the `result_type` of a function is a generic type param, the + // type param name also appears as the `type` of on at least one params. + repeated Type params = 2; + + // The type param names associated with the function declaration. + // + // For example, `function ex(K key, map map) : V` would yield + // the type params of `K, V`. + repeated string type_params = 3; + + // Required. The result type of the function. For example, the operator + // `string.isEmpty()` would have `result_type` of `kind: BOOL`. + Type result_type = 4; + + // Whether the function is to be used in a method call-style `x.f(...)` + // of a function call-style `f(x, ...)`. + // + // For methods, the first parameter declaration, `params[0]` is the + // expected type of the target receiver. + bool is_instance_function = 5; + + // Documentation string for the overload. + string doc = 6; + } + + // Required. List of function overloads, must contain at least one overload. + repeated Overload overloads = 1; + } + + // The fully qualified name of the declaration. + // + // Declarations are organized in containers and this represents the full path + // to the declaration in its container, as in `cel.expr.Decl`. 
+ // + // Declarations used as + // [FunctionDecl.Overload][cel.expr.Decl.FunctionDecl.Overload] + // parameters may or may not have a name depending on whether the overload is + // function declaration or a function definition containing a result + // [Expr][cel.expr.Expr]. + string name = 1; + + // Required. The declaration kind. + oneof decl_kind { + // Identifier declaration. + IdentDecl ident = 2; + + // Function declaration. + FunctionDecl function = 3; + } +} + +// Describes a resolved reference to a declaration. +message Reference { + // The fully qualified name of the declaration. + string name = 1; + + // For references to functions, this is a list of `Overload.overload_id` + // values which match according to typing rules. + // + // If the list has more than one element, overload resolution among the + // presented candidates must happen at runtime because of dynamic types. The + // type checker attempts to narrow down this list as much as possible. + // + // Empty if this is not a reference to a + // [Decl.FunctionDecl][cel.expr.Decl.FunctionDecl]. + repeated string overload_id = 3; + + // For references to constants, this may contain the value of the + // constant if known at compile time. + Constant value = 4; +} diff --git a/xds/third_party/cel-spec/src/main/proto/cel/expr/syntax.proto b/xds/third_party/cel-spec/src/main/proto/cel/expr/syntax.proto new file mode 100644 index 00000000000..ed124a74384 --- /dev/null +++ b/xds/third_party/cel-spec/src/main/proto/cel/expr/syntax.proto @@ -0,0 +1,393 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package cel.expr; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option go_package = "cel.dev/expr"; +option java_multiple_files = true; +option java_outer_classname = "SyntaxProto"; +option java_package = "dev.cel.expr"; + +// A representation of the abstract syntax of the Common Expression Language. + +// An expression together with source information as returned by the parser. +message ParsedExpr { + // The parsed expression. + Expr expr = 2; + + // The source info derived from input that generated the parsed `expr`. + SourceInfo source_info = 3; +} + +// An abstract representation of a common expression. +// +// Expressions are abstractly represented as a collection of identifiers, +// select statements, function calls, literals, and comprehensions. All +// operators with the exception of the '.' operator are modelled as function +// calls. This makes it easy to represent new operators into the existing AST. +// +// All references within expressions must resolve to a +// [Decl][cel.expr.Decl] provided at type-check for an expression to be +// valid. A reference may either be a bare identifier `name` or a qualified +// identifier `google.api.name`. References may either refer to a value or a +// function declaration. 
+// +// For example, the expression `google.api.name.startsWith('expr')` references +// the declaration `google.api.name` within a +// [Expr.Select][cel.expr.Expr.Select] expression, and the function +// declaration `startsWith`. +message Expr { + // An identifier expression. e.g. `request`. + message Ident { + // Required. Holds a single, unqualified identifier, possibly preceded by a + // '.'. + // + // Qualified names are represented by the + // [Expr.Select][cel.expr.Expr.Select] expression. + string name = 1; + } + + // A field selection expression. e.g. `request.auth`. + message Select { + // Required. The target of the selection expression. + // + // For example, in the select expression `request.auth`, the `request` + // portion of the expression is the `operand`. + Expr operand = 1; + + // Required. The name of the field to select. + // + // For example, in the select expression `request.auth`, the `auth` portion + // of the expression would be the `field`. + string field = 2; + + // Whether the select is to be interpreted as a field presence test. + // + // This results from the macro `has(request.auth)`. + bool test_only = 3; + } + + // A call expression, including calls to predefined functions and operators. + // + // For example, `value == 10`, `size(map_value)`. + message Call { + // The target of an method call-style expression. For example, `x` in + // `x.f()`. + Expr target = 1; + + // Required. The name of the function or method being called. + string function = 2; + + // The arguments. + repeated Expr args = 3; + } + + // A list creation expression. + // + // Lists may either be homogenous, e.g. `[1, 2, 3]`, or heterogeneous, e.g. + // `dyn([1, 'hello', 2.0])` + message CreateList { + // The elements part of the list. + repeated Expr elements = 1; + + // The indices within the elements list which are marked as optional + // elements. + // + // When an optional-typed value is present, the value it contains + // is included in the list. If the optional-typed value is absent, the list + // element is omitted from the CreateList result. + repeated int32 optional_indices = 2; + } + + // A map or message creation expression. + // + // Maps are constructed as `{'key_name': 'value'}`. Message construction is + // similar, but prefixed with a type name and composed of field ids: + // `types.MyType{field_id: 'value'}`. + message CreateStruct { + // Represents an entry. + message Entry { + // Required. An id assigned to this node by the parser which is unique + // in a given expression tree. This is used to associate type + // information and other attributes to the node. + int64 id = 1; + + // The `Entry` key kinds. + oneof key_kind { + // The field key for a message creator statement. + string field_key = 2; + + // The key expression for a map creation statement. + Expr map_key = 3; + } + + // Required. The value assigned to the key. + // + // If the optional_entry field is true, the expression must resolve to an + // optional-typed value. If the optional value is present, the key will be + // set; however, if the optional value is absent, the key will be unset. + Expr value = 4; + + // Whether the key-value pair is optional. + bool optional_entry = 5; + } + + // The type name of the message to be created, empty when creating map + // literals. + string message_name = 1; + + // The entries in the creation expression. + repeated Entry entries = 2; + } + + // A comprehension expression applied to a list or map. 
+ // + // Comprehensions are not part of the core syntax, but enabled with macros. + // A macro matches a specific call signature within a parsed AST and replaces + // the call with an alternate AST block. Macro expansion happens at parse + // time. + // + // The following macros are supported within CEL: + // + // Aggregate type macros may be applied to all elements in a list or all keys + // in a map: + // + // * `all`, `exists`, `exists_one` - test a predicate expression against + // the inputs and return `true` if the predicate is satisfied for all, + // any, or only one value `list.all(x, x < 10)`. + // * `filter` - test a predicate expression against the inputs and return + // the subset of elements which satisfy the predicate: + // `payments.filter(p, p > 1000)`. + // * `map` - apply an expression to all elements in the input and return the + // output aggregate type: `[1, 2, 3].map(i, i * i)`. + // + // The `has(m.x)` macro tests whether the property `x` is present in struct + // `m`. The semantics of this macro depend on the type of `m`. For proto2 + // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the + // macro tests whether the property is set to its default. For map and struct + // types, the macro tests whether the property `x` is defined on `m`. + // + // Comprehension evaluation can be best visualized as the following + // pseudocode: + // + // ``` + // let `accu_var` = `accu_init` + // for (let `iter_var` in `iter_range`) { + // if (!`loop_condition`) { + // break + // } + // `accu_var` = `loop_step` + // } + // return `result` + // ``` + message Comprehension { + // The name of the iteration variable. + string iter_var = 1; + + // The range over which var iterates. + Expr iter_range = 2; + + // The name of the variable used for accumulation of the result. + string accu_var = 3; + + // The initial value of the accumulator. + Expr accu_init = 4; + + // An expression which can contain iter_var and accu_var. + // + // Returns false when the result has been computed and may be used as + // a hint to short-circuit the remainder of the comprehension. + Expr loop_condition = 5; + + // An expression which can contain iter_var and accu_var. + // + // Computes the next value of accu_var. + Expr loop_step = 6; + + // An expression which can contain accu_var. + // + // Computes the result. + Expr result = 7; + } + + // Required. An id assigned to this node by the parser which is unique in a + // given expression tree. This is used to associate type information and other + // attributes to a node in the parse tree. + int64 id = 2; + + // Required. Variants of expressions. + oneof expr_kind { + // A constant expression. + Constant const_expr = 3; + + // An identifier expression. + Ident ident_expr = 4; + + // A field selection expression, e.g. `request.auth`. + Select select_expr = 5; + + // A call expression, including calls to predefined functions and operators. + Call call_expr = 6; + + // A list creation expression. + CreateList list_expr = 7; + + // A map or message creation expression. + CreateStruct struct_expr = 8; + + // A comprehension expression. + Comprehension comprehension_expr = 9; + } +} + +// Represents a primitive literal. +// +// Named 'Constant' here for backwards compatibility. +// +// This is similar as the primitives supported in the well-known type +// `google.protobuf.Value`, but richer so it can represent CEL's full range of +// primitives. 
+// +// Lists and structs are not included as constants as these aggregate types may +// contain [Expr][cel.expr.Expr] elements which require evaluation and +// are thus not constant. +// +// Examples of constants include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`, +// `true`, `null`. +message Constant { + // Required. The valid constant kinds. + oneof constant_kind { + // null value. + google.protobuf.NullValue null_value = 1; + + // boolean value. + bool bool_value = 2; + + // int64 value. + int64 int64_value = 3; + + // uint64 value. + uint64 uint64_value = 4; + + // double value. + double double_value = 5; + + // string value. + string string_value = 6; + + // bytes value. + bytes bytes_value = 7; + + // protobuf.Duration value. + // + // Deprecated: duration is no longer considered a builtin cel type. + google.protobuf.Duration duration_value = 8 [deprecated = true]; + + // protobuf.Timestamp value. + // + // Deprecated: timestamp is no longer considered a builtin cel type. + google.protobuf.Timestamp timestamp_value = 9 [deprecated = true]; + } +} + +// Source information collected at parse time. +message SourceInfo { + // The syntax version of the source, e.g. `cel1`. + string syntax_version = 1; + + // The location name. All position information attached to an expression is + // relative to this location. + // + // The location could be a file, UI element, or similar. For example, + // `acme/app/AnvilPolicy.cel`. + string location = 2; + + // Monotonically increasing list of code point offsets where newlines + // `\n` appear. + // + // The line number of a given position is the index `i` where for a given + // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The + // column may be derived from `id_positions[id] - line_offsets[i]`. + repeated int32 line_offsets = 3; + + // A map from the parse node id (e.g. `Expr.id`) to the code point offset + // within the source. + map positions = 4; + + // A map from the parse node id where a macro replacement was made to the + // call `Expr` that resulted in a macro expansion. + // + // For example, `has(value.field)` is a function call that is replaced by a + // `test_only` field selection in the AST. Likewise, the call + // `list.exists(e, e > 10)` translates to a comprehension expression. The key + // in the map corresponds to the expression id of the expanded macro, and the + // value is the call `Expr` that was replaced. + map macro_calls = 5; + + // A list of tags for extensions that were used while parsing or type checking + // the source expression. For example, optimizations that require special + // runtime support may be specified. + // + // These are used to check feature support between components in separate + // implementations. This can be used to either skip redundant work or + // report an error if the extension is unsupported. + repeated Extension extensions = 6; + + // An extension that was requested for the source expression. + message Extension { + // Version + message Version { + // Major version changes indicate different required support level from + // the required components. + int64 major = 1; + // Minor version changes must not change the observed behavior from + // existing implementations, but may be provided informationally. + int64 minor = 2; + } + + // CEL component specifier. + enum Component { + // Unspecified, default. + COMPONENT_UNSPECIFIED = 0; + // Parser. Converts a CEL string to an AST. + COMPONENT_PARSER = 1; + // Type checker. 
Checks that references in an AST are defined and types + // agree. + COMPONENT_TYPE_CHECKER = 2; + // Runtime. Evaluates a parsed and optionally checked CEL AST against a + // context. + COMPONENT_RUNTIME = 3; + } + + // Identifier for the extension. Example: constant_folding + string id = 1; + + // If set, the listed components must understand the extension for the + // expression to evaluate correctly. + // + // This field has set semantics, repeated values should be deduplicated. + repeated Component affected_components = 2; + + // Version info. May be skipped if it isn't meaningful for the extension. + // (for example constant_folding might always be v0.0). + Version version = 3; + } +} diff --git a/xds/third_party/envoy/import.sh b/xds/third_party/envoy/import.sh index 3eeb46cf664..41506c2ed32 100755 --- a/xds/third_party/envoy/import.sh +++ b/xds/third_party/envoy/import.sh @@ -74,6 +74,7 @@ envoy/data/accesslog/v3/accesslog.proto envoy/extensions/clusters/aggregate/v3/cluster.proto envoy/extensions/filters/common/fault/v3/fault.proto envoy/extensions/filters/http/fault/v3/fault.proto +envoy/extensions/filters/http/rate_limit_quota/v3/rate_limit_quota.proto envoy/extensions/filters/http/rbac/v3/rbac.proto envoy/extensions/filters/http/router/v3/router.proto envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -91,9 +92,11 @@ envoy/extensions/transport_sockets/tls/v3/tls.proto envoy/service/discovery/v3/ads.proto envoy/service/discovery/v3/discovery.proto envoy/service/load_stats/v3/lrs.proto +envoy/service/rate_limit_quota/v3/rlqs.proto envoy/service/status/v3/csds.proto envoy/type/http/v3/path_transformation.proto envoy/type/matcher/v3/filter_state.proto +envoy/type/matcher/v3/http_inputs.proto envoy/type/matcher/v3/metadata.proto envoy/type/matcher/v3/node.proto envoy/type/matcher/v3/number.proto @@ -105,9 +108,13 @@ envoy/type/matcher/v3/value.proto envoy/type/metadata/v3/metadata.proto envoy/type/tracing/v3/custom_tag.proto envoy/type/v3/http.proto +envoy/type/v3/http_status.proto envoy/type/v3/percent.proto envoy/type/v3/range.proto +envoy/type/v3/ratelimit_strategy.proto +envoy/type/v3/ratelimit_unit.proto envoy/type/v3/semantic_version.proto +envoy/type/v3/token_bucket.proto ) pushd "$(git rev-parse --show-toplevel)/xds/third_party/envoy" > /dev/null diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rate_limit_quota/v3/rate_limit_quota.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rate_limit_quota/v3/rate_limit_quota.proto new file mode 100644 index 00000000000..57b8bdecd78 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/filters/http/rate_limit_quota/v3/rate_limit_quota.proto @@ -0,0 +1,423 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.rate_limit_quota.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/grpc_service.proto"; +import "envoy/type/v3/http_status.proto"; +import "envoy/type/v3/ratelimit_strategy.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +import "xds/annotations/v3/status.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.rate_limit_quota.v3"; +option java_outer_classname = "RateLimitQuotaProto"; +option 
java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rate_limit_quota/v3;rate_limit_quotav3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Rate Limit Quota] +// Rate Limit Quota :ref:`configuration overview `. +// [#extension: envoy.filters.http.rate_limit_quota] + +// Configures the Rate Limit Quota filter. +// +// Can be overridden in the per-route and per-host configurations. +// The more specific definition completely overrides the less specific definition. +// [#next-free-field: 7] +message RateLimitQuotaFilterConfig { + // Configures the gRPC Rate Limit Quota Service (RLQS) RateLimitQuotaService. + config.core.v3.GrpcService rlqs_server = 1 [(validate.rules).message = {required: true}]; + + // The application domain to use when calling the service. This enables sharing the quota + // server between different applications without fear of overlap. + // E.g., "envoy". + string domain = 2 [(validate.rules).string = {min_len: 1}]; + + // The match tree to use for grouping incoming requests into buckets. + // + // Example: + // + // .. validated-code-block:: yaml + // :type-name: xds.type.matcher.v3.Matcher + // + // matcher_list: + // matchers: + // # Assign requests with header['env'] set to 'staging' to the bucket { name: 'staging' } + // - predicate: + // single_predicate: + // input: + // typed_config: + // '@type': type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + // header_name: env + // value_match: + // exact: staging + // on_match: + // action: + // typed_config: + // '@type': type.googleapis.com/envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings + // bucket_id_builder: + // bucket_id_builder: + // name: + // string_value: staging + // + // # Assign requests with header['user_group'] set to 'admin' to the bucket { acl: 'admin_users' } + // - predicate: + // single_predicate: + // input: + // typed_config: + // '@type': type.googleapis.com/xds.type.matcher.v3.HttpAttributesCelMatchInput + // custom_match: + // typed_config: + // '@type': type.googleapis.com/xds.type.matcher.v3.CelMatcher + // expr_match: + // # Shortened for illustration purposes. Here should be parsed CEL expression: + // # request.headers['user_group'] == 'admin' + // parsed_expr: {} + // on_match: + // action: + // typed_config: + // '@type': type.googleapis.com/envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings + // bucket_id_builder: + // bucket_id_builder: + // acl: + // string_value: admin_users + // + // # Catch-all clause for the requests not matched by any of the matchers. + // # In this example, deny all requests. + // on_no_match: + // action: + // typed_config: + // '@type': type.googleapis.com/envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings + // no_assignment_behavior: + // fallback_rate_limit: + // blanket_rule: DENY_ALL + // + // .. attention:: + // The first matched group wins. Once the request is matched into a bucket, matcher + // evaluation ends. + // + // Use ``on_no_match`` field to assign the catch-all bucket. If a request is not matched + // into any bucket, and there's no ``on_no_match`` field configured, the request will be + // ALLOWED by default. It will NOT be reported to the RLQS server. + // + // Refer to :ref:`Unified Matcher API ` + // documentation for more information on the matcher trees. 
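+  //
+  // A minimal sketch (illustrative only, not taken from the upstream Envoy docs):
+  // route every request into a single catch-all bucket named ``default`` by
+  // configuring only ``on_no_match``:
+  //
+  // .. code-block:: yaml
+  //
+  //   on_no_match:
+  //     action:
+  //       typed_config:
+  //         '@type': type.googleapis.com/envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings
+  //         bucket_id_builder:
+  //           bucket_id_builder:
+  //             name:
+  //               string_value: default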
+ xds.type.matcher.v3.Matcher bucket_matchers = 3 [(validate.rules).message = {required: true}]; + + // If set, this will enable -- but not necessarily enforce -- the rate limit for the given + // fraction of requests. + // + // Defaults to 100% of requests. + config.core.v3.RuntimeFractionalPercent filter_enabled = 4; + + // If set, this will enforce the rate limit decisions for the given fraction of requests. + // For requests that are not enforced the filter will still obtain the quota and include it + // in the load computation, however the request will always be allowed regardless of the outcome + // of quota application. This allows validation or testing of the rate limiting service + // infrastructure without disrupting existing traffic. + // + // Note: this only applies to the fraction of enabled requests. + // + // Defaults to 100% of requests. + config.core.v3.RuntimeFractionalPercent filter_enforced = 5; + + // Specifies a list of HTTP headers that should be added to each request that + // has been rate limited and is also forwarded upstream. This can only occur when the + // filter is enabled but not enforced. + repeated config.core.v3.HeaderValueOption request_headers_to_add_when_not_enforced = 6 + [(validate.rules).repeated = {max_items: 10}]; +} + +// Per-route and per-host configuration overrides. The more specific definition completely +// overrides the less specific definition. +message RateLimitQuotaOverride { + // The application domain to use when calling the service. This enables sharing the quota + // server between different applications without fear of overlap. + // E.g., "envoy". + // + // If empty, inherits the value from the less specific definition. + string domain = 1; + + // The match tree to use for grouping incoming requests into buckets. + // + // If set, fully overrides the bucket matchers provided on the less specific definition. + // If not set, inherits the value from the less specific definition. + // + // See usage example: :ref:`RateLimitQuotaFilterConfig.bucket_matchers + // `. + xds.type.matcher.v3.Matcher bucket_matchers = 2; +} + +// Rate Limit Quota Bucket Settings to apply on the successful ``bucket_matchers`` match. +// +// Specify this message in the :ref:`Matcher.OnMatch.action +// ` field of the +// ``bucket_matchers`` matcher tree to assign the matched requests to the Quota Bucket. +// Usage example: :ref:`RateLimitQuotaFilterConfig.bucket_matchers +// `. +// [#next-free-field: 6] +message RateLimitQuotaBucketSettings { + // Configures the behavior after the first request has been matched to the bucket, and before the + // the RLQS server returns the first quota assignment. + message NoAssignmentBehavior { + oneof no_assignment_behavior { + option (validate.required) = true; + + // Apply pre-configured rate limiting strategy until the server sends the first assignment. + type.v3.RateLimitStrategy fallback_rate_limit = 1; + } + } + + // Specifies the behavior when the bucket's assignment has expired, and cannot be refreshed for + // any reason. + message ExpiredAssignmentBehavior { + // Reuse the last known quota assignment, effectively extending it for the duration + // specified in the :ref:`expired_assignment_behavior_timeout + // ` + // field. + message ReuseLastAssignment { + } + + // Limit the time :ref:`ExpiredAssignmentBehavior + // ` + // is applied. If the server doesn't respond within this duration: + // + // 1. Selected ``ExpiredAssignmentBehavior`` is no longer applied. + // 2. The bucket is abandoned. 
The process of abandoning the bucket is described in the + // :ref:`AbandonAction ` + // message. + // 3. If a new request is matched into the bucket that has become abandoned, + // the data plane restarts the subscription to the bucket. The process of restarting the + // subscription is described in the :ref:`AbandonAction + // ` + // message. + // + // If not set, defaults to zero, and the bucket is abandoned immediately. + google.protobuf.Duration expired_assignment_behavior_timeout = 1 + [(validate.rules).duration = {gt {}}]; + + oneof expired_assignment_behavior { + option (validate.required) = true; + + // Apply the rate limiting strategy to all requests matched into the bucket until the RLQS + // server sends a new assignment, or the :ref:`expired_assignment_behavior_timeout + // ` + // runs out. + type.v3.RateLimitStrategy fallback_rate_limit = 2; + + // Reuse the last ``active`` assignment until the RLQS server sends a new assignment, or the + // :ref:`expired_assignment_behavior_timeout + // ` + // runs out. + ReuseLastAssignment reuse_last_assignment = 3; + } + } + + // Customize the deny response to the requests over the rate limit. + message DenyResponseSettings { + // HTTP response code to deny for HTTP requests (gRPC excluded). + // Defaults to 429 (:ref:`StatusCode.TooManyRequests`). + type.v3.HttpStatus http_status = 1; + + // HTTP response body used to deny for HTTP requests (gRPC excluded). + // If not set, an empty body is returned. + google.protobuf.BytesValue http_body = 2; + + // Configure the deny response for gRPC requests over the rate limit. + // Allows to specify the `RPC status code + // `_, + // and the error message. + // Defaults to the Status with the RPC Code ``UNAVAILABLE`` and empty message. + // + // To identify gRPC requests, Envoy checks that the ``Content-Type`` header is + // ``application/grpc``, or one of the various ``application/grpc+`` values. + // + // .. note:: + // The HTTP code for a gRPC response is always 200. + google.rpc.Status grpc_status = 3; + + // Specifies a list of HTTP headers that should be added to each response for requests that + // have been rate limited. Applies both to plain HTTP, and gRPC requests. + // The headers are added even when the rate limit quota was not enforced. + repeated config.core.v3.HeaderValueOption response_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 10}]; + } + + // ``BucketIdBuilder`` makes it possible to build :ref:`BucketId + // ` with values substituted + // from the dynamic properties associated with each individual request. See usage examples in + // the docs to :ref:`bucket_id_builder + // ` + // field. + message BucketIdBuilder { + // Produces the value of the :ref:`BucketId + // ` map. + message ValueBuilder { + oneof value_specifier { + option (validate.required) = true; + + // Static string value — becomes the value in the :ref:`BucketId + // ` map as is. + string string_value = 1; + + // Dynamic value — evaluated for each request. Must produce a string output, which becomes + // the value in the :ref:`BucketId ` + // map. For example, extensions with the ``envoy.matching.http.input`` category can be used. + config.core.v3.TypedExtensionConfig custom_value = 2; + } + } + + // The map translated into the ``BucketId`` map. + // + // The ``string key`` of this map and becomes the key of ``BucketId`` map as is. + // + // The ``ValueBuilder value`` for the key can be: + // + // * static ``StringValue string_value`` — becomes the value in the ``BucketId`` map as is. 
+ // * dynamic ``TypedExtensionConfig custom_value`` — evaluated for each request. Must produce + // a string output, which becomes the value in the the ``BucketId`` map. + // + // See usage examples in the docs to :ref:`bucket_id_builder + // ` + // field. + map bucket_id_builder = 1 [(validate.rules).map = {min_pairs: 1}]; + } + + // ``BucketId`` builder. + // + // :ref:`BucketId ` is a map from + // the string key to the string value which serves as bucket identifier common for on + // the control plane and the data plane. + // + // While ``BucketId`` is always static, ``BucketIdBuilder`` allows to populate map values + // with the dynamic properties associated with the each individual request. + // + // Example 1: static fields only + // + // ``BucketIdBuilder``: + // + // .. validated-code-block:: yaml + // :type-name: envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings.BucketIdBuilder + // + // bucket_id_builder: + // name: + // string_value: my_bucket + // hello: + // string_value: world + // + // Produces the following ``BucketId`` for all requests: + // + // .. validated-code-block:: yaml + // :type-name: envoy.service.rate_limit_quota.v3.BucketId + // + // bucket: + // name: my_bucket + // hello: world + // + // Example 2: static and dynamic fields + // + // .. validated-code-block:: yaml + // :type-name: envoy.extensions.filters.http.rate_limit_quota.v3.RateLimitQuotaBucketSettings.BucketIdBuilder + // + // bucket_id_builder: + // name: + // string_value: my_bucket + // env: + // custom_value: + // typed_config: + // '@type': type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + // header_name: environment + // + // In this example, the value of ``BucketId`` key ``env`` is substituted from the ``environment`` + // request header. + // + // This is equivalent to the following ``pseudo-code``: + // + // .. code-block:: yaml + // + // name: 'my_bucket' + // env: $header['environment'] + // + // For example, the request with the HTTP header ``env`` set to ``staging`` will produce + // the following ``BucketId``: + // + // .. validated-code-block:: yaml + // :type-name: envoy.service.rate_limit_quota.v3.BucketId + // + // bucket: + // name: my_bucket + // env: staging + // + // For the request with the HTTP header ``environment`` set to ``prod``, will produce: + // + // .. validated-code-block:: yaml + // :type-name: envoy.service.rate_limit_quota.v3.BucketId + // + // bucket: + // name: my_bucket + // env: prod + // + // .. note:: + // The order of ``BucketId`` keys do not matter. Buckets ``{ a: 'A', b: 'B' }`` and + // ``{ b: 'B', a: 'A' }`` are identical. + // + // If not set, requests will NOT be reported to the server, and will always limited + // according to :ref:`no_assignment_behavior + // ` + // configuration. + BucketIdBuilder bucket_id_builder = 1; + + // The interval at which the data plane (RLQS client) is to report quota usage for this bucket. + // + // When the first request is matched to a bucket with no assignment, the data plane is to report + // the request immediately in the :ref:`RateLimitQuotaUsageReports + // ` message. + // For the RLQS server, this signals that the data plane is now subscribed to + // the quota assignments in this bucket, and will start sending the assignment as described in + // the :ref:`RLQS documentation `. + // + // After sending the initial report, the data plane is to continue reporting the bucket usage with + // the internal specified in this field. 
+ // + // If for any reason RLQS client doesn't receive the initial assignment for the reported bucket, + // the data plane will eventually consider the bucket abandoned and stop sending the usage + // reports. This is explained in more details at :ref:`Rate Limit Quota Service (RLQS) + // `. + // + // [#comment: 100000000 nanoseconds = 0.1 seconds] + google.protobuf.Duration reporting_interval = 2 [(validate.rules).duration = { + required: true + gt {nanos: 100000000} + }]; + + // Customize the deny response to the requests over the rate limit. + // If not set, the filter will be configured as if an empty message is set, + // and will behave according to the defaults specified in :ref:`DenyResponseSettings + // `. + DenyResponseSettings deny_response_settings = 3; + + // Configures the behavior in the "no assignment" state: after the first request has been + // matched to the bucket, and before the the RLQS server returns the first quota assignment. + // + // If not set, the default behavior is to allow all requests. + NoAssignmentBehavior no_assignment_behavior = 4; + + // Configures the behavior in the "expired assignment" state: the bucket's assignment has expired, + // and cannot be refreshed. + // + // If not set, the bucket is abandoned when its ``active`` assignment expires. + // The process of abandoning the bucket, and restarting the subscription is described in the + // :ref:`AbandonAction ` + // message. + ExpiredAssignmentBehavior expired_assignment_behavior = 5; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/service/rate_limit_quota/v3/rlqs.proto b/xds/third_party/envoy/src/main/proto/envoy/service/rate_limit_quota/v3/rlqs.proto new file mode 100644 index 00000000000..b8fa2cd8982 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/service/rate_limit_quota/v3/rlqs.proto @@ -0,0 +1,258 @@ +syntax = "proto3"; + +package envoy.service.rate_limit_quota.v3; + +import "envoy/type/v3/ratelimit_strategy.proto"; + +import "google/protobuf/duration.proto"; + +import "xds/annotations/v3/status.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.rate_limit_quota.v3"; +option java_outer_classname = "RlqsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/rate_limit_quota/v3;rate_limit_quotav3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Rate Limit Quota Service (RLQS)] + +// The Rate Limit Quota Service (RLQS) is a Envoy global rate limiting service that allows to +// delegate rate limit decisions to a remote service. The service will aggregate the usage reports +// from multiple data plane instances, and distribute Rate Limit Assignments to each instance +// based on its business logic. The logic is outside of the scope of the protocol API. +// +// The protocol is designed as a streaming-first API. It utilizes watch-like subscription model. +// The data plane groups requests into Quota Buckets as directed by the filter config, +// and periodically reports them to the RLQS server along with the Bucket identifier, :ref:`BucketId +// `. Once RLQS server has collected enough +// reports to make a decision, it'll send back the assignment with the rate limiting instructions. 
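+//
+// As an illustration only (this example is not in the upstream Envoy comments), a
+// single usage report on the stream might look like the following YAML form of the
+// ``RateLimitQuotaUsageReports`` message defined below:
+//
+// .. code-block:: yaml
+//
+//   domain: envoy
+//   bucket_quota_usages:
+//   - bucket_id:
+//       bucket:
+//         name: my_bucket
+//         env: staging
+//     time_elapsed: 60s
+//     num_requests_allowed: 1000
+//     num_requests_denied: 12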
+// +// The first report sent by the data plane is interpreted by the RLQS server as a "watch" request, +// indicating that the data plane instance is interested in receiving further updates for the +// ``BucketId``. From then on, RLQS server may push assignments to this instance at will, even if +// the instance is not sending usage reports. It's the responsibility of the RLQS server +// to determine when the data plane instance didn't send ``BucketId`` reports for too long, +// and to respond with the :ref:`AbandonAction +// `, +// indicating that the server has now stopped sending quota assignments for the ``BucketId`` bucket, +// and the data plane instance should :ref:`abandon +// ` +// it. +// +// If for any reason the RLQS client doesn't receive the initial assignment for the reported bucket, +// in order to prevent memory exhaustion, the data plane will limit the time such bucket +// is retained. The exact time to wait for the initial assignment is chosen by the filter, +// and may vary based on the implementation. +// Once the duration ends, the data plane will stop reporting bucket usage, reject any enqueued +// requests, and purge the bucket from the memory. Subsequent requests matched into the bucket +// will re-initialize the bucket in the "no assignment" state, restarting the reports. +// +// Refer to Rate Limit Quota :ref:`configuration overview ` +// for further details. + +// Defines the Rate Limit Quota Service (RLQS). +service RateLimitQuotaService { + // Main communication channel: the data plane sends usage reports to the RLQS server, + // and the server asynchronously responding with the assignments. + rpc StreamRateLimitQuotas(stream RateLimitQuotaUsageReports) + returns (stream RateLimitQuotaResponse) { + } +} + +message RateLimitQuotaUsageReports { + // The usage report for a bucket. + // + // .. note:: + // Note that the first report sent for a ``BucketId`` indicates to the RLQS server that + // the RLQS client is subscribing for the future assignments for this ``BucketId``. + message BucketQuotaUsage { + // ``BucketId`` for which request quota usage is reported. + BucketId bucket_id = 1 [(validate.rules).message = {required: true}]; + + // Time elapsed since the last report. + google.protobuf.Duration time_elapsed = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Requests the data plane has allowed through. + uint64 num_requests_allowed = 3; + + // Requests throttled. + uint64 num_requests_denied = 4; + } + + // All quota requests must specify the domain. This enables sharing the quota + // server between different applications without fear of overlap. + // E.g., "envoy". + // + // Should only be provided in the first report, all subsequent messages on the same + // stream are considered to be in the same domain. In case the domain needs to be + // changes, close the stream, and reopen a new one with the different domain. + string domain = 1 [(validate.rules).string = {min_len: 1}]; + + // A list of quota usage reports. The list is processed by the RLQS server in the same order + // it's provided by the client. + repeated BucketQuotaUsage bucket_quota_usages = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +message RateLimitQuotaResponse { + // Commands the data plane to apply one of the actions to the bucket with the + // :ref:`bucket_id `. + message BucketAction { + // Quota assignment for the bucket. Configures the rate limiting strategy and the duration + // for the given :ref:`bucket_id + // `. 
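+    //
+    // Illustrative sketch (not part of the upstream Envoy comments): an assignment
+    // that allows 100 requests per second and expires after one minute, in YAML form:
+    //
+    // .. code-block:: yaml
+    //
+    //   assignment_time_to_live: 60s
+    //   rate_limit_strategy:
+    //     requests_per_time_unit:
+    //       requests_per_time_unit: 100
+    //       time_unit: SECOND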
+ // + // **Applying the first assignment to the bucket** + // + // Once the data plane receives the ``QuotaAssignmentAction``, it must send the current usage + // report for the bucket, and start rate limiting requests matched into the bucket + // using the strategy configured in the :ref:`rate_limit_strategy + // ` + // field. The assignment becomes bucket's ``active`` assignment. + // + // **Expiring the assignment** + // + // The duration of the assignment defined in the :ref:`assignment_time_to_live + // ` + // field. When the duration runs off, the assignment is ``expired``, and no longer ``active``. + // The data plane should stop applying the rate limiting strategy to the bucket, and transition + // the bucket to the "expired assignment" state. This activates the behavior configured in the + // :ref:`expired_assignment_behavior ` + // field. + // + // **Replacing the assignment** + // + // * If the rate limiting strategy is different from bucket's ``active`` assignment, or + // the current bucket assignment is ``expired``, the data plane must immediately + // end the current assignment, report the bucket usage, and apply the new assignment. + // The new assignment becomes bucket's ``active`` assignment. + // * If the rate limiting strategy is the same as the bucket's ``active`` (not ``expired``) + // assignment, the data plane should extend the duration of the ``active`` assignment + // for the duration of the new assignment provided in the :ref:`assignment_time_to_live + // ` + // field. The ``active`` assignment is considered unchanged. + message QuotaAssignmentAction { + // A duration after which the assignment is be considered ``expired``. The process of the + // expiration is described :ref:`above + // `. + // + // * If unset, the assignment has no expiration date. + // * If set to ``0``, the assignment expires immediately, forcing the client into the + // :ref:`"expired assignment" + // ` + // state. This may be used by the RLQS server in cases when it needs clients to proactively + // fall back to the pre-configured :ref:`ExpiredAssignmentBehavior + // `, + // f.e. before the server going into restart. + // + // .. attention:: + // Note that :ref:`expiring + // ` + // the assignment is not the same as :ref:`abandoning + // ` + // the assignment. While expiring the assignment just transitions the bucket to + // the "expired assignment" state; abandoning the assignment completely erases + // the bucket from the data plane memory, and stops the usage reports. + google.protobuf.Duration assignment_time_to_live = 2 [(validate.rules).duration = {gte {}}]; + + // Configures the local rate limiter for the request matched to the bucket. + // If not set, allow all requests. + type.v3.RateLimitStrategy rate_limit_strategy = 3; + } + + // Abandon action for the bucket. Indicates that the RLQS server will no longer be + // sending updates for the given :ref:`bucket_id + // `. + // + // If no requests are reported for a bucket, after some time the server considers the bucket + // inactive. The server stops tracking the bucket, and instructs the the data plane to abandon + // the bucket via this message. + // + // **Abandoning the assignment** + // + // The data plane is to erase the bucket (including its usage data) from the memory. + // It should stop tracking the bucket, and stop reporting its usage. This effectively resets + // the data plane to the state prior to matching the first request into the bucket. 
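+    //
+    // Illustrative sketch (not part of the upstream Envoy comments): a response entry
+    // that abandons the ``{ name: my_bucket, env: staging }`` bucket, shown as the
+    // YAML form of the enclosing ``BucketAction``:
+    //
+    // .. code-block:: yaml
+    //
+    //   bucket_id:
+    //     bucket:
+    //       name: my_bucket
+    //       env: staging
+    //   abandon_action: {}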
+ // + // **Restarting the subscription** + // + // If a new request is matched into a bucket previously abandoned, the data plane must behave + // as if it has never tracked the bucket, and it's the first request matched into it: + // + // 1. The process of :ref:`subscription and reporting + // ` + // starts from the beginning. + // + // 2. The bucket transitions to the :ref:`"no assignment" + // ` + // state. + // + // 3. Once the new assignment is received, it's applied per + // "Applying the first assignment to the bucket" section of the :ref:`QuotaAssignmentAction + // `. + message AbandonAction { + } + + // ``BucketId`` for which request the action is applied. + BucketId bucket_id = 1 [(validate.rules).message = {required: true}]; + + oneof bucket_action { + option (validate.required) = true; + + // Apply the quota assignment to the bucket. + // + // Commands the data plane to apply a rate limiting strategy to the bucket. + // The process of applying and expiring the rate limiting strategy is detailed in the + // :ref:`QuotaAssignmentAction + // ` + // message. + QuotaAssignmentAction quota_assignment_action = 2; + + // Abandon the bucket. + // + // Commands the data plane to abandon the bucket. + // The process of abandoning the bucket is described in the :ref:`AbandonAction + // ` + // message. + AbandonAction abandon_action = 3; + } + } + + // An ordered list of actions to be applied to the buckets. The actions are applied in the + // given order, from top to bottom. + repeated BucketAction bucket_action = 1 [(validate.rules).repeated = {min_items: 1}]; +} + +// The identifier for the bucket. Used to match the bucket between the control plane (RLQS server), +// and the data plane (RLQS client), f.e.: +// +// * the data plane sends a usage report for requests matched into the bucket with ``BucketId`` +// to the control plane +// * the control plane sends an assignment for the bucket with ``BucketId`` to the data plane +// Bucket ID. +// +// Example: +// +// .. validated-code-block:: yaml +// :type-name: envoy.service.rate_limit_quota.v3.BucketId +// +// bucket: +// name: my_bucket +// env: staging +// +// .. note:: +// The order of ``BucketId`` keys do not matter. Buckets ``{ a: 'A', b: 'B' }`` and +// ``{ b: 'B', a: 'A' }`` are identical. +message BucketId { + map bucket = 1 [(validate.rules).map = { + min_pairs: 1 + keys {string {min_len: 1}} + values {string {min_len: 1}} + }]; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/http_inputs.proto b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/http_inputs.proto new file mode 100644 index 00000000000..c90199eb618 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/v3/http_inputs.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "HttpInputsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common HTTP inputs] + +// Match input indicates that matching should be done on a specific request header. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. 
if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_headers] +message HttpRequestHeaderMatchInput { + // The request header to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicates that matching should be done on a specific request trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_trailers] +message HttpRequestTrailerMatchInput { + // The request trailer to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicating that matching should be done on a specific response header. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_headers] +message HttpResponseHeaderMatchInput { + // The response header to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicates that matching should be done on a specific response trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_trailers] +message HttpResponseTrailerMatchInput { + // The response trailer to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicates that matching should be done on a specific query parameter. +// The resulting input string will be the first query parameter for the value +// 'query_param'. +// [#extension: envoy.matching.inputs.query_params] +message HttpRequestQueryParamMatchInput { + // The query parameter to match on. + string query_param = 1 [(validate.rules).string = {min_len: 1}]; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/v3/http_status.proto b/xds/third_party/envoy/src/main/proto/envoy/type/v3/http_status.proto new file mode 100644 index 00000000000..ab03e1b2b72 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/v3/http_status.proto @@ -0,0 +1,143 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "HttpStatusProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP status codes] + +// HTTP response codes supported in Envoy. 
+// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml +enum StatusCode { + // Empty - This code not part of the HTTP status code specification, but it is needed for proto + // `enum` type. + Empty = 0; + + Continue = 100; + + OK = 200; + + Created = 201; + + Accepted = 202; + + NonAuthoritativeInformation = 203; + + NoContent = 204; + + ResetContent = 205; + + PartialContent = 206; + + MultiStatus = 207; + + AlreadyReported = 208; + + IMUsed = 226; + + MultipleChoices = 300; + + MovedPermanently = 301; + + Found = 302; + + SeeOther = 303; + + NotModified = 304; + + UseProxy = 305; + + TemporaryRedirect = 307; + + PermanentRedirect = 308; + + BadRequest = 400; + + Unauthorized = 401; + + PaymentRequired = 402; + + Forbidden = 403; + + NotFound = 404; + + MethodNotAllowed = 405; + + NotAcceptable = 406; + + ProxyAuthenticationRequired = 407; + + RequestTimeout = 408; + + Conflict = 409; + + Gone = 410; + + LengthRequired = 411; + + PreconditionFailed = 412; + + PayloadTooLarge = 413; + + URITooLong = 414; + + UnsupportedMediaType = 415; + + RangeNotSatisfiable = 416; + + ExpectationFailed = 417; + + MisdirectedRequest = 421; + + UnprocessableEntity = 422; + + Locked = 423; + + FailedDependency = 424; + + UpgradeRequired = 426; + + PreconditionRequired = 428; + + TooManyRequests = 429; + + RequestHeaderFieldsTooLarge = 431; + + InternalServerError = 500; + + NotImplemented = 501; + + BadGateway = 502; + + ServiceUnavailable = 503; + + GatewayTimeout = 504; + + HTTPVersionNotSupported = 505; + + VariantAlsoNegotiates = 506; + + InsufficientStorage = 507; + + LoopDetected = 508; + + NotExtended = 510; + + NetworkAuthenticationRequired = 511; +} + +// HTTP status. +message HttpStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.HttpStatus"; + + // Supplies HTTP response code. + StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_strategy.proto b/xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_strategy.proto new file mode 100644 index 00000000000..a86da55b854 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_strategy.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "envoy/type/v3/ratelimit_unit.proto"; +import "envoy/type/v3/token_bucket.proto"; + +import "xds/annotations/v3/status.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitStrategyProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Rate Limit Strategies] + +message RateLimitStrategy { + // Choose between allow all and deny all. + enum BlanketRule { + ALLOW_ALL = 0; + DENY_ALL = 1; + } + + // Best-effort limit of the number of requests per time unit. + // + // Allows to specify the desired requests per second (RPS, QPS), requests per minute (QPM, RPM), + // etc., without specifying a rate limiting algorithm implementation. + // + // ``RequestsPerTimeUnit`` strategy does not demand any specific rate limiting algorithm to be + // used (in contrast to the :ref:`TokenBucket `, + // for example). 
It implies that the implementation details of rate limiting algorithm are + // irrelevant as long as the configured number of "requests per time unit" is achieved. + // + // Note that the ``TokenBucket`` is still a valid implementation of the ``RequestsPerTimeUnit`` + // strategy, and may be chosen to enforce the rate limit. However, there's no guarantee it will be + // the ``TokenBucket`` in particular, and not the Leaky Bucket, the Sliding Window, or any other + // rate limiting algorithm that fulfills the requirements. + message RequestsPerTimeUnit { + // The desired number of requests per :ref:`time_unit + // ` to allow. + // If set to ``0``, deny all (equivalent to ``BlanketRule.DENY_ALL``). + // + // .. note:: + // Note that the algorithm implementation determines the course of action for the requests + // over the limit. As long as the ``requests_per_time_unit`` converges on the desired value, + // it's allowed to treat this field as a soft-limit: allow bursts, redistribute the allowance + // over time, etc. + // + uint64 requests_per_time_unit = 1; + + // The unit of time. Ignored when :ref:`requests_per_time_unit + // ` + // is ``0`` (deny all). + RateLimitUnit time_unit = 2 [(validate.rules).enum = {defined_only: true}]; + } + + oneof strategy { + option (validate.required) = true; + + // Allow or Deny the requests. + // If unset, allow all. + BlanketRule blanket_rule = 1 [(validate.rules).enum = {defined_only: true}]; + + // Best-effort limit of the number of requests per time unit, f.e. requests per second. + // Does not prescribe any specific rate limiting algorithm, see :ref:`RequestsPerTimeUnit + // ` for details. + RequestsPerTimeUnit requests_per_time_unit = 2; + + // Limit the requests by consuming tokens from the Token Bucket. + // Allow the same number of requests as the number of tokens available in + // the token bucket. + TokenBucket token_bucket = 3; + } +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_unit.proto b/xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_unit.proto new file mode 100644 index 00000000000..1a96497926d --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/v3/ratelimit_unit.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitUnitProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Ratelimit Time Unit] + +// Identifies the unit of of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; + + // The time unit representing a month. + MONTH = 5; + + // The time unit representing a year. 
+ YEAR = 6; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/v3/token_bucket.proto b/xds/third_party/envoy/src/main/proto/envoy/type/v3/token_bucket.proto new file mode 100644 index 00000000000..157a271efc9 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/v3/token_bucket.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "TokenBucketProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Token bucket] + +// Configures a token bucket, typically used for rate limiting. +message TokenBucket { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.TokenBucket"; + + // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket + // initially contains. + uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; + + // The number of tokens added to the bucket during each fill interval. If not specified, defaults + // to a single token. + google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; + + // The fill interval that tokens are added to the bucket. During each fill interval + // ``tokens_per_fill`` are added to the bucket. The bucket will never contain more than + // ``max_tokens`` tokens. + google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { + required: true + gt {} + }]; +} diff --git a/xds/third_party/xds/import.sh b/xds/third_party/xds/import.sh index 44f9ad12ed4..9e4bf71d52f 100755 --- a/xds/third_party/xds/import.sh +++ b/xds/third_party/xds/import.sh @@ -45,9 +45,12 @@ xds/core/v3/resource_locator.proto xds/core/v3/resource_name.proto xds/data/orca/v3/orca_load_report.proto xds/service/orca/v3/orca.proto +xds/type/matcher/v3/cel.proto xds/type/matcher/v3/matcher.proto xds/type/matcher/v3/regex.proto xds/type/matcher/v3/string.proto +xds/type/v3/cel.proto +xds/type/matcher/v3/http_inputs.proto xds/type/v3/typed_struct.proto ) diff --git a/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/cel.proto b/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/cel.proto new file mode 100644 index 00000000000..b1ad1faa281 --- /dev/null +++ b/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/cel.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/annotations/v3/status.proto"; +import "xds/type/v3/cel.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "CelProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Common Expression Language (CEL) matchers] + +// Performs a match by evaluating a `Common Expression Language +// `_ (CEL) expression against the standardized set of +// :ref:`HTTP attributes ` specified via ``HttpAttributesCelMatchInput``. +// +// .. attention:: +// +// The match is ``true``, iff the result of the evaluation is a bool AND true. 
+// In all other cases, the match is ``false``, including but not limited to: non-bool types, +// ``false``, ``null``,`` int(1)``, etc. +// In case CEL expression raises an error, the result of the evaluation is interpreted "no match". +// +// Refer to :ref:`Unified Matcher API ` documentation +// for usage details. +// +// [#comment:TODO(sergiitk): Link HttpAttributesMatchInput + usage example.] +// [#comment:TODO(sergiitk): When implemented, add the extension tag.] +message CelMatcher { + // Either parsed or checked representation of the CEL program. + type.v3.CelExpression expr_match = 1 [(validate.rules).message = {required: true}]; + + // Free-form description of the CEL AST, e.g. the original expression text, to be + // used for debugging assistance. + string description = 2; +} diff --git a/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/http_inputs.proto b/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/http_inputs.proto new file mode 100644 index 00000000000..0dd80cd6f66 --- /dev/null +++ b/xds/third_party/xds/src/main/proto/xds/type/matcher/v3/http_inputs.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/annotations/v3/status.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "HttpInputsProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Common HTTP Inputs] + +// Specifies that matching should be performed on the set of :ref:`HTTP attributes +// `. +// +// The attributes will be exposed via `Common Expression Language +// `_ runtime to associated CEL matcher. +// +// Refer to :ref:`Unified Matcher API ` documentation +// for usage details. +// +// [#comment:TODO(sergiitk): When implemented, add the extension tag.] +message HttpAttributesCelMatchInput { +} diff --git a/xds/third_party/xds/src/main/proto/xds/type/v3/cel.proto b/xds/third_party/xds/src/main/proto/xds/type/v3/cel.proto new file mode 100644 index 00000000000..df4f81d90d2 --- /dev/null +++ b/xds/third_party/xds/src/main/proto/xds/type/v3/cel.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package xds.type.v3; + +import "google/api/expr/v1alpha1/checked.proto"; +import "google/api/expr/v1alpha1/syntax.proto"; +import "cel/expr/checked.proto"; +import "cel/expr/syntax.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/annotations/v3/status.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.v3"; +option java_outer_classname = "CelProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Common Expression Language (CEL)] + +// Either parsed or checked representation of the `Common Expression Language +// `_ (CEL) program. +message CelExpression { + oneof expr_specifier { + // Parsed expression in abstract syntax tree (AST) form. + // + // Deprecated -- use ``cel_expr_parsed`` field instead. + // If ``cel_expr_parsed`` or ``cel_expr_checked`` is set, this field is not used. + google.api.expr.v1alpha1.ParsedExpr parsed_expr = 1 [deprecated = true]; + + // Parsed expression in abstract syntax tree (AST) form that has been successfully type checked. + // + // Deprecated -- use ``cel_expr_checked`` field instead. + // If ``cel_expr_parsed`` or ``cel_expr_checked`` is set, this field is not used. 
+ google.api.expr.v1alpha1.CheckedExpr checked_expr = 2 [deprecated = true]; + } + + // Parsed expression in abstract syntax tree (AST) form. + // + // If ``cel_expr_checked`` is set, this field is not used. + cel.expr.ParsedExpr cel_expr_parsed = 3; + + // Parsed expression in abstract syntax tree (AST) form that has been successfully type checked. + // + // If set, takes precedence over ``cel_expr_parsed``. + cel.expr.CheckedExpr cel_expr_checked = 4; +} + +// Extracts a string by evaluating a `Common Expression Language +// `_ (CEL) expression against the standardized set of +// :ref:`HTTP attributes `. +// +// .. attention:: +// +// Besides CEL evaluation raising an error explicitly, a CEL program returning a type other than +// ``string``, or not returning anything, is considered an error as well. +// +// [#comment:TODO(sergiitk): When implemented, add the extension tag.] +message CelExtractString { + // The CEL expression used to extract a string from the CEL environment. + CelExpression expr_extract = 1 [(validate.rules).message = {required: true}]; + + // If the CEL expression evaluates to an error, this value will be returned to the caller. + // If not set, the error is propagated to the caller. + google.protobuf.StringValue default_value = 2; +} From e567b4427aaef554de103de7c82c4448b6944d3d Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 2 Aug 2024 11:40:02 -0700 Subject: [PATCH 28/53] core: Don't reuse channels in PickFirstLeafLB test PickFirstLeafLB uses channel reference equality to see if it has re-created subchannels. The tests' reuse of channels breaks the expected invariant. --- .../PickFirstLeafLoadBalancerTest.java | 166 +++++++++++------- 1 file changed, 100 insertions(+), 66 deletions(-) diff --git a/core/src/test/java/io/grpc/internal/PickFirstLeafLoadBalancerTest.java b/core/src/test/java/io/grpc/internal/PickFirstLeafLoadBalancerTest.java index 7aae0c2731a..335d199d8b1 100644 --- a/core/src/test/java/io/grpc/internal/PickFirstLeafLoadBalancerTest.java +++ b/core/src/test/java/io/grpc/internal/PickFirstLeafLoadBalancerTest.java @@ -123,16 +123,14 @@ public void uncaughtException(Thread t, Throwable e) { private ArgumentCaptor createArgsCaptor; @Captor private ArgumentCaptor stateListenerCaptor; - private final Helper mockHelper = mock(Helper.class, delegatesTo(new MockHelperImpl())); - @Mock + private Helper mockHelper; private FakeSubchannel mockSubchannel1; - @Mock + private FakeSubchannel mockSubchannel1n2; private FakeSubchannel mockSubchannel2; - @Mock + private FakeSubchannel mockSubchannel2n2; private FakeSubchannel mockSubchannel3; - @Mock + private FakeSubchannel mockSubchannel3n2; private FakeSubchannel mockSubchannel4; - @Mock private FakeSubchannel mockSubchannel5; @Mock // This LoadBalancer doesn't use any of the arg fields, as verified in tearDown().
private PickSubchannelArgs mockArgs; @@ -150,23 +148,28 @@ public void setUp() { SocketAddress addr = new FakeSocketAddress("server" + i); servers.add(new EquivalentAddressGroup(addr)); } - mockSubchannel1 = mock(FakeSubchannel.class); - mockSubchannel2 = mock(FakeSubchannel.class); - mockSubchannel3 = mock(FakeSubchannel.class); - mockSubchannel4 = mock(FakeSubchannel.class); - mockSubchannel5 = mock(FakeSubchannel.class); - when(mockSubchannel1.getAttributes()).thenReturn(Attributes.EMPTY); - when(mockSubchannel2.getAttributes()).thenReturn(Attributes.EMPTY); - when(mockSubchannel3.getAttributes()).thenReturn(Attributes.EMPTY); - when(mockSubchannel4.getAttributes()).thenReturn(Attributes.EMPTY); - when(mockSubchannel5.getAttributes()).thenReturn(Attributes.EMPTY); - - when(mockSubchannel1.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(0))); - when(mockSubchannel2.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(1))); - when(mockSubchannel3.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(2))); - when(mockSubchannel4.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(3))); - when(mockSubchannel5.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(4))); - + mockSubchannel1 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(0)), Attributes.EMPTY))); + mockSubchannel1n2 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(0)), Attributes.EMPTY))); + mockSubchannel2 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(1)), Attributes.EMPTY))); + mockSubchannel2n2 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(1)), Attributes.EMPTY))); + mockSubchannel3 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(2)), Attributes.EMPTY))); + mockSubchannel3n2 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(2)), Attributes.EMPTY))); + mockSubchannel4 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(3)), Attributes.EMPTY))); + mockSubchannel5 = mock(FakeSubchannel.class, delegatesTo( + new FakeSubchannel(Arrays.asList(servers.get(4)), Attributes.EMPTY))); + + mockHelper = mock(Helper.class, delegatesTo(new MockHelperImpl(Arrays.asList( + mockSubchannel1, mockSubchannel1n2, + mockSubchannel2, mockSubchannel2n2, + mockSubchannel3, mockSubchannel3n2, + mockSubchannel4, mockSubchannel5)))); loadBalancer = new PickFirstLeafLoadBalancer(mockHelper); } @@ -251,14 +254,14 @@ public void pickAfterResolved_shuffle() { PickResult pick2 = pickerCaptor.getValue().pickSubchannel(mockArgs); assertEquals(pick1, pick2); verifyNoMoreInteractions(mockHelper); - assertThat(pick1.toString()).contains("subchannel=null"); + assertThat(pick1.getSubchannel()).isNull(); stateListener2.onSubchannelState(ConnectivityStateInfo.forNonError(READY)); verify(mockHelper).updateBalancingState(eq(READY), pickerCaptor.capture()); PickResult pick3 = pickerCaptor.getValue().pickSubchannel(mockArgs); PickResult pick4 = pickerCaptor.getValue().pickSubchannel(mockArgs); assertEquals(pick3, pick4); - assertThat(pick3.toString()).contains("subchannel=Mock"); + assertThat(pick3.getSubchannel()).isEqualTo(mockSubchannel2); } @Test @@ -569,7 +572,7 @@ public void pickWithDupAddressesUpDownUp() { InOrder inOrder = inOrder(mockHelper); SocketAddress socketAddress = servers.get(0).getAddresses().get(0); EquivalentAddressGroup badEag 
= new EquivalentAddressGroup( - Lists.newArrayList(socketAddress, socketAddress), affinity); + Lists.newArrayList(socketAddress, socketAddress)); List newServers = Lists.newArrayList(badEag); loadBalancer.acceptResolvedAddresses( @@ -727,7 +730,7 @@ public void nameResolutionSuccessAfterError() { @Test public void nameResolutionTemporaryError() { List newServers = Lists.newArrayList(servers.get(0)); - InOrder inOrder = inOrder(mockHelper, mockSubchannel1); + InOrder inOrder = inOrder(mockHelper, mockSubchannel1, mockSubchannel1n2); loadBalancer.acceptResolvedAddresses( ResolvedAddresses.newBuilder().setAddresses(newServers).setAttributes(affinity).build()); inOrder.verify(mockSubchannel1).start(stateListenerCaptor.capture()); @@ -744,14 +747,15 @@ public void nameResolutionTemporaryError() { loadBalancer.acceptResolvedAddresses( ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(affinity).build()); inOrder.verify(mockHelper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture()); - inOrder.verify(mockSubchannel1).start(stateListenerCaptor.capture()); + inOrder.verify(mockSubchannel1n2).start(stateListenerCaptor.capture()); SubchannelStateListener stateListener2 = stateListenerCaptor.getValue(); assertNull(pickerCaptor.getValue().pickSubchannel(mockArgs).getSubchannel()); stateListener2.onSubchannelState(ConnectivityStateInfo.forNonError(READY)); inOrder.verify(mockHelper).updateBalancingState(eq(READY), pickerCaptor.capture()); - assertEquals(mockSubchannel1, pickerCaptor.getValue().pickSubchannel(mockArgs).getSubchannel()); + assertEquals(mockSubchannel1n2, + pickerCaptor.getValue().pickSubchannel(mockArgs).getSubchannel()); } @@ -1027,7 +1031,7 @@ public void updateAddresses_disjoint_connecting() { @Test public void updateAddresses_disjoint_ready_twice() { InOrder inOrder = inOrder(mockHelper, mockSubchannel1, mockSubchannel2, - mockSubchannel3, mockSubchannel4); + mockSubchannel3, mockSubchannel4, mockSubchannel1n2, mockSubchannel2n2); // Creating first set of endpoints/addresses List oldServers = Lists.newArrayList(servers.get(0), servers.get(1)); SubchannelStateListener stateListener2 = null; @@ -1126,7 +1130,7 @@ public void updateAddresses_disjoint_ready_twice() { ResolvedAddresses.newBuilder().setAddresses(newestServers).setAttributes(affinity).build()); inOrder.verify(mockSubchannel3).shutdown(); inOrder.verify(mockHelper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture()); - inOrder.verify(mockSubchannel1).start(stateListenerCaptor.capture()); + inOrder.verify(mockSubchannel1n2).start(stateListenerCaptor.capture()); stateListener = stateListenerCaptor.getValue(); assertEquals(CONNECTING, loadBalancer.getConcludedConnectivityState()); picker = pickerCaptor.getValue(); @@ -1135,7 +1139,7 @@ public void updateAddresses_disjoint_ready_twice() { assertEquals(picker.pickSubchannel(mockArgs), picker.pickSubchannel(mockArgs)); // But the picker calls requestConnection() only once - inOrder.verify(mockSubchannel1).requestConnection(); + inOrder.verify(mockSubchannel1n2).requestConnection(); assertEquals(PickResult.withNoResult(), pickerCaptor.getValue().pickSubchannel(mockArgs)); assertEquals(CONNECTING, loadBalancer.getConcludedConnectivityState()); @@ -1150,23 +1154,24 @@ public void updateAddresses_disjoint_ready_twice() { stateListener.onSubchannelState(ConnectivityStateInfo.forTransientFailure(CONNECTION_ERROR)); // Starting connection attempt to address 2 - if (!enableHappyEyeballs) { - 
inOrder.verify(mockSubchannel2).start(stateListenerCaptor.capture()); - stateListener2 = stateListenerCaptor.getValue(); - } - inOrder.verify(mockSubchannel2).requestConnection(); + FakeSubchannel mockSubchannel2Attempt = + enableHappyEyeballs ? mockSubchannel2n2 : mockSubchannel2; + inOrder.verify(mockSubchannel2Attempt).start(stateListenerCaptor.capture()); + stateListener2 = stateListenerCaptor.getValue(); + inOrder.verify(mockSubchannel2Attempt).requestConnection(); // Connection attempt to address 2 is successful stateListener2.onSubchannelState(ConnectivityStateInfo.forNonError(READY)); assertEquals(READY, loadBalancer.getConcludedConnectivityState()); - inOrder.verify(mockSubchannel1).shutdown(); + inOrder.verify(mockSubchannel1n2).shutdown(); // Successful connection shuts down other subchannel inOrder.verify(mockHelper).updateBalancingState(eq(READY), pickerCaptor.capture()); picker = pickerCaptor.getValue(); // Verify that picker still returns correct subchannel - assertEquals(PickResult.withSubchannel(mockSubchannel2), picker.pickSubchannel(mockArgs)); + assertEquals( + PickResult.withSubchannel(mockSubchannel2Attempt), picker.pickSubchannel(mockArgs)); } @Test @@ -2048,7 +2053,7 @@ public void recreate_shutdown_subchannel() { // Starting first connection attempt InOrder inOrder = inOrder(mockHelper, mockSubchannel1, mockSubchannel2, - mockSubchannel3, mockSubchannel4); // captor: captures + mockSubchannel3, mockSubchannel4, mockSubchannel1n2); // captor: captures // Creating first set of endpoints/addresses List addrs = @@ -2084,9 +2089,9 @@ public void recreate_shutdown_subchannel() { // Calling pickSubchannel() requests a connection. assertEquals(picker.pickSubchannel(mockArgs), picker.pickSubchannel(mockArgs)); - inOrder.verify(mockSubchannel1).start(stateListenerCaptor.capture()); + inOrder.verify(mockSubchannel1n2).start(stateListenerCaptor.capture()); SubchannelStateListener stateListener3 = stateListenerCaptor.getValue(); - inOrder.verify(mockSubchannel1).requestConnection(); + inOrder.verify(mockSubchannel1n2).requestConnection(); when(mockSubchannel1.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(0))); // gives the same result when called twice @@ -2101,7 +2106,7 @@ public void recreate_shutdown_subchannel() { // second subchannel connection attempt succeeds inOrder.verify(mockSubchannel2).requestConnection(); stateListener2.onSubchannelState(ConnectivityStateInfo.forNonError(READY)); - inOrder.verify(mockSubchannel1).shutdown(); + inOrder.verify(mockSubchannel1n2).shutdown(); inOrder.verify(mockHelper).updateBalancingState(eq(READY), pickerCaptor.capture()); assertEquals(READY, loadBalancer.getConcludedConnectivityState()); @@ -2146,7 +2151,7 @@ public void shutdown() { public void ready_then_transient_failure_again() { // Starting first connection attempt InOrder inOrder = inOrder(mockHelper, mockSubchannel1, mockSubchannel2, - mockSubchannel3, mockSubchannel4); // captor: captures + mockSubchannel3, mockSubchannel4, mockSubchannel1n2); // captor: captures // Creating first set of endpoints/addresses List addrs = @@ -2183,9 +2188,9 @@ public void ready_then_transient_failure_again() { // Calling pickSubchannel() requests a connection, gives the same result when called twice. 
assertEquals(picker.pickSubchannel(mockArgs), picker.pickSubchannel(mockArgs)); - inOrder.verify(mockSubchannel1).start(stateListenerCaptor.capture()); + inOrder.verify(mockSubchannel1n2).start(stateListenerCaptor.capture()); SubchannelStateListener stateListener3 = stateListenerCaptor.getValue(); - inOrder.verify(mockSubchannel1).requestConnection(); + inOrder.verify(mockSubchannel1n2).requestConnection(); when(mockSubchannel3.getAllAddresses()).thenReturn(Lists.newArrayList(servers.get(0))); stateListener3.onSubchannelState(ConnectivityStateInfo.forNonError(CONNECTING)); inOrder.verify(mockHelper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture()); @@ -2201,7 +2206,7 @@ public void ready_then_transient_failure_again() { assertEquals(READY, loadBalancer.getConcludedConnectivityState()); // verify that picker returns correct subchannel - inOrder.verify(mockSubchannel1).shutdown(); + inOrder.verify(mockSubchannel1n2).shutdown(); inOrder.verify(mockHelper).updateBalancingState(eq(READY), pickerCaptor.capture()); picker = pickerCaptor.getValue(); assertEquals(PickResult.withSubchannel(mockSubchannel2), picker.pickSubchannel(mockArgs)); @@ -2309,7 +2314,8 @@ public void happy_eyeballs_connection_results_happen_after_get_to_end() { public void happy_eyeballs_pick_pushes_index_over_end() { Assume.assumeTrue(enableHappyEyeballs); // This test is only for happy eyeballs - InOrder inOrder = inOrder(mockHelper, mockSubchannel1, mockSubchannel2, mockSubchannel3); + InOrder inOrder = inOrder(mockHelper, mockSubchannel1, mockSubchannel2, mockSubchannel3, + mockSubchannel2n2, mockSubchannel3n2); Status error = Status.UNAUTHENTICATED.withDescription("simulated failure"); List addrs = @@ -2359,9 +2365,9 @@ public void happy_eyeballs_pick_pushes_index_over_end() { // Try pushing after end with just picks listeners[0].onSubchannelState(ConnectivityStateInfo.forNonError(READY)); - for (SubchannelStateListener listener : listeners) { - listener.onSubchannelState(ConnectivityStateInfo.forNonError(IDLE)); - } + verify(mockSubchannel2).shutdown(); + verify(mockSubchannel3).shutdown(); + listeners[0].onSubchannelState(ConnectivityStateInfo.forNonError(IDLE)); loadBalancer.acceptResolvedAddresses( ResolvedAddresses.newBuilder().setAddresses(addrs).setAttributes(affinity).build()); inOrder.verify(mockHelper).updateBalancingState(eq(IDLE), pickerCaptor.capture()); @@ -2372,11 +2378,14 @@ public void happy_eyeballs_pick_pushes_index_over_end() { } assertEquals(IDLE, loadBalancer.getConcludedConnectivityState()); - for (SubchannelStateListener listener : listeners) { - listener.onSubchannelState(ConnectivityStateInfo.forTransientFailure(error)); - } + listeners[0].onSubchannelState(ConnectivityStateInfo.forTransientFailure(error)); + inOrder.verify(mockSubchannel2n2).start(stateListenerCaptor.capture()); + stateListenerCaptor.getValue().onSubchannelState( + ConnectivityStateInfo.forTransientFailure(error)); + inOrder.verify(mockSubchannel3n2).start(stateListenerCaptor.capture()); + stateListenerCaptor.getValue().onSubchannelState( + ConnectivityStateInfo.forTransientFailure(error)); assertEquals(TRANSIENT_FAILURE, loadBalancer.getConcludedConnectivityState()); - } @Test @@ -2571,9 +2580,22 @@ private static class FakeSocketAddress extends SocketAddress { @Override public String toString() { - return "FakeSocketAddress-" + name; + return "FakeSocketAddress(" + name + ")"; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof FakeSocketAddress)) { + return false; + } + FakeSocketAddress 
that = (FakeSocketAddress) o; + return this.name.equals(that.name); } + @Override + public int hashCode() { + return name.hashCode(); + } } private void forwardTimeByConnectionDelay() { @@ -2631,15 +2653,26 @@ public void updateAddresses(List addrs) { @Override public void shutdown() { + listener.onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); } @Override public void requestConnection() { - listener.onSubchannelState(ConnectivityStateInfo.forNonError(CONNECTING)); + } + + @Override + public String toString() { + return "FakeSubchannel@" + hashCode() + "(" + eags + ")"; } } private class MockHelperImpl extends LoadBalancer.Helper { + private final List subchannels; + + public MockHelperImpl(List subchannels) { + this.subchannels = new ArrayList(subchannels); + } + @Override public ManagedChannel createOobChannel(EquivalentAddressGroup eag, String authority) { return null; @@ -2672,16 +2705,17 @@ public void refreshNameResolution() { @Override public Subchannel createSubchannel(CreateSubchannelArgs args) { - SocketAddress addr = args.getAddresses().get(0).getAddresses().get(0); - List fakeSubchannels = - Arrays.asList(mockSubchannel1, mockSubchannel2, mockSubchannel3, mockSubchannel4, - mockSubchannel5); - for (int i = 1; i <= 5; i++) { - if (addr.toString().equals(new FakeSocketAddress("server" + i).toString())) { - return fakeSubchannels.get(i - 1); + for (int i = 0; i < subchannels.size(); i++) { + Subchannel subchannel = subchannels.get(i); + List addrs = subchannel.getAllAddresses(); + verify(subchannel, atLeast(1)).getAllAddresses(); // ignore the interaction + if (!args.getAddresses().equals(addrs)) { + continue; } + subchannels.remove(i); + return subchannel; } - throw new IllegalArgumentException("Unexpected address: " + addr); + throw new IllegalArgumentException("Unexpected addresses: " + args.getAddresses()); } } -} \ No newline at end of file +} From f9b072cfe24daf3661994bd3a83825bee4069927 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Sat, 3 Aug 2024 01:05:44 +0530 Subject: [PATCH 29/53] Netty upgrade to 4.1.110 in grpc-java (#11273) * Bump Netty to 4.1.110.Final. 
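As a quick sanity check after this bump, one can list which Netty artifacts actually end up on the runtime classpath (and confirm that netty-handler and the netty-tcnative artifacts line up with the row added to SECURITY.md) using Netty's own io.netty.util.Version API. The snippet below is only an illustrative sketch, not part of this patch, and the class name is made up:

import io.netty.util.Version;
import java.util.Map;

// Illustrative helper (not part of the patch): prints every Netty artifact that
// Netty can identify on the classpath, together with its version string.
public final class NettyVersionCheck {
  public static void main(String[] args) {
    // Version.identify() reads the io.netty.versions.properties resources shipped in each Netty jar.
    Map<String, Version> versions = Version.identify();
    for (Map.Entry<String, Version> entry : versions.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue().artifactVersion());
    }
  }
}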
--- MODULE.bazel | 30 ++++++++-------- README.md | 36 +++++++++---------- SECURITY.md | 3 +- build.gradle | 2 +- .../golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- .../main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/android/clientcache/app/build.gradle | 10 +++--- examples/android/helloworld/app/build.gradle | 8 ++--- examples/android/routeguide/app/build.gradle | 8 ++--- examples/android/strictmode/app/build.gradle | 8 ++--- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-debug/build.gradle | 2 +- examples/example-debug/pom.xml | 4 +-- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 +-- .../build.gradle | 2 +- .../example-gcp-observability/build.gradle | 2 +- examples/example-hostname/build.gradle | 2 +- examples/example-hostname/pom.xml | 4 +-- examples/example-jwt-auth/build.gradle | 2 +- examples/example-jwt-auth/pom.xml | 4 +-- examples/example-oauth/build.gradle | 2 +- examples/example-oauth/pom.xml | 4 +-- examples/example-opentelemetry/build.gradle | 2 +- examples/example-orca/build.gradle | 2 +- examples/example-reflection/build.gradle | 2 +- examples/example-servlet/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 +-- examples/example-xds/build.gradle | 2 +- examples/pom.xml | 4 +-- gradle/libs.versions.toml | 5 +-- .../io/grpc/netty/shaded/ShadingTest.java | 2 +- repositories.bzl | 28 +++++++-------- 36 files changed, 103 insertions(+), 101 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 2b5d85490f3..8260788c5cb 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -2,7 +2,7 @@ module( name = "grpc-java", compatibility_level = 0, repo_name = "io_grpc_grpc_java", - version = "1.67.0-SNAPSHOT", # CURRENT_GRPC_VERSION + version = "1.66.0-SNAPSHOT", # CURRENT_GRPC_VERSION ) # GRPC_DEPS_START @@ -22,20 +22,20 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.4.2", "com.squareup.okhttp:okhttp:2.7.5", "com.squareup.okio:okio:2.10.0", # 3.0+ needs swapping to -jvm; need work to avoid flag-day - "io.netty:netty-buffer:4.1.100.Final", - "io.netty:netty-codec-http2:4.1.100.Final", - "io.netty:netty-codec-http:4.1.100.Final", - "io.netty:netty-codec-socks:4.1.100.Final", - "io.netty:netty-codec:4.1.100.Final", - "io.netty:netty-common:4.1.100.Final", - "io.netty:netty-handler-proxy:4.1.100.Final", - "io.netty:netty-handler:4.1.100.Final", - "io.netty:netty-resolver:4.1.100.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.61.Final", - "io.netty:netty-tcnative-classes:2.0.61.Final", - "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final", - "io.netty:netty-transport-native-unix-common:4.1.100.Final", - "io.netty:netty-transport:4.1.100.Final", + "io.netty:netty-buffer:4.1.110.Final", + "io.netty:netty-codec-http2:4.1.110.Final", + "io.netty:netty-codec-http:4.1.110.Final", + "io.netty:netty-codec-socks:4.1.110.Final", + "io.netty:netty-codec:4.1.110.Final", + "io.netty:netty-common:4.1.110.Final", + "io.netty:netty-handler-proxy:4.1.110.Final", + "io.netty:netty-handler:4.1.110.Final", + "io.netty:netty-resolver:4.1.110.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.65.Final", + "io.netty:netty-tcnative-classes:2.0.65.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.110.Final", + "io.netty:netty-transport-native-unix-common:4.1.110.Final", + "io.netty:netty-transport:4.1.110.Final", "io.opencensus:opencensus-api:0.31.0", 
"io.opencensus:opencensus-contrib-grpc-metrics:0.31.0", "io.perfmark:perfmark-api:0.27.0", diff --git a/README.md b/README.md index fef37c1c3bb..cb38ad66394 100644 --- a/README.md +++ b/README.md @@ -44,8 +44,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/languages/java/quickstart) or the more explanatory [gRPC basics](https://grpc.io/docs/languages/java/basics). -The [examples](https://github.com/grpc/grpc-java/tree/v1.65.0/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.65.0/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.66.0/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.66.0/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -56,18 +56,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.65.0 + 1.66.0 runtime io.grpc grpc-protobuf - 1.65.0 + 1.66.0 io.grpc grpc-stub - 1.65.0 + 1.66.0 org.apache.tomcat @@ -79,18 +79,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: Or for Gradle with non-Android, add to your dependencies: ```gradle -runtimeOnly 'io.grpc:grpc-netty-shaded:1.65.0' -implementation 'io.grpc:grpc-protobuf:1.65.0' -implementation 'io.grpc:grpc-stub:1.65.0' +runtimeOnly 'io.grpc:grpc-netty-shaded:1.66.0' +implementation 'io.grpc:grpc-protobuf:1.66.0' +implementation 'io.grpc:grpc-stub:1.66.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.65.0' -implementation 'io.grpc:grpc-protobuf-lite:1.65.0' -implementation 'io.grpc:grpc-stub:1.65.0' +implementation 'io.grpc:grpc-okhttp:1.66.0' +implementation 'io.grpc:grpc-protobuf-lite:1.66.0' +implementation 'io.grpc:grpc-stub:1.66.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` @@ -99,7 +99,7 @@ For [Bazel](https://bazel.build), you can either (with the GAVs from above), or use `@io_grpc_grpc_java//api` et al (see below). [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.65.0 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.66.0 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -129,9 +129,9 @@ For protobuf-based codegen integrated with the Maven build system, you can use protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:3.25.1:exe:${os.detected.classifier} + com.google.protobuf:protoc:3.25.3:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.65.0:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.66.0:exe:${os.detected.classifier} @@ -157,11 +157,11 @@ plugins { protobuf { protoc { - artifact = "com.google.protobuf:protoc:3.25.1" + artifact = "com.google.protobuf:protoc:3.25.3" } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.65.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0' } } generateProtoTasks { @@ -190,11 +190,11 @@ plugins { protobuf { protoc { - artifact = "com.google.protobuf:protoc:3.25.1" + artifact = "com.google.protobuf:protoc:3.25.3" } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.65.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0' } } generateProtoTasks { diff --git a/SECURITY.md b/SECURITY.md index 774579bf68b..47b54f1ef47 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -398,7 +398,8 @@ grpc-netty version | netty-handler version | netty-tcnative-boringssl-static ver 1.56.x | 4.1.87.Final | 2.0.61.Final 1.57.x-1.58.x | 4.1.93.Final | 2.0.61.Final 1.59.x | 4.1.97.Final | 2.0.61.Final -1.60.x- | 4.1.100.Final | 2.0.61.Final +1.60.x-1.65.x | 4.1.100.Final | 2.0.61.Final +1.66.x- | 4.1.110.Final | 2.0.65.Final _(grpc-netty-shaded avoids issues with keeping these versions in sync.)_ diff --git a/build.gradle b/build.gradle index 74cfacb800a..76449ec0107 100644 --- a/build.gradle +++ b/build.gradle @@ -21,7 +21,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.67.0-SNAPSHOT" // CURRENT_GRPC_VERSION + version = "1.66.0-SNAPSHOT" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index 75e9e0b47e0..5666abe8fda 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.67.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.66.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated @java.lang.Deprecated diff --git a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index 3852b6ee547..52e2a772414 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.67.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.66.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index 593bdbce13f..fa488f30ef8 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -219,7 +219,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter 
ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - public static final String IMPLEMENTATION_VERSION = "1.67.0-SNAPSHOT"; // CURRENT_GRPC_VERSION + public static final String IMPLEMENTATION_VERSION = "1.66.0-SNAPSHOT"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index 0ca032fb0e4..64e95de4738 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -34,7 +34,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -54,12 +54,12 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' testImplementation 'junit:junit:4.13.2' testImplementation 'com.google.truth:truth:1.1.5' - testImplementation 'io.grpc:grpc-testing:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index 0f1e8b4047b..f9433f14010 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index c33135233ea..2431b473f29 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index e8e2e8cac29..699c8dd9d68 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -33,7 +33,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -53,8 +53,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/build.gradle b/examples/build.gradle index 076e0c4a25b..c9213cc6a21 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 3c998586bb6..06b7ac501d0 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-debug/build.gradle b/examples/example-debug/build.gradle index ca151a13c1a..624483f663e 100644 --- a/examples/example-debug/build.gradle +++ b/examples/example-debug/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' dependencies { diff --git a/examples/example-debug/pom.xml b/examples/example-debug/pom.xml index 10ccf834d86..5aa8065ad31 100644 --- a/examples/example-debug/pom.xml +++ b/examples/example-debug/pom.xml @@ -6,13 +6,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT example-debug https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index 40e72afad82..c43443c3860 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index 1e58e21e975..d91eeb15ded 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-gcp-csm-observability/build.gradle b/examples/example-gcp-csm-observability/build.gradle index 5de2b1995e2..a24490918b5 100644 --- a/examples/example-gcp-csm-observability/build.gradle +++ b/examples/example-gcp-csm-observability/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-gcp-observability/build.gradle b/examples/example-gcp-observability/build.gradle index 0462c987f52..d6dd1aedc6e 100644 --- a/examples/example-gcp-observability/build.gradle +++ b/examples/example-gcp-observability/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle index ab45ee2dc5b..ee5e5cf5c70 100644 --- a/examples/example-hostname/build.gradle +++ b/examples/example-hostname/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' dependencies { diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index 19b5f8b3c20..05131b89978 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -6,13 +6,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT example-hostname https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-jwt-auth/build.gradle b/examples/example-jwt-auth/build.gradle index 6fdd4498c7d..2ad3c91f190 100644 --- a/examples/example-jwt-auth/build.gradle +++ b/examples/example-jwt-auth/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-jwt-auth/pom.xml b/examples/example-jwt-auth/pom.xml index ad530e33aa7..01cf0edce28 100644 --- a/examples/example-jwt-auth/pom.xml +++ b/examples/example-jwt-auth/pom.xml @@ -7,13 +7,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT example-jwt-auth https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/examples/example-oauth/build.gradle b/examples/example-oauth/build.gradle index 255633b4f9f..23a6633e264 100644 --- a/examples/example-oauth/build.gradle +++ b/examples/example-oauth/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-oauth/pom.xml b/examples/example-oauth/pom.xml index 2c38a05b3e4..afd45aecd39 100644 --- a/examples/example-oauth/pom.xml +++ b/examples/example-oauth/pom.xml @@ -7,13 +7,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT example-oauth https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/examples/example-opentelemetry/build.gradle b/examples/example-opentelemetry/build.gradle index 00f7dc101bf..55d6685d771 100644 --- a/examples/example-opentelemetry/build.gradle +++ b/examples/example-opentelemetry/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-orca/build.gradle b/examples/example-orca/build.gradle index 22feb8cae42..f3eae10ace4 100644 --- a/examples/example-orca/build.gradle +++ b/examples/example-orca/build.gradle @@ -18,7 +18,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-reflection/build.gradle b/examples/example-reflection/build.gradle index 78821391911..0b5c99898ed 100644 --- a/examples/example-reflection/build.gradle +++ b/examples/example-reflection/build.gradle @@ -18,7 +18,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-servlet/build.gradle b/examples/example-servlet/build.gradle index 9542ba0277f..b73d21fbc4c 100644 --- a/examples/example-servlet/build.gradle +++ b/examples/example-servlet/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 94257af4758..3791cc03271 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index bc9c0a7a8ee..1263b347030 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT example-tls https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle index 2554adb0033..9807b1f8b74 100644 --- a/examples/example-xds/build.gradle +++ b/examples/example-xds/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/pom.xml b/examples/pom.xml index 2b25d13b50c..a71e9d449c3 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT examples https://github.com/grpc/grpc-java UTF-8 - 1.67.0-SNAPSHOT + 1.66.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 78550e9c95e..4eee9a6018e 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,8 +1,9 @@ [versions] -netty = '4.1.100.Final' +googleauth = "1.22.0" +netty = '4.1.110.Final' # Keep the following references of tcnative version in sync whenever it's updated: # SECURITY.md -nettytcnative = '2.0.61.Final' +nettytcnative = '2.0.65.Final' opencensus = "0.31.1" # Not upgrading to 4.x as it is not yet ABI compatible. # https://github.com/protocolbuffers/protobuf/issues/17247 diff --git a/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java b/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java index 7a5e4b43c8b..89803998925 100644 --- a/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java +++ b/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java @@ -79,7 +79,7 @@ public void nettyResourcesUpdated() throws IOException { InputStream inputStream = NettyChannelBuilder.class.getClassLoader() .getResourceAsStream( "META-INF/native-image/io.grpc.netty.shaded.io.netty/netty-transport/" - + "reflection-config.json"); + + "reflect-config.json"); assertThat(inputStream).isNotNull(); Scanner s = new Scanner(inputStream, StandardCharsets.UTF_8.name()).useDelimiter("\\A"); diff --git a/repositories.bzl b/repositories.bzl index af3acc8ddcf..455e9dcf3ca 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -26,20 +26,20 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.4.2", "com.squareup.okhttp:okhttp:2.7.5", "com.squareup.okio:okio:2.10.0", # 3.0+ needs swapping to -jvm; need work to avoid flag-day - "io.netty:netty-buffer:4.1.100.Final", - "io.netty:netty-codec-http2:4.1.100.Final", - "io.netty:netty-codec-http:4.1.100.Final", - "io.netty:netty-codec-socks:4.1.100.Final", - "io.netty:netty-codec:4.1.100.Final", - "io.netty:netty-common:4.1.100.Final", - "io.netty:netty-handler-proxy:4.1.100.Final", - "io.netty:netty-handler:4.1.100.Final", - "io.netty:netty-resolver:4.1.100.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.61.Final", - "io.netty:netty-tcnative-classes:2.0.61.Final", - "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final", - "io.netty:netty-transport-native-unix-common:4.1.100.Final", - "io.netty:netty-transport:4.1.100.Final", + "io.netty:netty-buffer:4.1.110.Final", + "io.netty:netty-codec-http2:4.1.110.Final", + "io.netty:netty-codec-http:4.1.110.Final", + "io.netty:netty-codec-socks:4.1.110.Final", + "io.netty:netty-codec:4.1.110.Final", + "io.netty:netty-common:4.1.110.Final", + "io.netty:netty-handler-proxy:4.1.110.Final", + "io.netty:netty-handler:4.1.110.Final", + "io.netty:netty-resolver:4.1.110.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.65.Final", + "io.netty:netty-tcnative-classes:2.0.65.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.110.Final", + "io.netty:netty-transport-native-unix-common:4.1.110.Final", + "io.netty:netty-transport:4.1.110.Final", "io.opencensus:opencensus-api:0.31.0", 
"io.opencensus:opencensus-contrib-grpc-metrics:0.31.0", "io.perfmark:perfmark-api:0.27.0", From 15456f8f0afbd884bf930c2c71d6beef8bb5b99f Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 28 Jun 2024 23:24:11 -0700 Subject: [PATCH 30/53] core: In PF, pass around SubchannelData instead of Subchannel Each usage of the subchannel immediately looked up the SubchannelData. --- .../internal/PickFirstLeafLoadBalancer.java | 54 +++++++++---------- 1 file changed, 24 insertions(+), 30 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/PickFirstLeafLoadBalancer.java b/core/src/main/java/io/grpc/internal/PickFirstLeafLoadBalancer.java index c3f9a52404e..253422d3dbd 100644 --- a/core/src/main/java/io/grpc/internal/PickFirstLeafLoadBalancer.java +++ b/core/src/main/java/io/grpc/internal/PickFirstLeafLoadBalancer.java @@ -214,14 +214,13 @@ public void handleNameResolutionError(Status error) { updateBalancingState(TRANSIENT_FAILURE, new Picker(PickResult.withError(error))); } - void processSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateInfo) { + void processSubchannelState(SubchannelData subchannelData, ConnectivityStateInfo stateInfo) { ConnectivityState newState = stateInfo.getState(); - SubchannelData subchannelData = subchannels.get(getAddress(subchannel)); // Shutdown channels/previously relevant subchannels can still callback with state updates. // To prevent pickers from returning these obsolete subchannels, this logic // is included to check if the current list of active subchannels includes this subchannel. - if (subchannelData == null || subchannelData.getSubchannel() != subchannel) { + if (subchannelData != subchannels.get(getAddress(subchannelData.subchannel))) { return; } @@ -269,7 +268,7 @@ void processSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateIn case READY: shutdownRemaining(subchannelData); - addressIndex.seekTo(getAddress(subchannel)); + addressIndex.seekTo(getAddress(subchannelData.subchannel)); rawConnectivityState = READY; updateHealthCheckedState(subchannelData); break; @@ -277,7 +276,7 @@ void processSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateIn case TRANSIENT_FAILURE: // If we are looking at current channel, request a connection if possible if (addressIndex.isValid() - && subchannels.get(addressIndex.getCurrentAddress()).getSubchannel() == subchannel) { + && subchannels.get(addressIndex.getCurrentAddress()) == subchannelData) { if (addressIndex.increment()) { cancelScheduleTask(); requestConnection(); // is recursive so might hit the end of the addresses @@ -317,7 +316,7 @@ private void updateHealthCheckedState(SubchannelData subchannelData) { new FixedResultPicker(PickResult.withSubchannel(subchannelData.subchannel))); } else if (subchannelData.getHealthState() == TRANSIENT_FAILURE) { updateBalancingState(TRANSIENT_FAILURE, new Picker(PickResult.withError( - subchannelData.healthListener.healthStateInfo.getStatus()))); + subchannelData.healthStateInfo.getStatus()))); } else if (concludedState != TRANSIENT_FAILURE) { updateBalancingState(subchannelData.getHealthState(), new Picker(PickResult.withNoResult())); @@ -377,25 +376,24 @@ public void requestConnection() { return; } - Subchannel subchannel; - SocketAddress currentAddress; - currentAddress = addressIndex.getCurrentAddress(); - subchannel = subchannels.containsKey(currentAddress) - ? 
subchannels.get(currentAddress).getSubchannel() - : createNewSubchannel(currentAddress, addressIndex.getCurrentEagAttributes()); + SocketAddress currentAddress = addressIndex.getCurrentAddress(); + SubchannelData subchannelData = subchannels.get(currentAddress); + if (subchannelData == null) { + subchannelData = createNewSubchannel(currentAddress, addressIndex.getCurrentEagAttributes()); + } - ConnectivityState subchannelState = subchannels.get(currentAddress).getState(); + ConnectivityState subchannelState = subchannelData.getState(); switch (subchannelState) { case IDLE: - subchannel.requestConnection(); - subchannels.get(currentAddress).updateState(CONNECTING); + subchannelData.subchannel.requestConnection(); + subchannelData.updateState(CONNECTING); scheduleNextConnection(); break; case CONNECTING: if (enableHappyEyeballs) { scheduleNextConnection(); } else { - subchannel.requestConnection(); + subchannelData.subchannel.requestConnection(); } break; case TRANSIENT_FAILURE: @@ -455,7 +453,7 @@ private void cancelScheduleTask() { } } - private Subchannel createNewSubchannel(SocketAddress addr, Attributes attrs) { + private SubchannelData createNewSubchannel(SocketAddress addr, Attributes attrs) { HealthListener hcListener = new HealthListener(); final Subchannel subchannel = helper.createSubchannel( CreateSubchannelArgs.newBuilder() @@ -467,15 +465,15 @@ private Subchannel createNewSubchannel(SocketAddress addr, Attributes attrs) { log.warning("Was not able to create subchannel for " + addr); throw new IllegalStateException("Can't create subchannel"); } - SubchannelData subchannelData = new SubchannelData(subchannel, IDLE, hcListener); + SubchannelData subchannelData = new SubchannelData(subchannel, IDLE); hcListener.subchannelData = subchannelData; subchannels.put(addr, subchannelData); Attributes scAttrs = subchannel.getAttributes(); if (scAttrs.get(LoadBalancer.HAS_HEALTH_PRODUCER_LISTENER_KEY) == null) { - hcListener.healthStateInfo = ConnectivityStateInfo.forNonError(READY); + subchannelData.healthStateInfo = ConnectivityStateInfo.forNonError(READY); } - subchannel.start(stateInfo -> processSubchannelState(subchannel, stateInfo)); - return subchannel; + subchannel.start(stateInfo -> processSubchannelState(subchannelData, stateInfo)); + return subchannelData; } private boolean isPassComplete() { @@ -492,17 +490,15 @@ private boolean isPassComplete() { } private final class HealthListener implements SubchannelStateListener { - private ConnectivityStateInfo healthStateInfo = ConnectivityStateInfo.forNonError(IDLE); private SubchannelData subchannelData; @Override public void onSubchannelState(ConnectivityStateInfo newState) { log.log(Level.FINE, "Received health status {0} for subchannel {1}", new Object[]{newState, subchannelData.subchannel}); - healthStateInfo = newState; + subchannelData.healthStateInfo = newState; try { - SubchannelData curSubChanData = subchannels.get(addressIndex.getCurrentAddress()); - if (curSubChanData != null && curSubChanData.healthListener == this) { + if (subchannelData == subchannels.get(addressIndex.getCurrentAddress())) { updateHealthCheckedState(subchannelData); } } catch (IllegalStateException e) { @@ -663,14 +659,12 @@ public int size() { private static final class SubchannelData { private final Subchannel subchannel; private ConnectivityState state; - private final HealthListener healthListener; private boolean completedConnectivityAttempt = false; + private ConnectivityStateInfo healthStateInfo = ConnectivityStateInfo.forNonError(IDLE); - public 
SubchannelData(Subchannel subchannel, ConnectivityState state, - HealthListener subchannelHealthListener) { + public SubchannelData(Subchannel subchannel, ConnectivityState state) { this.subchannel = subchannel; this.state = state; - this.healthListener = subchannelHealthListener; } public Subchannel getSubchannel() { @@ -695,7 +689,7 @@ private void updateState(ConnectivityState newState) { } private ConnectivityState getHealthState() { - return healthListener.healthStateInfo.getState(); + return healthStateInfo.getState(); } } From 9bed655c568b4f09a32b3910745a949f5f08d956 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 2 Aug 2024 14:46:14 -0700 Subject: [PATCH 31/53] Revert "Netty upgrade to 4.1.110 in grpc-java (#11273)" This reverts commit f9b072cfe24daf3661994bd3a83825bee4069927. Changes from the release process got mixed in with the commit. --- MODULE.bazel | 30 ++++++++-------- README.md | 36 +++++++++---------- SECURITY.md | 3 +- build.gradle | 2 +- .../golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- .../main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/android/clientcache/app/build.gradle | 10 +++--- examples/android/helloworld/app/build.gradle | 8 ++--- examples/android/routeguide/app/build.gradle | 8 ++--- examples/android/strictmode/app/build.gradle | 8 ++--- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-debug/build.gradle | 2 +- examples/example-debug/pom.xml | 4 +-- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 +-- .../build.gradle | 2 +- .../example-gcp-observability/build.gradle | 2 +- examples/example-hostname/build.gradle | 2 +- examples/example-hostname/pom.xml | 4 +-- examples/example-jwt-auth/build.gradle | 2 +- examples/example-jwt-auth/pom.xml | 4 +-- examples/example-oauth/build.gradle | 2 +- examples/example-oauth/pom.xml | 4 +-- examples/example-opentelemetry/build.gradle | 2 +- examples/example-orca/build.gradle | 2 +- examples/example-reflection/build.gradle | 2 +- examples/example-servlet/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 +-- examples/example-xds/build.gradle | 2 +- examples/pom.xml | 4 +-- gradle/libs.versions.toml | 5 ++- .../io/grpc/netty/shaded/ShadingTest.java | 2 +- repositories.bzl | 28 +++++++-------- 36 files changed, 101 insertions(+), 103 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 8260788c5cb..2b5d85490f3 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -2,7 +2,7 @@ module( name = "grpc-java", compatibility_level = 0, repo_name = "io_grpc_grpc_java", - version = "1.66.0-SNAPSHOT", # CURRENT_GRPC_VERSION + version = "1.67.0-SNAPSHOT", # CURRENT_GRPC_VERSION ) # GRPC_DEPS_START @@ -22,20 +22,20 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.4.2", "com.squareup.okhttp:okhttp:2.7.5", "com.squareup.okio:okio:2.10.0", # 3.0+ needs swapping to -jvm; need work to avoid flag-day - "io.netty:netty-buffer:4.1.110.Final", - "io.netty:netty-codec-http2:4.1.110.Final", - "io.netty:netty-codec-http:4.1.110.Final", - "io.netty:netty-codec-socks:4.1.110.Final", - "io.netty:netty-codec:4.1.110.Final", - "io.netty:netty-common:4.1.110.Final", - "io.netty:netty-handler-proxy:4.1.110.Final", - "io.netty:netty-handler:4.1.110.Final", - "io.netty:netty-resolver:4.1.110.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.65.Final", - "io.netty:netty-tcnative-classes:2.0.65.Final", - 
"io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.110.Final", - "io.netty:netty-transport-native-unix-common:4.1.110.Final", - "io.netty:netty-transport:4.1.110.Final", + "io.netty:netty-buffer:4.1.100.Final", + "io.netty:netty-codec-http2:4.1.100.Final", + "io.netty:netty-codec-http:4.1.100.Final", + "io.netty:netty-codec-socks:4.1.100.Final", + "io.netty:netty-codec:4.1.100.Final", + "io.netty:netty-common:4.1.100.Final", + "io.netty:netty-handler-proxy:4.1.100.Final", + "io.netty:netty-handler:4.1.100.Final", + "io.netty:netty-resolver:4.1.100.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.61.Final", + "io.netty:netty-tcnative-classes:2.0.61.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final", + "io.netty:netty-transport-native-unix-common:4.1.100.Final", + "io.netty:netty-transport:4.1.100.Final", "io.opencensus:opencensus-api:0.31.0", "io.opencensus:opencensus-contrib-grpc-metrics:0.31.0", "io.perfmark:perfmark-api:0.27.0", diff --git a/README.md b/README.md index cb38ad66394..fef37c1c3bb 100644 --- a/README.md +++ b/README.md @@ -44,8 +44,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/languages/java/quickstart) or the more explanatory [gRPC basics](https://grpc.io/docs/languages/java/basics). -The [examples](https://github.com/grpc/grpc-java/tree/v1.66.0/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.66.0/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.65.0/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.65.0/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -56,18 +56,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.66.0 + 1.65.0 runtime io.grpc grpc-protobuf - 1.66.0 + 1.65.0 io.grpc grpc-stub - 1.66.0 + 1.65.0 org.apache.tomcat @@ -79,18 +79,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: Or for Gradle with non-Android, add to your dependencies: ```gradle -runtimeOnly 'io.grpc:grpc-netty-shaded:1.66.0' -implementation 'io.grpc:grpc-protobuf:1.66.0' -implementation 'io.grpc:grpc-stub:1.66.0' +runtimeOnly 'io.grpc:grpc-netty-shaded:1.65.0' +implementation 'io.grpc:grpc-protobuf:1.65.0' +implementation 'io.grpc:grpc-stub:1.65.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.66.0' -implementation 'io.grpc:grpc-protobuf-lite:1.66.0' -implementation 'io.grpc:grpc-stub:1.66.0' +implementation 'io.grpc:grpc-okhttp:1.65.0' +implementation 'io.grpc:grpc-protobuf-lite:1.65.0' +implementation 'io.grpc:grpc-stub:1.65.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` @@ -99,7 +99,7 @@ For [Bazel](https://bazel.build), you can either (with the GAVs from above), or use `@io_grpc_grpc_java//api` et al (see below). [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.66.0 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.65.0 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -129,9 +129,9 @@ For protobuf-based codegen integrated with the Maven build system, you can use protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:3.25.3:exe:${os.detected.classifier} + com.google.protobuf:protoc:3.25.1:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.66.0:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.65.0:exe:${os.detected.classifier} @@ -157,11 +157,11 @@ plugins { protobuf { protoc { - artifact = "com.google.protobuf:protoc:3.25.3" + artifact = "com.google.protobuf:protoc:3.25.1" } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.65.0' } } generateProtoTasks { @@ -190,11 +190,11 @@ plugins { protobuf { protoc { - artifact = "com.google.protobuf:protoc:3.25.3" + artifact = "com.google.protobuf:protoc:3.25.1" } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.65.0' } } generateProtoTasks { diff --git a/SECURITY.md b/SECURITY.md index 47b54f1ef47..774579bf68b 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -398,8 +398,7 @@ grpc-netty version | netty-handler version | netty-tcnative-boringssl-static ver 1.56.x | 4.1.87.Final | 2.0.61.Final 1.57.x-1.58.x | 4.1.93.Final | 2.0.61.Final 1.59.x | 4.1.97.Final | 2.0.61.Final -1.60.x-1.65.x | 4.1.100.Final | 2.0.61.Final -1.66.x- | 4.1.110.Final | 2.0.65.Final +1.60.x- | 4.1.100.Final | 2.0.61.Final _(grpc-netty-shaded avoids issues with keeping these versions in sync.)_ diff --git a/build.gradle b/build.gradle index 76449ec0107..74cfacb800a 100644 --- a/build.gradle +++ b/build.gradle @@ -21,7 +21,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.66.0-SNAPSHOT" // CURRENT_GRPC_VERSION + version = "1.67.0-SNAPSHOT" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index 5666abe8fda..75e9e0b47e0 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.66.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.67.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated @java.lang.Deprecated diff --git a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index 52e2a772414..3852b6ee547 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.66.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.67.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index fa488f30ef8..593bdbce13f 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -219,7 +219,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter 
ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - public static final String IMPLEMENTATION_VERSION = "1.66.0-SNAPSHOT"; // CURRENT_GRPC_VERSION + public static final String IMPLEMENTATION_VERSION = "1.67.0-SNAPSHOT"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index 64e95de4738..0ca032fb0e4 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -34,7 +34,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -54,12 +54,12 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' testImplementation 'junit:junit:4.13.2' testImplementation 'com.google.truth:truth:1.1.5' - testImplementation 'io.grpc:grpc-testing:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index f9433f14010..0f1e8b4047b 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index 2431b473f29..c33135233ea 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index 699c8dd9d68..e8e2e8cac29 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -33,7 +33,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -53,8 +53,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/build.gradle b/examples/build.gradle index c9213cc6a21..076e0c4a25b 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 06b7ac501d0..3c998586bb6 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-debug/build.gradle b/examples/example-debug/build.gradle index 624483f663e..ca151a13c1a 100644 --- a/examples/example-debug/build.gradle +++ b/examples/example-debug/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' dependencies { diff --git a/examples/example-debug/pom.xml b/examples/example-debug/pom.xml index 5aa8065ad31..10ccf834d86 100644 --- a/examples/example-debug/pom.xml +++ b/examples/example-debug/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-debug https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index c43443c3860..40e72afad82 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index d91eeb15ded..1e58e21e975 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-gcp-csm-observability/build.gradle b/examples/example-gcp-csm-observability/build.gradle index a24490918b5..5de2b1995e2 100644 --- a/examples/example-gcp-csm-observability/build.gradle +++ b/examples/example-gcp-csm-observability/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-gcp-observability/build.gradle b/examples/example-gcp-observability/build.gradle index d6dd1aedc6e..0462c987f52 100644 --- a/examples/example-gcp-observability/build.gradle +++ b/examples/example-gcp-observability/build.gradle @@ -25,7 +25,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle index ee5e5cf5c70..ab45ee2dc5b 100644 --- a/examples/example-hostname/build.gradle +++ b/examples/example-hostname/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' dependencies { diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index 05131b89978..19b5f8b3c20 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-hostname https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-jwt-auth/build.gradle b/examples/example-jwt-auth/build.gradle index 2ad3c91f190..6fdd4498c7d 100644 --- a/examples/example-jwt-auth/build.gradle +++ b/examples/example-jwt-auth/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-jwt-auth/pom.xml b/examples/example-jwt-auth/pom.xml index 01cf0edce28..ad530e33aa7 100644 --- a/examples/example-jwt-auth/pom.xml +++ b/examples/example-jwt-auth/pom.xml @@ -7,13 +7,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-jwt-auth https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/examples/example-oauth/build.gradle b/examples/example-oauth/build.gradle index 23a6633e264..255633b4f9f 100644 --- a/examples/example-oauth/build.gradle +++ b/examples/example-oauth/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.3' def protocVersion = protobufVersion diff --git a/examples/example-oauth/pom.xml b/examples/example-oauth/pom.xml index afd45aecd39..2c38a05b3e4 100644 --- a/examples/example-oauth/pom.xml +++ b/examples/example-oauth/pom.xml @@ -7,13 +7,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-oauth https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/examples/example-opentelemetry/build.gradle b/examples/example-opentelemetry/build.gradle index 55d6685d771..00f7dc101bf 100644 --- a/examples/example-opentelemetry/build.gradle +++ b/examples/example-opentelemetry/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-orca/build.gradle b/examples/example-orca/build.gradle index f3eae10ace4..22feb8cae42 100644 --- a/examples/example-orca/build.gradle +++ b/examples/example-orca/build.gradle @@ -18,7 +18,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-reflection/build.gradle b/examples/example-reflection/build.gradle index 0b5c99898ed..78821391911 100644 --- a/examples/example-reflection/build.gradle +++ b/examples/example-reflection/build.gradle @@ -18,7 +18,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-servlet/build.gradle b/examples/example-servlet/build.gradle index b73d21fbc4c..9542ba0277f 100644 --- a/examples/example-servlet/build.gradle +++ b/examples/example-servlet/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 3791cc03271..94257af4758 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -24,7 +24,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index 1263b347030..bc9c0a7a8ee 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT example-tls https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 1.8 diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle index 9807b1f8b74..2554adb0033 100644 --- a/examples/example-xds/build.gradle +++ b/examples/example-xds/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.66.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.3' dependencies { diff --git a/examples/pom.xml b/examples/pom.xml index a71e9d449c3..2b25d13b50c 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT examples https://github.com/grpc/grpc-java UTF-8 - 1.66.0-SNAPSHOT + 1.67.0-SNAPSHOT 3.25.3 3.25.3 diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 4eee9a6018e..78550e9c95e 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,9 +1,8 @@ [versions] -googleauth = "1.22.0" -netty = '4.1.110.Final' +netty = '4.1.100.Final' # Keep the following references of tcnative version in sync whenever it's updated: # SECURITY.md -nettytcnative = '2.0.65.Final' +nettytcnative = '2.0.61.Final' opencensus = "0.31.1" # Not upgrading to 4.x as it is not yet ABI compatible. # https://github.com/protocolbuffers/protobuf/issues/17247 diff --git a/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java b/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java index 89803998925..7a5e4b43c8b 100644 --- a/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java +++ b/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java @@ -79,7 +79,7 @@ public void nettyResourcesUpdated() throws IOException { InputStream inputStream = NettyChannelBuilder.class.getClassLoader() .getResourceAsStream( "META-INF/native-image/io.grpc.netty.shaded.io.netty/netty-transport/" - + "reflect-config.json"); + + "reflection-config.json"); assertThat(inputStream).isNotNull(); Scanner s = new Scanner(inputStream, StandardCharsets.UTF_8.name()).useDelimiter("\\A"); diff --git a/repositories.bzl b/repositories.bzl index 455e9dcf3ca..af3acc8ddcf 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -26,20 +26,20 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.4.2", "com.squareup.okhttp:okhttp:2.7.5", "com.squareup.okio:okio:2.10.0", # 3.0+ needs swapping to -jvm; need work to avoid flag-day - "io.netty:netty-buffer:4.1.110.Final", - "io.netty:netty-codec-http2:4.1.110.Final", - "io.netty:netty-codec-http:4.1.110.Final", - "io.netty:netty-codec-socks:4.1.110.Final", - "io.netty:netty-codec:4.1.110.Final", - "io.netty:netty-common:4.1.110.Final", - "io.netty:netty-handler-proxy:4.1.110.Final", - "io.netty:netty-handler:4.1.110.Final", - "io.netty:netty-resolver:4.1.110.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.65.Final", - "io.netty:netty-tcnative-classes:2.0.65.Final", - "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.110.Final", - "io.netty:netty-transport-native-unix-common:4.1.110.Final", - "io.netty:netty-transport:4.1.110.Final", + "io.netty:netty-buffer:4.1.100.Final", + "io.netty:netty-codec-http2:4.1.100.Final", + "io.netty:netty-codec-http:4.1.100.Final", + "io.netty:netty-codec-socks:4.1.100.Final", + "io.netty:netty-codec:4.1.100.Final", + "io.netty:netty-common:4.1.100.Final", + "io.netty:netty-handler-proxy:4.1.100.Final", + "io.netty:netty-handler:4.1.100.Final", + "io.netty:netty-resolver:4.1.100.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.61.Final", + "io.netty:netty-tcnative-classes:2.0.61.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final", + "io.netty:netty-transport-native-unix-common:4.1.100.Final", + "io.netty:netty-transport:4.1.100.Final", "io.opencensus:opencensus-api:0.31.0", 
"io.opencensus:opencensus-contrib-grpc-metrics:0.31.0", "io.perfmark:perfmark-api:0.27.0", From 06135a074568e0834795314fc422c869f4fe2533 Mon Sep 17 00:00:00 2001 From: Kurt Alfred Kluever Date: Mon, 5 Aug 2024 12:49:03 -0700 Subject: [PATCH 32/53] Migrate from the deprecated `Charsets` constants (in Guava) to the `StandardCharsets` constants (in the JDK) cl/658539667 --- api/src/main/java/io/grpc/Metadata.java | 2 +- api/src/main/java/io/grpc/Status.java | 5 ++--- api/src/test/java/io/grpc/MetadataTest.java | 4 ++-- api/src/testFixtures/java/io/grpc/StringMarshaller.java | 2 +- .../io/grpc/auth/GoogleAuthLibraryCallCredentialsTest.java | 2 +- core/src/main/java/io/grpc/internal/ReadableBuffers.java | 2 +- core/src/main/java/io/grpc/internal/TransportFrameUtil.java | 2 +- .../java/io/grpc/internal/CompositeReadableBufferTest.java | 2 +- .../grpc/internal/Http2ClientStreamTransportStateTest.java | 2 +- .../test/java/io/grpc/internal/ReadableBuffersArrayTest.java | 2 +- .../java/io/grpc/internal/ReadableBuffersByteBufferTest.java | 2 +- core/src/test/java/io/grpc/internal/ReadableBuffersTest.java | 2 +- core/src/test/java/io/grpc/internal/ServerCallImplTest.java | 2 +- .../test/java/io/grpc/internal/TransportFrameUtilTest.java | 4 ++-- .../java/io/grpc/internal/AbstractTransportTest.java | 2 +- .../java/io/grpc/internal/ReadableBufferTestBase.java | 2 +- .../grpc/testing/integration/GrpclbFallbackTestClient.java | 2 +- netty/src/main/java/io/grpc/netty/GrpcHttp2HeadersUtils.java | 2 +- .../io/grpc/okhttp/ExceptionHandlingFrameWriterTest.java | 2 +- .../test/java/io/grpc/okhttp/OkHttpClientTransportTest.java | 2 +- .../java/io/grpc/okhttp/OkHttpProtocolNegotiatorTest.java | 2 +- .../test/java/io/grpc/okhttp/OkHttpServerTransportTest.java | 2 +- .../io/grpc/protobuf/services/BinaryLogProviderTest.java | 2 +- .../main/java/io/grpc/internal/testing/StatsTestUtils.java | 2 +- .../internal/rbac/engine/GrpcAuthorizationEngineTest.java | 2 +- 25 files changed, 28 insertions(+), 29 deletions(-) diff --git a/api/src/main/java/io/grpc/Metadata.java b/api/src/main/java/io/grpc/Metadata.java index 58fcefe1373..fba2659776b 100644 --- a/api/src/main/java/io/grpc/Metadata.java +++ b/api/src/main/java/io/grpc/Metadata.java @@ -16,9 +16,9 @@ package io.grpc; -import static com.google.common.base.Charsets.US_ASCII; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; +import static java.nio.charset.StandardCharsets.US_ASCII; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/api/src/main/java/io/grpc/Status.java b/api/src/main/java/io/grpc/Status.java index 8e7f0b835c2..5d7dd30df01 100644 --- a/api/src/main/java/io/grpc/Status.java +++ b/api/src/main/java/io/grpc/Status.java @@ -16,10 +16,10 @@ package io.grpc; -import static com.google.common.base.Charsets.US_ASCII; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Throwables.getStackTraceAsString; +import static java.nio.charset.StandardCharsets.US_ASCII; +import static java.nio.charset.StandardCharsets.UTF_8; import com.google.common.base.MoreObjects; import com.google.common.base.Objects; @@ -34,7 +34,6 @@ import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; - /** * Defines the status of an operation by providing a standard {@link Code} in conjunction with an * optional descriptive 
message. Instances of {@code Status} are created by starting with the diff --git a/api/src/test/java/io/grpc/MetadataTest.java b/api/src/test/java/io/grpc/MetadataTest.java index 84cd558a36e..14ba8ca9b23 100644 --- a/api/src/test/java/io/grpc/MetadataTest.java +++ b/api/src/test/java/io/grpc/MetadataTest.java @@ -16,8 +16,8 @@ package io.grpc; -import static com.google.common.base.Charsets.US_ASCII; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.US_ASCII; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; diff --git a/api/src/testFixtures/java/io/grpc/StringMarshaller.java b/api/src/testFixtures/java/io/grpc/StringMarshaller.java index af53d420e2b..e8358b76333 100644 --- a/api/src/testFixtures/java/io/grpc/StringMarshaller.java +++ b/api/src/testFixtures/java/io/grpc/StringMarshaller.java @@ -16,7 +16,7 @@ package io.grpc; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import com.google.common.io.ByteStreams; import java.io.ByteArrayInputStream; diff --git a/auth/src/test/java/io/grpc/auth/GoogleAuthLibraryCallCredentialsTest.java b/auth/src/test/java/io/grpc/auth/GoogleAuthLibraryCallCredentialsTest.java index 6c350894929..1e8c27bca25 100644 --- a/auth/src/test/java/io/grpc/auth/GoogleAuthLibraryCallCredentialsTest.java +++ b/auth/src/test/java/io/grpc/auth/GoogleAuthLibraryCallCredentialsTest.java @@ -16,7 +16,7 @@ package io.grpc.auth; -import static com.google.common.base.Charsets.US_ASCII; +import static java.nio.charset.StandardCharsets.US_ASCII; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; diff --git a/core/src/main/java/io/grpc/internal/ReadableBuffers.java b/core/src/main/java/io/grpc/internal/ReadableBuffers.java index c54cb0e67d0..1435be138de 100644 --- a/core/src/main/java/io/grpc/internal/ReadableBuffers.java +++ b/core/src/main/java/io/grpc/internal/ReadableBuffers.java @@ -16,7 +16,7 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import com.google.common.base.Preconditions; import io.grpc.Detachable; diff --git a/core/src/main/java/io/grpc/internal/TransportFrameUtil.java b/core/src/main/java/io/grpc/internal/TransportFrameUtil.java index 51854410843..f3c32416426 100644 --- a/core/src/main/java/io/grpc/internal/TransportFrameUtil.java +++ b/core/src/main/java/io/grpc/internal/TransportFrameUtil.java @@ -16,7 +16,7 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.US_ASCII; +import static java.nio.charset.StandardCharsets.US_ASCII; import com.google.common.io.BaseEncoding; import io.grpc.InternalMetadata; diff --git a/core/src/test/java/io/grpc/internal/CompositeReadableBufferTest.java b/core/src/test/java/io/grpc/internal/CompositeReadableBufferTest.java index 011d83b548a..8d9248a8910 100644 --- a/core/src/test/java/io/grpc/internal/CompositeReadableBufferTest.java +++ b/core/src/test/java/io/grpc/internal/CompositeReadableBufferTest.java @@ -16,7 +16,7 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertFalse; diff --git a/core/src/test/java/io/grpc/internal/Http2ClientStreamTransportStateTest.java b/core/src/test/java/io/grpc/internal/Http2ClientStreamTransportStateTest.java index e587a7709ec..9d32bf1af7d 100644 --- a/core/src/test/java/io/grpc/internal/Http2ClientStreamTransportStateTest.java +++ b/core/src/test/java/io/grpc/internal/Http2ClientStreamTransportStateTest.java @@ -16,9 +16,9 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.US_ASCII; import static io.grpc.internal.ClientStreamListener.RpcProgress.PROCESSED; import static io.grpc.internal.GrpcUtil.DEFAULT_MAX_MESSAGE_SIZE; +import static java.nio.charset.StandardCharsets.US_ASCII; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; diff --git a/core/src/test/java/io/grpc/internal/ReadableBuffersArrayTest.java b/core/src/test/java/io/grpc/internal/ReadableBuffersArrayTest.java index d5c4fa77fd8..5b0fb02c611 100644 --- a/core/src/test/java/io/grpc/internal/ReadableBuffersArrayTest.java +++ b/core/src/test/java/io/grpc/internal/ReadableBuffersArrayTest.java @@ -16,8 +16,8 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; import static io.grpc.internal.ReadableBuffers.wrap; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; diff --git a/core/src/test/java/io/grpc/internal/ReadableBuffersByteBufferTest.java b/core/src/test/java/io/grpc/internal/ReadableBuffersByteBufferTest.java index a040182c259..67e7aaf9132 100644 --- a/core/src/test/java/io/grpc/internal/ReadableBuffersByteBufferTest.java +++ b/core/src/test/java/io/grpc/internal/ReadableBuffersByteBufferTest.java @@ -16,7 +16,7 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.ByteBuffer; diff --git a/core/src/test/java/io/grpc/internal/ReadableBuffersTest.java b/core/src/test/java/io/grpc/internal/ReadableBuffersTest.java index 2bc5a8a3760..b9135b49503 100644 --- a/core/src/test/java/io/grpc/internal/ReadableBuffersTest.java +++ b/core/src/test/java/io/grpc/internal/ReadableBuffersTest.java @@ -16,7 +16,7 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; diff --git a/core/src/test/java/io/grpc/internal/ServerCallImplTest.java b/core/src/test/java/io/grpc/internal/ServerCallImplTest.java index c3c2ab15e22..652c94a4640 100644 --- a/core/src/test/java/io/grpc/internal/ServerCallImplTest.java +++ b/core/src/test/java/io/grpc/internal/ServerCallImplTest.java @@ -16,8 +16,8 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; import static io.grpc.internal.GrpcUtil.CONTENT_LENGTH_KEY; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; diff --git a/core/src/test/java/io/grpc/internal/TransportFrameUtilTest.java b/core/src/test/java/io/grpc/internal/TransportFrameUtilTest.java index 5fa789df4f3..8b4bc170d52 100644 --- a/core/src/test/java/io/grpc/internal/TransportFrameUtilTest.java +++ 
b/core/src/test/java/io/grpc/internal/TransportFrameUtilTest.java @@ -16,10 +16,10 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.US_ASCII; -import static com.google.common.base.Charsets.UTF_8; import static io.grpc.Metadata.ASCII_STRING_MARSHALLER; import static io.grpc.Metadata.BINARY_BYTE_MARSHALLER; +import static java.nio.charset.StandardCharsets.US_ASCII; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; diff --git a/core/src/testFixtures/java/io/grpc/internal/AbstractTransportTest.java b/core/src/testFixtures/java/io/grpc/internal/AbstractTransportTest.java index 5d7aeca684b..62cbdc4f67b 100644 --- a/core/src/testFixtures/java/io/grpc/internal/AbstractTransportTest.java +++ b/core/src/testFixtures/java/io/grpc/internal/AbstractTransportTest.java @@ -16,8 +16,8 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; diff --git a/core/src/testFixtures/java/io/grpc/internal/ReadableBufferTestBase.java b/core/src/testFixtures/java/io/grpc/internal/ReadableBufferTestBase.java index 97e0df38ae7..202fb7ee8a4 100644 --- a/core/src/testFixtures/java/io/grpc/internal/ReadableBufferTestBase.java +++ b/core/src/testFixtures/java/io/grpc/internal/ReadableBufferTestBase.java @@ -16,7 +16,7 @@ package io.grpc.internal; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; diff --git a/interop-testing/src/main/java/io/grpc/testing/integration/GrpclbFallbackTestClient.java b/interop-testing/src/main/java/io/grpc/testing/integration/GrpclbFallbackTestClient.java index 9fc017c0e35..8ce83f73e6d 100644 --- a/interop-testing/src/main/java/io/grpc/testing/integration/GrpclbFallbackTestClient.java +++ b/interop-testing/src/main/java/io/grpc/testing/integration/GrpclbFallbackTestClient.java @@ -16,7 +16,7 @@ package io.grpc.testing.integration; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import com.google.common.io.CharStreams; diff --git a/netty/src/main/java/io/grpc/netty/GrpcHttp2HeadersUtils.java b/netty/src/main/java/io/grpc/netty/GrpcHttp2HeadersUtils.java index c0d60721a1b..96c4310ae3d 100644 --- a/netty/src/main/java/io/grpc/netty/GrpcHttp2HeadersUtils.java +++ b/netty/src/main/java/io/grpc/netty/GrpcHttp2HeadersUtils.java @@ -31,12 +31,12 @@ package io.grpc.netty; -import static com.google.common.base.Charsets.US_ASCII; import static com.google.common.base.Preconditions.checkArgument; import static io.grpc.netty.Utils.TE_HEADER; import static io.netty.handler.codec.http2.Http2Error.PROTOCOL_ERROR; import static io.netty.handler.codec.http2.Http2Exception.connectionError; import static io.netty.util.AsciiString.isUpperCase; +import static java.nio.charset.StandardCharsets.US_ASCII; import com.google.common.io.BaseEncoding; import com.google.errorprone.annotations.CanIgnoreReturnValue; diff --git a/okhttp/src/test/java/io/grpc/okhttp/ExceptionHandlingFrameWriterTest.java 
b/okhttp/src/test/java/io/grpc/okhttp/ExceptionHandlingFrameWriterTest.java index a9d39088844..8829abac034 100644 --- a/okhttp/src/test/java/io/grpc/okhttp/ExceptionHandlingFrameWriterTest.java +++ b/okhttp/src/test/java/io/grpc/okhttp/ExceptionHandlingFrameWriterTest.java @@ -16,9 +16,9 @@ package io.grpc.okhttp; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.truth.Truth.assertThat; import static io.grpc.okhttp.ExceptionHandlingFrameWriter.getLogLevel; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; diff --git a/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java b/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java index ab7dff98444..987cc09203e 100644 --- a/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java +++ b/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java @@ -16,7 +16,6 @@ package io.grpc.okhttp; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.truth.Truth.assertThat; import static io.grpc.internal.ClientStreamListener.RpcProgress.MISCARRIED; import static io.grpc.internal.ClientStreamListener.RpcProgress.PROCESSED; @@ -25,6 +24,7 @@ import static io.grpc.okhttp.Headers.HTTP_SCHEME_HEADER; import static io.grpc.okhttp.Headers.METHOD_HEADER; import static io.grpc.okhttp.Headers.TE_HEADER; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; diff --git a/okhttp/src/test/java/io/grpc/okhttp/OkHttpProtocolNegotiatorTest.java b/okhttp/src/test/java/io/grpc/okhttp/OkHttpProtocolNegotiatorTest.java index 3a4a21b2467..cc9f30862af 100644 --- a/okhttp/src/test/java/io/grpc/okhttp/OkHttpProtocolNegotiatorTest.java +++ b/okhttp/src/test/java/io/grpc/okhttp/OkHttpProtocolNegotiatorTest.java @@ -16,7 +16,7 @@ package io.grpc.okhttp; -import static com.google.common.base.Charsets.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; diff --git a/okhttp/src/test/java/io/grpc/okhttp/OkHttpServerTransportTest.java b/okhttp/src/test/java/io/grpc/okhttp/OkHttpServerTransportTest.java index 6438cf83a1d..d64d314d7d8 100644 --- a/okhttp/src/test/java/io/grpc/okhttp/OkHttpServerTransportTest.java +++ b/okhttp/src/test/java/io/grpc/okhttp/OkHttpServerTransportTest.java @@ -16,12 +16,12 @@ package io.grpc.okhttp; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.truth.Truth.assertThat; import static io.grpc.okhttp.Headers.CONTENT_TYPE_HEADER; import static io.grpc.okhttp.Headers.HTTP_SCHEME_HEADER; import static io.grpc.okhttp.Headers.METHOD_HEADER; import static io.grpc.okhttp.Headers.TE_HEADER; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.AdditionalAnswers.answerVoid; import static org.mockito.AdditionalAnswers.delegatesTo; import static org.mockito.ArgumentMatchers.any; diff --git a/services/src/test/java/io/grpc/protobuf/services/BinaryLogProviderTest.java b/services/src/test/java/io/grpc/protobuf/services/BinaryLogProviderTest.java index 2d2b7651c0a..67b187e9d7a 100644 --- a/services/src/test/java/io/grpc/protobuf/services/BinaryLogProviderTest.java +++ 
b/services/src/test/java/io/grpc/protobuf/services/BinaryLogProviderTest.java @@ -16,8 +16,8 @@ package io.grpc.protobuf.services; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; diff --git a/testing/src/main/java/io/grpc/internal/testing/StatsTestUtils.java b/testing/src/main/java/io/grpc/internal/testing/StatsTestUtils.java index cd525eeeeb9..a15559ed5cb 100644 --- a/testing/src/main/java/io/grpc/internal/testing/StatsTestUtils.java +++ b/testing/src/main/java/io/grpc/internal/testing/StatsTestUtils.java @@ -16,8 +16,8 @@ package io.grpc.internal.testing; -import static com.google.common.base.Charsets.UTF_8; import static com.google.common.base.Preconditions.checkNotNull; +import static java.nio.charset.StandardCharsets.UTF_8; import com.google.common.base.Function; import com.google.common.collect.ImmutableMap; diff --git a/xds/src/test/java/io/grpc/xds/internal/rbac/engine/GrpcAuthorizationEngineTest.java b/xds/src/test/java/io/grpc/xds/internal/rbac/engine/GrpcAuthorizationEngineTest.java index 4fb38f661e1..10287c11262 100644 --- a/xds/src/test/java/io/grpc/xds/internal/rbac/engine/GrpcAuthorizationEngineTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/rbac/engine/GrpcAuthorizationEngineTest.java @@ -16,8 +16,8 @@ package io.grpc.xds.internal.rbac.engine; -import static com.google.common.base.Charsets.US_ASCII; import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.US_ASCII; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; From 70ae83288de6a712e905c0f97eebd5713ad01677 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Tue, 6 Aug 2024 20:38:08 +0530 Subject: [PATCH 33/53] Upgrade Netty to 4.1.110 and tcnative to 2.0.65 (#11444) Upgrade Netty to 4.1.110 and tcnative to 2.0.65. 
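Note for consumers of the non-shaded `grpc-netty` artifact: per the SECURITY.md table updated here, the 1.67.x line pairs with netty-handler 4.1.110.Final and netty-tcnative-boringssl-static 2.0.65.Final. A minimal Gradle sketch of that pairing (illustrative only; the `1.67.0` version string below is hypothetical, since the tree is still at 1.67.0-SNAPSHOT):

```gradle
// Sketch of a downstream build keeping Netty in sync with grpc-netty 1.67.x,
// following the SECURITY.md pairing. grpc-netty-shaded avoids this bookkeeping.
dependencies {
    implementation 'io.grpc:grpc-netty:1.67.0'  // hypothetical released 1.67.x version
    implementation 'io.netty:netty-handler:4.1.110.Final'
    runtimeOnly 'io.netty:netty-tcnative-boringssl-static:2.0.65.Final'
}
```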
--- MODULE.bazel | 28 +++++++++---------- SECURITY.md | 3 +- gradle/libs.versions.toml | 4 +-- .../io/grpc/netty/shaded/ShadingTest.java | 2 +- repositories.bzl | 28 +++++++++---------- 5 files changed, 33 insertions(+), 32 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 2b5d85490f3..81c3249f47a 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -22,20 +22,20 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.4.2", "com.squareup.okhttp:okhttp:2.7.5", "com.squareup.okio:okio:2.10.0", # 3.0+ needs swapping to -jvm; need work to avoid flag-day - "io.netty:netty-buffer:4.1.100.Final", - "io.netty:netty-codec-http2:4.1.100.Final", - "io.netty:netty-codec-http:4.1.100.Final", - "io.netty:netty-codec-socks:4.1.100.Final", - "io.netty:netty-codec:4.1.100.Final", - "io.netty:netty-common:4.1.100.Final", - "io.netty:netty-handler-proxy:4.1.100.Final", - "io.netty:netty-handler:4.1.100.Final", - "io.netty:netty-resolver:4.1.100.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.61.Final", - "io.netty:netty-tcnative-classes:2.0.61.Final", - "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final", - "io.netty:netty-transport-native-unix-common:4.1.100.Final", - "io.netty:netty-transport:4.1.100.Final", + "io.netty:netty-buffer:4.1.110.Final", + "io.netty:netty-codec-http2:4.1.110.Final", + "io.netty:netty-codec-http:4.1.110.Final", + "io.netty:netty-codec-socks:4.1.110.Final", + "io.netty:netty-codec:4.1.110.Final", + "io.netty:netty-common:4.1.110.Final", + "io.netty:netty-handler-proxy:4.1.110.Final", + "io.netty:netty-handler:4.1.110.Final", + "io.netty:netty-resolver:4.1.110.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.65.Final", + "io.netty:netty-tcnative-classes:2.0.65.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.110.Final", + "io.netty:netty-transport-native-unix-common:4.1.110.Final", + "io.netty:netty-transport:4.1.110.Final", "io.opencensus:opencensus-api:0.31.0", "io.opencensus:opencensus-contrib-grpc-metrics:0.31.0", "io.perfmark:perfmark-api:0.27.0", diff --git a/SECURITY.md b/SECURITY.md index 774579bf68b..5c5e3598b29 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -398,7 +398,8 @@ grpc-netty version | netty-handler version | netty-tcnative-boringssl-static ver 1.56.x | 4.1.87.Final | 2.0.61.Final 1.57.x-1.58.x | 4.1.93.Final | 2.0.61.Final 1.59.x | 4.1.97.Final | 2.0.61.Final -1.60.x- | 4.1.100.Final | 2.0.61.Final +1.60.x-1.66.x | 4.1.100.Final | 2.0.61.Final +1.67.x | 4.1.110.Final | 2.0.65.Final _(grpc-netty-shaded avoids issues with keeping these versions in sync.)_ diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 78550e9c95e..299ca60ab4b 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,8 +1,8 @@ [versions] -netty = '4.1.100.Final' +netty = '4.1.110.Final' # Keep the following references of tcnative version in sync whenever it's updated: # SECURITY.md -nettytcnative = '2.0.61.Final' +nettytcnative = '2.0.65.Final' opencensus = "0.31.1" # Not upgrading to 4.x as it is not yet ABI compatible. 
# https://github.com/protocolbuffers/protobuf/issues/17247 diff --git a/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java b/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java index 7a5e4b43c8b..89803998925 100644 --- a/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java +++ b/netty/shaded/src/testShadow/java/io/grpc/netty/shaded/ShadingTest.java @@ -79,7 +79,7 @@ public void nettyResourcesUpdated() throws IOException { InputStream inputStream = NettyChannelBuilder.class.getClassLoader() .getResourceAsStream( "META-INF/native-image/io.grpc.netty.shaded.io.netty/netty-transport/" - + "reflection-config.json"); + + "reflect-config.json"); assertThat(inputStream).isNotNull(); Scanner s = new Scanner(inputStream, StandardCharsets.UTF_8.name()).useDelimiter("\\A"); diff --git a/repositories.bzl b/repositories.bzl index af3acc8ddcf..455e9dcf3ca 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -26,20 +26,20 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.4.2", "com.squareup.okhttp:okhttp:2.7.5", "com.squareup.okio:okio:2.10.0", # 3.0+ needs swapping to -jvm; need work to avoid flag-day - "io.netty:netty-buffer:4.1.100.Final", - "io.netty:netty-codec-http2:4.1.100.Final", - "io.netty:netty-codec-http:4.1.100.Final", - "io.netty:netty-codec-socks:4.1.100.Final", - "io.netty:netty-codec:4.1.100.Final", - "io.netty:netty-common:4.1.100.Final", - "io.netty:netty-handler-proxy:4.1.100.Final", - "io.netty:netty-handler:4.1.100.Final", - "io.netty:netty-resolver:4.1.100.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.61.Final", - "io.netty:netty-tcnative-classes:2.0.61.Final", - "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.100.Final", - "io.netty:netty-transport-native-unix-common:4.1.100.Final", - "io.netty:netty-transport:4.1.100.Final", + "io.netty:netty-buffer:4.1.110.Final", + "io.netty:netty-codec-http2:4.1.110.Final", + "io.netty:netty-codec-http:4.1.110.Final", + "io.netty:netty-codec-socks:4.1.110.Final", + "io.netty:netty-codec:4.1.110.Final", + "io.netty:netty-common:4.1.110.Final", + "io.netty:netty-handler-proxy:4.1.110.Final", + "io.netty:netty-handler:4.1.110.Final", + "io.netty:netty-resolver:4.1.110.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.65.Final", + "io.netty:netty-tcnative-classes:2.0.65.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.110.Final", + "io.netty:netty-transport-native-unix-common:4.1.110.Final", + "io.netty:netty-transport:4.1.110.Final", "io.opencensus:opencensus-api:0.31.0", "io.opencensus:opencensus-contrib-grpc-metrics:0.31.0", "io.perfmark:perfmark-api:0.27.0", From 18d73a36812440c1e62c51f5da76c9dfe9774da5 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 7 Aug 2024 16:34:11 -0700 Subject: [PATCH 34/53] CONTRIBUTING.md: Update commit guidelines It came up in #11073, and I saw it could use a little updating. Notably, I'm linking to a guide to what Git commit messages should look like. I also tried to make the language less heavy-handed and demanding. --- CONTRIBUTING.md | 57 ++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ce40827e748..646a7d986fd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,43 +30,36 @@ style configurations are commonly useful. For IntelliJ 14, copy the style to `~/.IdeaIC14/config/codestyles/`, start IntelliJ, go to File > Settings > Code Style, and set the Scheme to `GoogleStyle`. 
-## Maintaining clean commit history - -We have few conventions for keeping history clean and making code reviews easier -for reviewers: - -* First line of commit messages should be in format of - - `package-name: summary of change` - - where the summary finishes the sentence: `This commit improves gRPC to ____________.` - - for example: - - `core,netty,interop-testing: add capacitive duractance to turbo encabulators` - -* Every time you receive a feedback on your pull request, push changes that - address it as a separate one or multiple commits with a descriptive commit - message (try avoid using vauge `addressed pr feedback` type of messages). - - Project maintainers are obligated to squash those commits into one when - merging. - ## Guidelines for Pull Requests How to get your contributions merged smoothly and quickly. - Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. -- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). - -- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. - -- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. - -- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. - -- Maintain **clean commit history** and use **meaningful commit messages**. See [maintaining clean commit history](#maintaining-clean-commit-history) for details. - +- For speculative changes, consider opening an issue and discussing it to avoid + wasting time on an inappropriate approach. If you are suggesting a behavioral + or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Follow [typical Git commit message](https://cbea.ms/git-commit/#seven-rules) + structure. Have a good **commit description** as a record of **what** and + **why** the change is being made. Link to a GitHub issue if it exists. The + commit description makes a good PR description and is auto-copied by GitHub if + you have a single commit when creating the PR. + + If your change is mostly for a single module (e.g., other module changes are + trivial), prefix your commit summary with the module name changed. Instead of + "Add HTTP/2 faster-than-light support to gRPC Netty" it is more terse as + "netty: Add faster-than-light support". + +- Don't fix code style and formatting unless you are already changing that line + to address an issue. If you do want to fix formatting or style, do that in a + separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. Address comments with additional + commits so the reviewer can review just the changes; do not squash reviewed + commits unless the reviewer agrees. PRs are squashed when merging. 
+ - Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. Also, `./gradlew build` (`gradlew build` on Windows) **must not introduce any new warnings**. From 40e2b165b792ab1b812b8dc15829ac0a5531b1e5 Mon Sep 17 00:00:00 2001 From: Petr Portnov | PROgrm_JARvis Date: Thu, 8 Aug 2024 08:18:12 +0300 Subject: [PATCH 35/53] Make once-set fields of `AbstractClientStream` `final` (#11389) --- core/src/main/java/io/grpc/internal/AbstractClientStream.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/AbstractClientStream.java b/core/src/main/java/io/grpc/internal/AbstractClientStream.java index 51c31993f46..bb346657d53 100644 --- a/core/src/main/java/io/grpc/internal/AbstractClientStream.java +++ b/core/src/main/java/io/grpc/internal/AbstractClientStream.java @@ -92,8 +92,8 @@ void writeFrame( private final TransportTracer transportTracer; private final Framer framer; - private boolean shouldBeCountedForInUse; - private boolean useGet; + private final boolean shouldBeCountedForInUse; + private final boolean useGet; private Metadata headers; /** * Whether cancel() has been called. This is not strictly necessary, but removes the delay between From 72a977bf7fcecc40f5ee715f833aa008d5b5ee62 Mon Sep 17 00:00:00 2001 From: Larry Safran Date: Thu, 8 Aug 2024 15:59:57 -0700 Subject: [PATCH 36/53] Dualstack example (#11451) --- examples/example-dualstack/README.md | 54 ++++++++ examples/example-dualstack/build.gradle | 79 +++++++++++ examples/example-dualstack/pom.xml | 122 +++++++++++++++++ examples/example-dualstack/settings.gradle | 10 ++ .../examples/dualstack/DualStackClient.java | 95 +++++++++++++ .../examples/dualstack/DualStackServer.java | 126 ++++++++++++++++++ .../ExampleDualStackNameResolver.java | 98 ++++++++++++++ .../ExampleDualStackNameResolverProvider.java | 47 +++++++ .../main/proto/helloworld/helloworld.proto | 37 +++++ .../loadbalance/ExampleNameResolver.java | 13 +- .../loadbalance/LoadBalanceServer.java | 32 +++-- .../nameresolve/NameResolveClient.java | 12 +- 12 files changed, 694 insertions(+), 31 deletions(-) create mode 100644 examples/example-dualstack/README.md create mode 100644 examples/example-dualstack/build.gradle create mode 100644 examples/example-dualstack/pom.xml create mode 100644 examples/example-dualstack/settings.gradle create mode 100644 examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackClient.java create mode 100644 examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackServer.java create mode 100644 examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolver.java create mode 100644 examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolverProvider.java create mode 100644 examples/example-dualstack/src/main/proto/helloworld/helloworld.proto diff --git a/examples/example-dualstack/README.md b/examples/example-dualstack/README.md new file mode 100644 index 00000000000..6c191661d1b --- /dev/null +++ b/examples/example-dualstack/README.md @@ -0,0 +1,54 @@ +# gRPC Dualstack Example + +The dualstack example uses a custom name resolver that provides both IPv4 and IPv6 localhost +endpoints for each of 3 server instances. 
The client will first use the default name resolver and +load balancers which will only connect to the first server. It will then use the +custom name resolver with round robin to connect to each of the servers in turn. The 3 instances +of the server will bind respectively to: both IPv4 and IPv6, IPv4 only, and IPv6 only. + +The example requires grpc-java to already be built. You are strongly encouraged +to check out a git release tag, since there will already be a build of grpc +available. Otherwise, you must follow [COMPILING](../../COMPILING.md). + +### Build the example + +To build the dualstack example server and client, from the + `grpc-java/examples/example-dualstack` directory run: + +```bash +$ ../gradlew installDist +``` + +This creates the scripts +`build/install/example-dualstack/bin/dual-stack-server` + and `build/install/example-dualstack/bin/dual-stack-client`. + +To run the dualstack example, run the server with: + +```bash +$ ./build/install/example-dualstack/bin/dual-stack-server +``` + +And in a different terminal window run the client. + +```bash +$ ./build/install/example-dualstack/bin/dual-stack-client +``` + +### Maven + +If you prefer to use Maven: + +Run in the example-dualstack directory: + +```bash +$ mvn verify +$ # Run the server in one terminal +$ mvn exec:java -Dexec.mainClass=io.grpc.examples.dualstack.DualStackServer +``` + +```bash +$ # In another terminal run the client +$ mvn exec:java -Dexec.mainClass=io.grpc.examples.dualstack.DualStackClient +``` + diff --git a/examples/example-dualstack/build.gradle b/examples/example-dualstack/build.gradle new file mode 100644 index 00000000000..32b35af8a87 --- /dev/null +++ b/examples/example-dualstack/build.gradle @@ -0,0 +1,79 @@ +plugins { + id 'application' // Provide convenience executables for trying out the examples. + id 'java' + + id "com.google.protobuf" version "0.9.4" + + // Generate IntelliJ IDEA's .idea & .iml project files + id 'idea' +} + +repositories { + maven { // The google mirror is less flaky than mavenCentral() + url "https://maven-central.storage-download.googleapis.com/maven2/" } + mavenCentral() + mavenLocal() +} + +java { + sourceCompatibility = JavaVersion.VERSION_1_8 + targetCompatibility = JavaVersion.VERSION_1_8 +} + +// IMPORTANT: You probably want the non-SNAPSHOT version of gRPC. Make sure you +// are looking at a tagged version of the example and not "master"! + +// Feel free to delete the comment at the next line. It is just for safely +// updating the version in our release process. 
+def grpcVersion = '1.67.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def protobufVersion = '3.25.3' + +dependencies { + implementation "io.grpc:grpc-protobuf:${grpcVersion}" + implementation "io.grpc:grpc-netty:${grpcVersion}" + implementation "io.grpc:grpc-stub:${grpcVersion}" + implementation "io.grpc:grpc-services:${grpcVersion}" + compileOnly "org.apache.tomcat:annotations-api:6.0.53" +} + +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:${protobufVersion}" + } + plugins { + grpc { + artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}" + } + } + generateProtoTasks { + all()*.plugins { + grpc {} + } + } +} + +startScripts.enabled = false + +task DualStackClient(type: CreateStartScripts) { + mainClass = 'io.grpc.examples.dualstack.DualStackClient' + applicationName = 'dual-stack-client' + outputDir = new File(project.buildDir, 'tmp/scripts/' + name) + classpath = startScripts.classpath +} + +task DualStackServer(type: CreateStartScripts) { + mainClass = 'io.grpc.examples.dualstack.DualStackServer' + applicationName = 'dual-stack-server' + outputDir = new File(project.buildDir, 'tmp/scripts/' + name) + classpath = startScripts.classpath +} + +application { + applicationDistribution.into('bin') { + from(DualStackClient) + from(DualStackServer) + filePermissions { + unix(0755) + } + } +} diff --git a/examples/example-dualstack/pom.xml b/examples/example-dualstack/pom.xml new file mode 100644 index 00000000000..710b48ee617 --- /dev/null +++ b/examples/example-dualstack/pom.xml @@ -0,0 +1,122 @@ + + 4.0.0 + io.grpc + example-dualstack + jar + + 1.67.0-SNAPSHOT + example-dualstack + https://github.com/grpc/grpc-java + + + UTF-8 + 1.67.0-SNAPSHOT + 3.25.3 + + 1.8 + 1.8 + + + + + + io.grpc + grpc-bom + ${grpc.version} + pom + import + + + + + + + io.grpc + grpc-services + + + io.grpc + grpc-protobuf + + + io.grpc + grpc-stub + + + io.grpc + grpc-netty + + + org.apache.tomcat + annotations-api + 6.0.53 + provided + + + io.grpc + grpc-netty-shaded + runtime + + + junit + junit + 4.13.2 + test + + + io.grpc + grpc-testing + test + + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + 1.4.1 + + + enforce + + enforce + + + + + + + + + + + + diff --git a/examples/example-dualstack/settings.gradle b/examples/example-dualstack/settings.gradle new file mode 100644 index 00000000000..0aae8f7304e --- /dev/null +++ b/examples/example-dualstack/settings.gradle @@ -0,0 +1,10 @@ +pluginManagement { + repositories { + maven { // The google mirror is less flaky than mavenCentral() + url "https://maven-central.storage-download.googleapis.com/maven2/" + } + gradlePluginPortal() + } +} + +rootProject.name = 'example-dualstack' diff --git a/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackClient.java b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackClient.java new file mode 100644 index 00000000000..b9993a524d6 --- /dev/null +++ b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackClient.java @@ -0,0 +1,95 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.examples.dualstack; + +import io.grpc.Channel; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.NameResolverRegistry; +import io.grpc.StatusRuntimeException; +import io.grpc.examples.helloworld.GreeterGrpc; +import io.grpc.examples.helloworld.HelloReply; +import io.grpc.examples.helloworld.HelloRequest; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * A client that requests greetings from the {@link DualStackServer}. + * First it sends 5 requests using the default nameresolver and load balancer. + * Then it sends 10 requests using the example nameresolver and round robin load balancer. These + * requests are evenly distributed among the 3 servers rather than favoring the server listening + * on both addresses because the ExampleDualStackNameResolver groups the 3 servers as 3 endpoints + * each with 2 addresses. + */ +public class DualStackClient { + public static final String channelTarget = "example:///lb.example.grpc.io"; + private static final Logger logger = Logger.getLogger(DualStackClient.class.getName()); + private final GreeterGrpc.GreeterBlockingStub blockingStub; + + public DualStackClient(Channel channel) { + blockingStub = GreeterGrpc.newBlockingStub(channel); + } + + public static void main(String[] args) throws Exception { + NameResolverRegistry.getDefaultRegistry() + .register(new ExampleDualStackNameResolverProvider()); + + logger.info("\n **** Use default DNS resolver ****"); + ManagedChannel channel = ManagedChannelBuilder.forTarget("localhost:50051") + .usePlaintext() + .build(); + try { + DualStackClient client = new DualStackClient(channel); + for (int i = 0; i < 5; i++) { + client.greet("request:" + i); + } + } finally { + channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS); + } + + logger.info("\n **** Change to use example name resolver ****"); + /* + Dial to "example:///resolver.example.grpc.io", use {@link ExampleNameResolver} to create connection + "resolver.example.grpc.io" is converted to {@link java.net.URI.path} + */ + channel = ManagedChannelBuilder.forTarget(channelTarget) + .defaultLoadBalancingPolicy("round_robin") + .usePlaintext() + .build(); + try { + DualStackClient client = new DualStackClient(channel); + for (int i = 0; i < 10; i++) { + client.greet("request:" + i); + } + } finally { + channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS); + } + } + + public void greet(String name) { + HelloRequest request = HelloRequest.newBuilder().setName(name).build(); + HelloReply response; + try { + response = blockingStub.sayHello(request); + } catch (StatusRuntimeException e) { + logger.log(Level.WARNING, "RPC failed: {0}", e.getStatus()); + return; + } + logger.info("Greeting: " + response.getMessage()); + } +} diff --git a/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackServer.java b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackServer.java new file mode 100644 index 00000000000..43b21e963f8 --- /dev/null +++ 
b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/DualStackServer.java @@ -0,0 +1,126 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.examples.dualstack; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import io.grpc.examples.helloworld.GreeterGrpc; +import io.grpc.examples.helloworld.HelloReply; +import io.grpc.examples.helloworld.HelloRequest; +import io.grpc.netty.NettyServerBuilder; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +/** + * Starts 3 different greeter services each on its own port, but all for localhost. + * The first service listens on both IPv4 and IPv6, + * the second on just IPv4, and the third on just IPv6. + */ +public class DualStackServer { + private static final Logger logger = Logger.getLogger(DualStackServer.class.getName()); + private List servers; + + public static void main(String[] args) throws IOException, InterruptedException { + final DualStackServer server = new DualStackServer(); + server.start(); + server.blockUntilShutdown(); + } + + private void start() throws IOException { + InetSocketAddress inetSocketAddress; + + servers = new ArrayList<>(); + int[] serverPorts = ExampleDualStackNameResolver.SERVER_PORTS; + for (int i = 0; i < serverPorts.length; i++ ) { + String addressType; + int port = serverPorts[i]; + ServerBuilder serverBuilder; + switch (i) { + case 0: + serverBuilder = ServerBuilder.forPort(port); // bind to both IPv4 and IPv6 + addressType = "both IPv4 and IPv6"; + break; + case 1: + // bind to IPv4 only + inetSocketAddress = new InetSocketAddress("127.0.0.1", port); + serverBuilder = NettyServerBuilder.forAddress(inetSocketAddress); + addressType = "IPv4 only"; + break; + case 2: + // bind to IPv6 only + inetSocketAddress = new InetSocketAddress("::1", port); + serverBuilder = NettyServerBuilder.forAddress(inetSocketAddress); + addressType = "IPv6 only"; + break; + default: + throw new IllegalStateException("Unexpected value: " + i); + } + + servers.add(serverBuilder + .addService(new GreeterImpl(port, addressType)) + .build() + .start()); + logger.info("Server started, listening on " + port); + } + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + System.err.println("*** shutting down gRPC server since JVM is shutting down"); + try { + DualStackServer.this.stop(); + } catch (InterruptedException e) { + e.printStackTrace(System.err); + } + System.err.println("*** server shut down"); + })); + } + + private void stop() throws InterruptedException { + for (Server server : servers) { + server.shutdown().awaitTermination(30, TimeUnit.SECONDS); + } + } + + private void blockUntilShutdown() throws InterruptedException { + for (Server server : servers) { + server.awaitTermination(); + } + } + + static class GreeterImpl extends GreeterGrpc.GreeterImplBase 
{ + + int port; + String addressType; + + public GreeterImpl(int port, String addressType) { + this.port = port; + this.addressType = addressType; + } + + @Override + public void sayHello(HelloRequest req, StreamObserver<HelloReply> responseObserver) { + String msg = String.format("Hello %s from server<%d> type: %s", + req.getName(), this.port, addressType); + HelloReply reply = HelloReply.newBuilder().setMessage(msg).build(); + responseObserver.onNext(reply); + responseObserver.onCompleted(); + } + } +} diff --git a/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolver.java b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolver.java new file mode 100644 index 00000000000..70675b3de3d --- /dev/null +++ b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolver.java @@ -0,0 +1,98 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.examples.dualstack; + +import com.google.common.collect.ImmutableMap; +import io.grpc.EquivalentAddressGroup; +import io.grpc.NameResolver; +import io.grpc.Status; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +/** + * A fake name resolver that resolves to a hard-coded list of 3 endpoints (EquivalentAddressGroup) + * each with 2 addresses (one IPv4 and one IPv6). + */ +public class ExampleDualStackNameResolver extends NameResolver { + static public final int[] SERVER_PORTS = {50051, 50052, 50053}; + + // This is a fake name resolver, so we just hard code the address here. 
+ private static final ImmutableMap<String, List<List<SocketAddress>>> addrStore = + ImmutableMap.<String, List<List<SocketAddress>>>builder() + .put("lb.example.grpc.io", + Arrays.stream(SERVER_PORTS) + .mapToObj(port -> getLocalAddrs(port)) + .collect(Collectors.toList()) + ) + .build(); + + private Listener2 listener; + + private final URI uri; + + public ExampleDualStackNameResolver(URI targetUri) { + this.uri = targetUri; + } + + private static List<SocketAddress> getLocalAddrs(int port) { + return Arrays.asList( + new InetSocketAddress("127.0.0.1", port), + new InetSocketAddress("::1", port)); + } + + @Override + public String getServiceAuthority() { + return uri.getPath().substring(1); + } + + @Override + public void shutdown() { + } + + @Override + public void start(Listener2 listener) { + this.listener = listener; + this.resolve(); + } + + @Override + public void refresh() { + this.resolve(); + } + + private void resolve() { + List<List<SocketAddress>> addresses = addrStore.get(uri.getPath().substring(1)); + try { + List<EquivalentAddressGroup> eagList = new ArrayList<>(); + for (List<SocketAddress> endpoint : addresses) { + // every server is an EquivalentAddressGroup, so they can be accessed randomly + eagList.add(new EquivalentAddressGroup(endpoint)); + } + + this.listener.onResult(ResolutionResult.newBuilder().setAddresses(eagList).build()); + } catch (Exception e){ + // when error occurs, notify listener + this.listener.onError(Status.UNAVAILABLE.withDescription("Unable to resolve host ").withCause(e)); + } + } + +} diff --git a/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolverProvider.java b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolverProvider.java new file mode 100644 index 00000000000..a01d68aca3e --- /dev/null +++ b/examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolverProvider.java @@ -0,0 +1,47 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.examples.dualstack; + +import io.grpc.NameResolver; +import io.grpc.NameResolverProvider; + +import java.net.URI; + +public class ExampleDualStackNameResolverProvider extends NameResolverProvider { + public static final String exampleScheme = "example"; + + @Override + public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { + return new ExampleDualStackNameResolver(targetUri); + } + + @Override + protected boolean isAvailable() { + return true; + } + + @Override + protected int priority() { + return 5; + } + + @Override + // gRPC chooses the first NameResolverProvider that supports the target URI scheme. 
+ public String getDefaultScheme() { + return exampleScheme; + } +} diff --git a/examples/example-dualstack/src/main/proto/helloworld/helloworld.proto b/examples/example-dualstack/src/main/proto/helloworld/helloworld.proto new file mode 100644 index 00000000000..c60d9416f1f --- /dev/null +++ b/examples/example-dualstack/src/main/proto/helloworld/helloworld.proto @@ -0,0 +1,37 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.helloworld"; +option java_outer_classname = "HelloWorldProto"; +option objc_class_prefix = "HLW"; + +package helloworld; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/examples/src/main/java/io/grpc/examples/loadbalance/ExampleNameResolver.java b/examples/src/main/java/io/grpc/examples/loadbalance/ExampleNameResolver.java index f562f0ac107..6ef327ade84 100644 --- a/examples/src/main/java/io/grpc/examples/loadbalance/ExampleNameResolver.java +++ b/examples/src/main/java/io/grpc/examples/loadbalance/ExampleNameResolver.java @@ -28,12 +28,12 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import java.util.stream.Stream; import static io.grpc.examples.loadbalance.LoadBalanceClient.exampleServiceName; public class ExampleNameResolver extends NameResolver { + static private final int[] SERVER_PORTS = {50051, 50052, 50053}; private Listener2 listener; private final URI uri; @@ -44,12 +44,11 @@ public ExampleNameResolver(URI targetUri) { this.uri = targetUri; // This is a fake name resolver, so we just hard code the address here. 
addrStore = ImmutableMap.>builder() - .put(exampleServiceName, - Stream.iterate(LoadBalanceServer.startPort,p->p+1) - .limit(LoadBalanceServer.serverCount) - .map(port->new InetSocketAddress("localhost",port)) - .collect(Collectors.toList()) - ) + .put(exampleServiceName, + Arrays.stream(SERVER_PORTS) + .mapToObj(port->new InetSocketAddress("localhost",port)) + .collect(Collectors.toList()) + ) .build(); } diff --git a/examples/src/main/java/io/grpc/examples/loadbalance/LoadBalanceServer.java b/examples/src/main/java/io/grpc/examples/loadbalance/LoadBalanceServer.java index c97d209497a..85ae92a537a 100644 --- a/examples/src/main/java/io/grpc/examples/loadbalance/LoadBalanceServer.java +++ b/examples/src/main/java/io/grpc/examples/loadbalance/LoadBalanceServer.java @@ -24,23 +24,24 @@ import io.grpc.stub.StreamObserver; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; public class LoadBalanceServer { private static final Logger logger = Logger.getLogger(LoadBalanceServer.class.getName()); - static public final int serverCount = 3; - static public final int startPort = 50051; - private Server[] servers; + static final int[] SERVER_PORTS = {50051, 50052, 50053}; + private List servers; private void start() throws IOException { - servers = new Server[serverCount]; - for (int i = 0; i < serverCount; i++) { - int port = startPort + i; - servers[i] = ServerBuilder.forPort(port) + servers = new ArrayList<>(); + for (int port : SERVER_PORTS) { + servers.add( + ServerBuilder.forPort(port) .addService(new GreeterImpl(port)) .build() - .start(); + .start()); logger.info("Server started, listening on " + port); } Runtime.getRuntime().addShutdownHook(new Thread(() -> { @@ -55,18 +56,14 @@ private void start() throws IOException { } private void stop() throws InterruptedException { - for (int i = 0; i < serverCount; i++) { - if (servers[i] != null) { - servers[i].shutdown().awaitTermination(30, TimeUnit.SECONDS); - } + for (Server server : servers) { + server.shutdown().awaitTermination(30, TimeUnit.SECONDS); } } private void blockUntilShutdown() throws InterruptedException { - for (int i = 0; i < serverCount; i++) { - if (servers[i] != null) { - servers[i].awaitTermination(); - } + for (Server server : servers) { + server.awaitTermination(); } } @@ -86,7 +83,8 @@ public GreeterImpl(int port) { @Override public void sayHello(HelloRequest req, StreamObserver responseObserver) { - HelloReply reply = HelloReply.newBuilder().setMessage("Hello " + req.getName() + " from server<" + this.port + ">").build(); + HelloReply reply = HelloReply.newBuilder() + .setMessage("Hello " + req.getName() + " from server<" + this.port + ">").build(); responseObserver.onNext(reply); responseObserver.onCompleted(); } diff --git a/examples/src/main/java/io/grpc/examples/nameresolve/NameResolveClient.java b/examples/src/main/java/io/grpc/examples/nameresolve/NameResolveClient.java index ac6fdd32549..9aaccbe1096 100644 --- a/examples/src/main/java/io/grpc/examples/nameresolve/NameResolveClient.java +++ b/examples/src/main/java/io/grpc/examples/nameresolve/NameResolveClient.java @@ -26,8 +26,7 @@ import java.util.logging.Logger; public class NameResolveClient { - public static final String exampleScheme = "example"; - public static final String exampleServiceName = "lb.example.grpc.io"; + public static final String channelTarget = "example:///lb.example.grpc.io"; private static final Logger logger = 
Logger.getLogger(NameResolveClient.class.getName()); private final GreeterGrpc.GreeterBlockingStub blockingStub; @@ -56,11 +55,10 @@ public static void main(String[] args) throws Exception { Dial to "example:///resolver.example.grpc.io", use {@link ExampleNameResolver} to create connection "resolver.example.grpc.io" is converted to {@link java.net.URI.path} */ - channel = ManagedChannelBuilder.forTarget( - String.format("%s:///%s", exampleScheme, exampleServiceName)) - .defaultLoadBalancingPolicy("round_robin") - .usePlaintext() - .build(); + channel = ManagedChannelBuilder.forTarget(channelTarget) + .defaultLoadBalancingPolicy("round_robin") + .usePlaintext() + .build(); try { NameResolveClient client = new NameResolveClient(channel); for (int i = 0; i < 5; i++) { From f866c805c2f78271de9f2b61254363d009cee8c6 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 25 Jul 2024 22:42:34 -0700 Subject: [PATCH 37/53] util: SocketAddress.toString() cannot be used for equality Some addresses are equal even though their toString is different (InetSocketAddress ignores the hostname when it has an address). And some addresses are not equal even though their toString might be the same (AnonymousInProcessSocketAddress doesn't override toString()). InetSocketAddress/InetAddress do not cache the toString() result. Thus, even in the worst case that uses a HashSet, this should use less memory than the earlier approach, as no strings are formatted. It probably also significantly improves performance in the reasonably common case when an Endpoint is created just for looking up a key, because the string creation in the constructor isn't then amortized. updateChildrenWithResolvedAddresses(), for example, creates n^2 Endpoint objects for lookups. --- .../io/grpc/util/MultiChildLoadBalancer.java | 33 ++++++----- .../grpc/util/MultiChildLoadBalancerTest.java | 55 ++++++++----------- .../java/io/grpc/util/AbstractTestHelper.java | 16 +++++- 3 files changed, 54 insertions(+), 50 deletions(-) diff --git a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java index c5f774984fe..893dd1e1598 100644 --- a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java @@ -37,10 +37,10 @@ import io.grpc.internal.PickFirstLoadBalancerProvider; import java.net.SocketAddress; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -494,25 +494,27 @@ protected Helper delegate() { /** * Endpoint is an optimization to quickly lookup and compare EquivalentAddressGroup address sets. - * Ignores the attributes, orders the addresses in a deterministic manner and converts each - * address into a string for easy comparison. Also caches the hashcode. - * Is used as a key for ChildLbState for most load balancers (ClusterManagerLB uses a String). + * It ignores the attributes. Is used as a key for ChildLbState for most load balancers + * (ClusterManagerLB uses a String). 
*/ protected static class Endpoint { - final String[] addrs; + final Collection addrs; final int hashCode; public Endpoint(EquivalentAddressGroup eag) { checkNotNull(eag, "eag"); - addrs = new String[eag.getAddresses().size()]; - int i = 0; + if (eag.getAddresses().size() < 10) { + addrs = eag.getAddresses(); + } else { + // This is expected to be very unlikely in practice + addrs = new HashSet<>(eag.getAddresses()); + } + int sum = 0; for (SocketAddress address : eag.getAddresses()) { - addrs[i++] = address.toString(); + sum += address.hashCode(); } - Arrays.sort(addrs); - - hashCode = Arrays.hashCode(addrs); + hashCode = sum; } @Override @@ -525,24 +527,21 @@ public boolean equals(Object other) { if (this == other) { return true; } - if (other == null) { - return false; - } if (!(other instanceof Endpoint)) { return false; } Endpoint o = (Endpoint) other; - if (o.hashCode != hashCode || o.addrs.length != addrs.length) { + if (o.hashCode != hashCode || o.addrs.size() != addrs.size()) { return false; } - return Arrays.equals(o.addrs, this.addrs); + return o.addrs.containsAll(addrs); } @Override public String toString() { - return Arrays.toString(addrs); + return addrs.toString(); } } diff --git a/util/src/test/java/io/grpc/util/MultiChildLoadBalancerTest.java b/util/src/test/java/io/grpc/util/MultiChildLoadBalancerTest.java index df226d5aee8..6bfd6d7a659 100644 --- a/util/src/test/java/io/grpc/util/MultiChildLoadBalancerTest.java +++ b/util/src/test/java/io/grpc/util/MultiChildLoadBalancerTest.java @@ -21,7 +21,6 @@ import static io.grpc.ConnectivityState.READY; import static io.grpc.ConnectivityState.SHUTDOWN; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.AdditionalAnswers.delegatesTo; @@ -34,6 +33,7 @@ import static org.mockito.Mockito.verify; import com.google.common.collect.Lists; +import com.google.common.testing.EqualsTester; import io.grpc.Attributes; import io.grpc.ConnectivityState; import io.grpc.ConnectivityStateInfo; @@ -244,37 +244,28 @@ public void testEndpoint_toString() { @Test public void testEndpoint_equals() { - assertEquals( - createEndpoint(Attributes.EMPTY, "addr1"), - createEndpoint(Attributes.EMPTY, "addr1")); - - assertEquals( - createEndpoint(Attributes.EMPTY, "addr1", "addr2"), - createEndpoint(Attributes.EMPTY, "addr2", "addr1")); - - assertEquals( - createEndpoint(Attributes.EMPTY, "addr1", "addr2"), - createEndpoint(affinity, "addr2", "addr1")); - - assertEquals( - createEndpoint(Attributes.EMPTY, "addr1", "addr2").hashCode(), - createEndpoint(affinity, "addr2", "addr1").hashCode()); - - } - - @Test - public void testEndpoint_notEquals() { - assertNotEquals( - createEndpoint(Attributes.EMPTY, "addr1", "addr2"), - createEndpoint(Attributes.EMPTY, "addr1", "addr3")); - - assertNotEquals( - createEndpoint(Attributes.EMPTY, "addr1"), - createEndpoint(Attributes.EMPTY, "addr1", "addr2")); - - assertNotEquals( - createEndpoint(Attributes.EMPTY, "addr1", "addr2"), - createEndpoint(Attributes.EMPTY, "addr1")); + new EqualsTester() + .addEqualityGroup( + createEndpoint(Attributes.EMPTY, "addr1"), + createEndpoint(Attributes.EMPTY, "addr1")) + .addEqualityGroup( + createEndpoint(Attributes.EMPTY, "addr1", "addr2"), + createEndpoint(Attributes.EMPTY, "addr2", "addr1"), + createEndpoint(affinity, "addr1", "addr2")) + .addEqualityGroup( + createEndpoint(Attributes.EMPTY, "addr1", "addr3")) + .addEqualityGroup( + 
createEndpoint(Attributes.EMPTY, "addr1", "addr2", "addr3", "addr4", "addr5", "addr6", + "addr7", "addr8", "addr9", "addr10"), + createEndpoint(Attributes.EMPTY, "addr2", "addr1", "addr3", "addr4", "addr5", "addr6", + "addr7", "addr8", "addr9", "addr10")) + .addEqualityGroup( + createEndpoint(Attributes.EMPTY, "addr1", "addr2", "addr3", "addr4", "addr5", "addr6", + "addr7", "addr8", "addr9", "addr11")) + .addEqualityGroup( + createEndpoint(Attributes.EMPTY, "addr1", "addr2", "addr3", "addr4", "addr5", "addr6", + "addr7", "addr8", "addr9", "addr10", "addr11")) + .testEquals(); } private String addressesOnlyString(EquivalentAddressGroup eag) { diff --git a/util/src/testFixtures/java/io/grpc/util/AbstractTestHelper.java b/util/src/testFixtures/java/io/grpc/util/AbstractTestHelper.java index b0239c56703..bdeff9d17c5 100644 --- a/util/src/testFixtures/java/io/grpc/util/AbstractTestHelper.java +++ b/util/src/testFixtures/java/io/grpc/util/AbstractTestHelper.java @@ -276,7 +276,7 @@ public String toString() { } } - public static class FakeSocketAddress extends SocketAddress { + public static final class FakeSocketAddress extends SocketAddress { private static final long serialVersionUID = 0L; final String name; @@ -288,6 +288,20 @@ public static class FakeSocketAddress extends SocketAddress { public String toString() { return "FakeSocketAddress-" + name; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof FakeSocketAddress)) { + return false; + } + FakeSocketAddress that = (FakeSocketAddress) o; + return this.name.equals(that.name); + } + + @Override + public int hashCode() { + return name.hashCode(); + } } } From 2f4f7f0ece1e5e5ef5ccae4281a32f00c5a56ffa Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 25 Jul 2024 18:59:33 -0700 Subject: [PATCH 38/53] util: Delete unused MultiChildLB.ChildLbState.getSubchannels() --- .../src/main/java/io/grpc/util/MultiChildLoadBalancer.java | 7 ------- 1 file changed, 7 deletions(-) diff --git a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java index 893dd1e1598..f2e2cc617ee 100644 --- a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java @@ -411,13 +411,6 @@ public final SubchannelPicker getCurrentPicker() { return currentPicker; } - protected final Subchannel getSubchannels(PickSubchannelArgs args) { - if (getCurrentPicker() == null) { - return null; - } - return getCurrentPicker().pickSubchannel(args).getSubchannel(); - } - public final ConnectivityState getCurrentState() { return currentState; } From d1dcfb0451a0a19ad391f7e51502f4adec710b4e Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Sat, 27 Jul 2024 11:55:27 -0700 Subject: [PATCH 39/53] xds: Replace WrrHelper with a per-child Helper There's no need to assume which child makes a subchannel based on the subchannel address. 
--- .../xds/WeightedRoundRobinLoadBalancer.java | 53 ++++++------------- 1 file changed, 16 insertions(+), 37 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index 115857d43ff..abcb0941fd9 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -17,7 +17,6 @@ package io.grpc.xds; import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkElementIndex; import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.annotations.VisibleForTesting; @@ -40,7 +39,6 @@ import io.grpc.SynchronizationContext; import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.services.MetricReport; -import io.grpc.util.ForwardingLoadBalancerHelper; import io.grpc.util.ForwardingSubchannel; import io.grpc.util.MultiChildLoadBalancer; import io.grpc.xds.orca.OrcaOobUtil; @@ -137,12 +135,12 @@ final class WeightedRoundRobinLoadBalancer extends MultiChildLoadBalancer { } public WeightedRoundRobinLoadBalancer(Helper helper, Ticker ticker) { - this(new WrrHelper(OrcaOobUtil.newOrcaReportingHelper(helper)), ticker, new Random()); + this(helper, ticker, new Random()); } - public WeightedRoundRobinLoadBalancer(WrrHelper helper, Ticker ticker, Random random) { - super(helper); - helper.setLoadBalancer(this); + @VisibleForTesting + WeightedRoundRobinLoadBalancer(Helper helper, Ticker ticker, Random random) { + super(OrcaOobUtil.newOrcaReportingHelper(helper)); this.ticker = checkNotNull(ticker, "ticker"); this.infTime = ticker.nanoTime() + Long.MAX_VALUE; this.syncContext = checkNotNull(helper.getSynchronizationContext(), "syncContext"); @@ -152,11 +150,6 @@ public WeightedRoundRobinLoadBalancer(WrrHelper helper, Ticker ticker, Random ra log.log(Level.FINE, "weighted_round_robin LB created"); } - @VisibleForTesting - WeightedRoundRobinLoadBalancer(Helper helper, Ticker ticker, Random random) { - this(new WrrHelper(OrcaOobUtil.newOrcaReportingHelper(helper)), ticker, random); - } - @Override protected ChildLbState createChildLbState(Object key, Object policyConfig, SubchannelPicker initialPicker, ResolvedAddresses unused) { @@ -270,6 +263,11 @@ public WeightedChildLbState(Object key, LoadBalancerProvider policyProvider, Obj super(key, policyProvider, childConfig, initialPicker); } + @Override + protected ChildLbStateHelper createChildHelper() { + return new WrrChildLbStateHelper(); + } + private double getWeight(AtomicInteger staleEndpoints, AtomicInteger notYetUsableEndpoints) { if (config == null) { return 0; @@ -305,6 +303,13 @@ public void removeSubchannel(WrrSubchannel wrrSubchannel) { subchannels.remove(wrrSubchannel); } + final class WrrChildLbStateHelper extends ChildLbStateHelper { + @Override + public Subchannel createSubchannel(CreateSubchannelArgs args) { + return new WrrSubchannel(super.createSubchannel(args), WeightedChildLbState.this); + } + } + final class OrcaReportListener implements OrcaPerRequestReportListener, OrcaOobReportListener { private final float errorUtilizationPenalty; @@ -374,32 +379,6 @@ public void shutdown() { super.shutdown(); } - private static final class WrrHelper extends ForwardingLoadBalancerHelper { - private final Helper delegate; - private WeightedRoundRobinLoadBalancer wrr; - - WrrHelper(Helper helper) { - this.delegate = helper; - } - - void 
setLoadBalancer(WeightedRoundRobinLoadBalancer lb) { - this.wrr = lb; - } - - @Override - protected Helper delegate() { - return delegate; - } - - @Override - public Subchannel createSubchannel(CreateSubchannelArgs args) { - checkElementIndex(0, args.getAddresses().size(), "Empty address group"); - WeightedChildLbState childLbState = - (WeightedChildLbState) wrr.getChildLbStateEag(args.getAddresses().get(0)); - return wrr.new WrrSubchannel(delegate().createSubchannel(args), childLbState); - } - } - @VisibleForTesting final class WrrSubchannel extends ForwardingSubchannel { private final Subchannel delegate; From 043ba556b89eb7b700c825bb09bed6e7fb038608 Mon Sep 17 00:00:00 2001 From: yifeizhuang Date: Mon, 12 Aug 2024 11:16:54 -0700 Subject: [PATCH 40/53] otel tracing: add binary format, grpcTraceBinContextPropagator (#11409) * otel tracing: add binary format, grpcTraceBinContextPropagator * exception handling, use api base64 encoder omit padding remove binary format abstract class in favor of binary marshaller --- .../io/grpc/opentelemetry/BinaryFormat.java | 143 ++++++++ .../GrpcTraceBinContextPropagator.java | 147 ++++++++ .../io/grpc/opentelemetry/MetadataGetter.java | 87 +++++ .../io/grpc/opentelemetry/MetadataSetter.java | 74 +++++ .../GrpcTraceBinContextPropagatorTest.java | 313 ++++++++++++++++++ .../opentelemetry/MetadataGetterTest.java | 96 ++++++ .../opentelemetry/MetadataSetterTest.java | 83 +++++ 7 files changed, 943 insertions(+) create mode 100644 opentelemetry/src/main/java/io/grpc/opentelemetry/BinaryFormat.java create mode 100644 opentelemetry/src/main/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagator.java create mode 100644 opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataGetter.java create mode 100644 opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataSetter.java create mode 100644 opentelemetry/src/test/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagatorTest.java create mode 100644 opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataGetterTest.java create mode 100644 opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataSetterTest.java diff --git a/opentelemetry/src/main/java/io/grpc/opentelemetry/BinaryFormat.java b/opentelemetry/src/main/java/io/grpc/opentelemetry/BinaryFormat.java new file mode 100644 index 00000000000..cdf27875903 --- /dev/null +++ b/opentelemetry/src/main/java/io/grpc/opentelemetry/BinaryFormat.java @@ -0,0 +1,143 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.opentelemetry; + + +import static com.google.common.base.Preconditions.checkNotNull; + +import io.grpc.Metadata; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.api.trace.SpanId; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceId; +import io.opentelemetry.api.trace.TraceState; +import java.util.Arrays; + +/** + * Binary encoded {@link SpanContext} for context propagation. 
This is adapted from OpenCensus + * binary format. + * + *

BinaryFormat format:
+ *
+ *   • Binary value: <version_id><version_format>
+ *   • version_id: 1-byte representing the version id.
+ *   • For version_id = 0:
+ *     • version_format: <field><field>
+ *     • field_format: <field_id><field_format>
+ *     • Fields:
+ *       • TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id.
+ *       • SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id.
+ *       • TraceFlags: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_flags.
+ *     • Fields MUST be encoded using the field id order (smaller to higher).
+ *     • Valid value example:
+ *       • {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1}
+ *       • version_id = 0;
+ *       • trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}
+ *       • span_id = {97, 98, 99, 100, 101, 102, 103, 104};
+ *       • trace_flags = {1};
      + *
    + *
+ */ +final class BinaryFormat implements Metadata.BinaryMarshaller { + private static final byte VERSION_ID = 0; + private static final int VERSION_ID_OFFSET = 0; + private static final byte ID_SIZE = 1; + private static final byte TRACE_ID_FIELD_ID = 0; + + private static final int TRACE_ID_FIELD_ID_OFFSET = VERSION_ID_OFFSET + ID_SIZE; + private static final int TRACE_ID_OFFSET = TRACE_ID_FIELD_ID_OFFSET + ID_SIZE; + private static final int TRACE_ID_SIZE = TraceId.getLength() / 2; + + private static final byte SPAN_ID_FIELD_ID = 1; + private static final int SPAN_ID_FIELD_ID_OFFSET = TRACE_ID_OFFSET + TRACE_ID_SIZE; + private static final int SPAN_ID_OFFSET = SPAN_ID_FIELD_ID_OFFSET + ID_SIZE; + private static final int SPAN_ID_SIZE = SpanId.getLength() / 2; + + private static final byte TRACE_FLAG_FIELD_ID = 2; + private static final int TRACE_FLAG_FIELD_ID_OFFSET = SPAN_ID_OFFSET + SPAN_ID_SIZE; + private static final int TRACE_FLAG_OFFSET = TRACE_FLAG_FIELD_ID_OFFSET + ID_SIZE; + private static final int REQUIRED_FORMAT_LENGTH = 3 * ID_SIZE + TRACE_ID_SIZE + SPAN_ID_SIZE; + private static final int TRACE_FLAG_SIZE = TraceFlags.getLength() / 2; + private static final int ALL_FORMAT_LENGTH = REQUIRED_FORMAT_LENGTH + ID_SIZE + TRACE_FLAG_SIZE; + + private static final BinaryFormat INSTANCE = new BinaryFormat(); + + public static BinaryFormat getInstance() { + return INSTANCE; + } + + @Override + public byte[] toBytes(SpanContext spanContext) { + checkNotNull(spanContext, "spanContext"); + byte[] bytes = new byte[ALL_FORMAT_LENGTH]; + bytes[VERSION_ID_OFFSET] = VERSION_ID; + bytes[TRACE_ID_FIELD_ID_OFFSET] = TRACE_ID_FIELD_ID; + System.arraycopy(spanContext.getTraceIdBytes(), 0, bytes, TRACE_ID_OFFSET, TRACE_ID_SIZE); + bytes[SPAN_ID_FIELD_ID_OFFSET] = SPAN_ID_FIELD_ID; + System.arraycopy(spanContext.getSpanIdBytes(), 0, bytes, SPAN_ID_OFFSET, SPAN_ID_SIZE); + bytes[TRACE_FLAG_FIELD_ID_OFFSET] = TRACE_FLAG_FIELD_ID; + bytes[TRACE_FLAG_OFFSET] = spanContext.getTraceFlags().asByte(); + return bytes; + } + + + @Override + public SpanContext parseBytes(byte[] serialized) { + checkNotNull(serialized, "bytes"); + if (serialized.length == 0 || serialized[0] != VERSION_ID) { + throw new IllegalArgumentException("Unsupported version."); + } + if (serialized.length < REQUIRED_FORMAT_LENGTH) { + throw new IllegalArgumentException("Invalid input: truncated"); + } + String traceId; + String spanId; + TraceFlags traceFlags = TraceFlags.getDefault(); + int pos = 1; + if (serialized[pos] == TRACE_ID_FIELD_ID) { + traceId = TraceId.fromBytes( + Arrays.copyOfRange(serialized, pos + ID_SIZE, pos + ID_SIZE + TRACE_ID_SIZE)); + pos += ID_SIZE + TRACE_ID_SIZE; + } else { + throw new IllegalArgumentException("Invalid input: expected trace ID at offset " + pos); + } + if (serialized[pos] == SPAN_ID_FIELD_ID) { + spanId = SpanId.fromBytes( + Arrays.copyOfRange(serialized, pos + ID_SIZE, pos + ID_SIZE + SPAN_ID_SIZE)); + pos += ID_SIZE + SPAN_ID_SIZE; + } else { + throw new IllegalArgumentException("Invalid input: expected span ID at offset " + pos); + } + if (serialized.length > pos && serialized[pos] == TRACE_FLAG_FIELD_ID) { + if (serialized.length < ALL_FORMAT_LENGTH) { + throw new IllegalArgumentException("Invalid input: truncated"); + } + traceFlags = TraceFlags.fromByte(serialized[pos + ID_SIZE]); + } + return SpanContext.create(traceId, spanId, traceFlags, TraceState.getDefault()); + } +} diff --git a/opentelemetry/src/main/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagator.java 
b/opentelemetry/src/main/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagator.java new file mode 100644 index 00000000000..4825b203529 --- /dev/null +++ b/opentelemetry/src/main/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagator.java @@ -0,0 +1,147 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.opentelemetry; + + +import static com.google.common.base.Preconditions.checkNotNull; +import static io.grpc.InternalMetadata.BASE64_ENCODING_OMIT_PADDING; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.io.BaseEncoding; +import io.grpc.ExperimentalApi; +import io.grpc.Metadata; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.propagation.TextMapGetter; +import io.opentelemetry.context.propagation.TextMapPropagator; +import io.opentelemetry.context.propagation.TextMapSetter; +import java.util.Collection; +import java.util.Collections; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * A {@link TextMapPropagator} for transmitting "grpc-trace-bin" span context. + * + *

This propagator can transmit the "grpc-trace-bin" context in either binary or Base64-encoded + * text format, depending on the capabilities of the provided {@link TextMapGetter} and + * {@link TextMapSetter}. + * + *

If the {@code TextMapGetter} and {@code TextMapSetter} only support text format, Base64 + * encoding and decoding will be used when communicating with the carrier API. But gRPC uses + * it with gRPC's metadata-based getter/setter, and the propagator can directly transmit the binary + * header, avoiding the need for Base64 encoding. + */ + +@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11400") +public final class GrpcTraceBinContextPropagator implements TextMapPropagator { + private static final Logger log = Logger.getLogger(GrpcTraceBinContextPropagator.class.getName()); + public static final String GRPC_TRACE_BIN_HEADER = "grpc-trace-bin"; + private final Metadata.BinaryMarshaller binaryFormat; + private static final GrpcTraceBinContextPropagator INSTANCE = + new GrpcTraceBinContextPropagator(BinaryFormat.getInstance()); + + public static GrpcTraceBinContextPropagator defaultInstance() { + return INSTANCE; + } + + @VisibleForTesting + GrpcTraceBinContextPropagator(Metadata.BinaryMarshaller binaryFormat) { + this.binaryFormat = checkNotNull(binaryFormat, "binaryFormat"); + } + + @Override + public Collection fields() { + return Collections.singleton(GRPC_TRACE_BIN_HEADER); + } + + @Override + public void inject(Context context, @Nullable C carrier, TextMapSetter setter) { + if (context == null || setter == null) { + return; + } + SpanContext spanContext = Span.fromContext(context).getSpanContext(); + if (!spanContext.isValid()) { + return; + } + try { + byte[] b = binaryFormat.toBytes(spanContext); + if (setter instanceof MetadataSetter) { + ((MetadataSetter) setter).set((Metadata) carrier, GRPC_TRACE_BIN_HEADER, b); + } else { + setter.set(carrier, GRPC_TRACE_BIN_HEADER, BASE64_ENCODING_OMIT_PADDING.encode(b)); + } + } catch (Exception e) { + log.log(Level.FINE, "Set grpc-trace-bin spanContext failed", e); + } + } + + @Override + public Context extract(Context context, @Nullable C carrier, TextMapGetter getter) { + if (context == null) { + return Context.root(); + } + if (getter == null) { + return context; + } + byte[] b; + if (getter instanceof MetadataGetter) { + try { + b = ((MetadataGetter) getter).getBinary((Metadata) carrier, GRPC_TRACE_BIN_HEADER); + if (b == null) { + log.log(Level.FINE, "No grpc-trace-bin present in carrier"); + return context; + } + } catch (Exception e) { + log.log(Level.FINE, "Get 'grpc-trace-bin' from MetadataGetter failed", e); + return context; + } + } else { + String value; + try { + value = getter.get(carrier, GRPC_TRACE_BIN_HEADER); + if (value == null) { + log.log(Level.FINE, "No grpc-trace-bin present in carrier"); + return context; + } + } catch (Exception e) { + log.log(Level.FINE, "Get 'grpc-trace-bin' from getter failed", e); + return context; + } + try { + b = BaseEncoding.base64().decode(value); + } catch (Exception e) { + log.log(Level.FINE, "Base64-decode spanContext bytes failed", e); + return context; + } + } + + SpanContext spanContext; + try { + spanContext = binaryFormat.parseBytes(b); + } catch (Exception e) { + log.log(Level.FINE, "Failed to parse tracing header", e); + return context; + } + if (!spanContext.isValid()) { + return context; + } + return context.with(Span.wrap(spanContext)); + } +} diff --git a/opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataGetter.java b/opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataGetter.java new file mode 100644 index 00000000000..f49c029f2fb --- /dev/null +++ b/opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataGetter.java @@ -0,0 +1,87 @@ +/* + * Copyright 
2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.opentelemetry; + + +import static io.grpc.InternalMetadata.BASE64_ENCODING_OMIT_PADDING; + +import io.grpc.Metadata; +import io.opentelemetry.context.propagation.TextMapGetter; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * A TextMapGetter that reads value from gRPC {@link Metadata}. Supports both text and binary + * headers. Supporting binary header is an optimization path for GrpcTraceBinContextPropagator + * to work around the lack of binary propagator API and thus avoid + * base64 (de)encoding when passing data between propagator API interfaces. + */ +final class MetadataGetter implements TextMapGetter { + private static final Logger logger = Logger.getLogger(MetadataGetter.class.getName()); + private static final MetadataGetter INSTANCE = new MetadataGetter(); + + public static MetadataGetter getInstance() { + return INSTANCE; + } + + @Override + public Iterable keys(Metadata carrier) { + return carrier.keys(); + } + + @Nullable + @Override + public String get(@Nullable Metadata carrier, String key) { + if (carrier == null) { + logger.log(Level.FINE, "Carrier is null, getting no data"); + return null; + } + try { + if (key.equals("grpc-trace-bin")) { + byte[] value = carrier.get(Metadata.Key.of(key, Metadata.BINARY_BYTE_MARSHALLER)); + if (value == null) { + return null; + } + return BASE64_ENCODING_OMIT_PADDING.encode(value); + } else { + return carrier.get(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER)); + } + } catch (Exception e) { + logger.log(Level.FINE, String.format("Failed to get metadata key %s", key), e); + return null; + } + } + + @Nullable + public byte[] getBinary(@Nullable Metadata carrier, String key) { + if (carrier == null) { + logger.log(Level.FINE, "Carrier is null, getting no data"); + return null; + } + if (!key.equals("grpc-trace-bin")) { + logger.log(Level.FINE, "Only support 'grpc-trace-bin' binary header. Get no data"); + return null; + } + try { + return carrier.get(Metadata.Key.of(key, Metadata.BINARY_BYTE_MARSHALLER)); + } catch (Exception e) { + logger.log(Level.FINE, String.format("Failed to get metadata key %s", key), e); + return null; + } + } +} diff --git a/opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataSetter.java b/opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataSetter.java new file mode 100644 index 00000000000..5892c7accfe --- /dev/null +++ b/opentelemetry/src/main/java/io/grpc/opentelemetry/MetadataSetter.java @@ -0,0 +1,74 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.opentelemetry; + + +import com.google.common.io.BaseEncoding; +import io.grpc.Metadata; +import io.opentelemetry.context.propagation.TextMapSetter; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * A {@link TextMapSetter} that sets value to gRPC {@link Metadata}. Supports both text and binary + * headers. Supporting binary header is an optimization path for GrpcTraceBinContextPropagator + * to work around the lack of binary propagator API and thus avoid + * base64 (de)encoding when passing data between propagator API interfaces. + */ +final class MetadataSetter implements TextMapSetter { + private static final Logger logger = Logger.getLogger(MetadataSetter.class.getName()); + private static final MetadataSetter INSTANCE = new MetadataSetter(); + + public static MetadataSetter getInstance() { + return INSTANCE; + } + + @Override + public void set(@Nullable Metadata carrier, String key, String value) { + if (carrier == null) { + logger.log(Level.FINE, "Carrier is null, setting no data"); + return; + } + try { + if (key.equals("grpc-trace-bin")) { + carrier.put(Metadata.Key.of(key, Metadata.BINARY_BYTE_MARSHALLER), + BaseEncoding.base64().decode(value)); + } else { + carrier.put(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value); + } + } catch (Exception e) { + logger.log(Level.INFO, String.format("Failed to set metadata, key=%s", key), e); + } + } + + void set(@Nullable Metadata carrier, String key, byte[] value) { + if (carrier == null) { + logger.log(Level.FINE, "Carrier is null, setting no data"); + return; + } + if (!key.equals("grpc-trace-bin")) { + logger.log(Level.INFO, "Only support 'grpc-trace-bin' binary header. Set no data"); + return; + } + try { + carrier.put(Metadata.Key.of(key, Metadata.BINARY_BYTE_MARSHALLER), value); + } catch (Exception e) { + logger.log(Level.INFO, String.format("Failed to set metadata key=%s", key), e); + } + } +} diff --git a/opentelemetry/src/test/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagatorTest.java b/opentelemetry/src/test/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagatorTest.java new file mode 100644 index 00000000000..f85b8067c26 --- /dev/null +++ b/opentelemetry/src/test/java/io/grpc/opentelemetry/GrpcTraceBinContextPropagatorTest.java @@ -0,0 +1,313 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.grpc.opentelemetry; + +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.InternalMetadata.BASE64_ENCODING_OMIT_PADDING; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableMap; +import io.grpc.Metadata; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.propagation.TextMapGetter; +import io.opentelemetry.context.propagation.TextMapSetter; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class GrpcTraceBinContextPropagatorTest { + private static final String TRACE_ID_BASE16 = "e384981d65129fa3e384981d65129fa3"; + private static final String SPAN_ID_BASE16 = "e384981d65129fa3"; + private static final String TRACE_HEADER_SAMPLED = + "0000" + TRACE_ID_BASE16 + "01" + SPAN_ID_BASE16 + "0201"; + private static final String TRACE_HEADER_NOT_SAMPLED = + "0000" + TRACE_ID_BASE16 + "01" + SPAN_ID_BASE16 + "0200"; + private final String goldenHeaderEncodedSampled = encode(TRACE_HEADER_SAMPLED); + private final String goldenHeaderEncodedNotSampled = encode(TRACE_HEADER_NOT_SAMPLED); + private static final TextMapSetter> setter = Map::put; + private static final TextMapGetter> getter = + new TextMapGetter>() { + @Override + public Iterable keys(Map carrier) { + return carrier.keySet(); + } + + @Nullable + @Override + public String get(Map carrier, String key) { + return carrier.get(key); + } + }; + private final GrpcTraceBinContextPropagator grpcTraceBinContextPropagator = + GrpcTraceBinContextPropagator.defaultInstance(); + + private static Context withSpanContext(SpanContext spanContext, Context context) { + return context.with(Span.wrap(spanContext)); + } + + private static SpanContext getSpanContext(Context context) { + return Span.fromContext(context).getSpanContext(); + } + + @Test + public void inject_map_Nothing() { + Map carrier = new HashMap<>(); + grpcTraceBinContextPropagator.inject(Context.current(), carrier, setter); + assertThat(carrier).hasSize(0); + } + + @Test + public void inject_map_invalidSpan() { + Map carrier = new HashMap<>(); + Context context = withSpanContext(SpanContext.getInvalid(), Context.current()); + grpcTraceBinContextPropagator.inject(context, carrier, setter); + assertThat(carrier).isEmpty(); + } + + @Test + public void inject_map_nullCarrier() { + Map carrier = new HashMap<>(); + Context context = + withSpanContext( + SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, TraceFlags.getSampled(), TraceState.getDefault()), + Context.current()); + grpcTraceBinContextPropagator.inject(context, null, + (TextMapSetter>) (ignored, key, value) -> carrier.put(key, value)); + assertThat(carrier) + .containsExactly( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, goldenHeaderEncodedSampled); + } + + @Test + public void inject_map_nullContext() { + Map carrier = new HashMap<>(); + grpcTraceBinContextPropagator.inject(null, carrier, setter); + assertThat(carrier).isEmpty(); + } + + @Test + public void inject_map_invalidBinaryFormat() { + GrpcTraceBinContextPropagator propagator = new GrpcTraceBinContextPropagator( + new Metadata.BinaryMarshaller() { + @Override + public byte[] toBytes(SpanContext 
value) { + throw new IllegalArgumentException("failed to byte"); + } + + @Override + public SpanContext parseBytes(byte[] serialized) { + return null; + } + }); + Map carrier = new HashMap<>(); + Context context = + withSpanContext( + SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, TraceFlags.getSampled(), TraceState.getDefault()), + Context.current()); + propagator.inject(context, carrier, setter); + assertThat(carrier).hasSize(0); + } + + @Test + public void inject_map_SampledContext() { + verify_inject_map(TraceFlags.getSampled(), goldenHeaderEncodedSampled); + } + + @Test + public void inject_map_NotSampledContext() { + verify_inject_map(TraceFlags.getDefault(), goldenHeaderEncodedNotSampled); + } + + private void verify_inject_map(TraceFlags traceFlags, String goldenHeader) { + Map carrier = new HashMap<>(); + Context context = + withSpanContext( + SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, traceFlags, TraceState.getDefault()), + Context.current()); + grpcTraceBinContextPropagator.inject(context, carrier, setter); + assertThat(carrier) + .containsExactly( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, goldenHeader); + } + + @Test + public void extract_map_nothing() { + Map carrier = new HashMap<>(); + assertThat(grpcTraceBinContextPropagator.extract(Context.current(), carrier, getter)) + .isSameInstanceAs(Context.current()); + } + + @Test + public void extract_map_SampledContext() { + verify_extract_map(TraceFlags.getSampled(), goldenHeaderEncodedSampled); + } + + @Test + public void extract_map_NotSampledContext() { + verify_extract_map(TraceFlags.getDefault(), goldenHeaderEncodedNotSampled); + } + + private void verify_extract_map(TraceFlags traceFlags, String goldenHeader) { + Map carrier = ImmutableMap.of( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, goldenHeader); + Context result = grpcTraceBinContextPropagator.extract(Context.current(), carrier, getter); + assertThat(getSpanContext(result)).isEqualTo(SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, traceFlags, TraceState.getDefault())); + } + + @Test + public void inject_metadata_Nothing() { + Metadata carrier = new Metadata(); + grpcTraceBinContextPropagator.inject(Context.current(), carrier, MetadataSetter.getInstance()); + assertThat(carrier.keys()).isEmpty(); + } + + @Test + public void inject_metadata_nullCarrier() { + Context context = + withSpanContext( + SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, TraceFlags.getSampled(), TraceState.getDefault()), + Context.current()); + grpcTraceBinContextPropagator.inject(context, null, MetadataSetter.getInstance()); + } + + @Test + public void inject_metadata_invalidSpan() { + Metadata carrier = new Metadata(); + Context context = withSpanContext(SpanContext.getInvalid(), Context.current()); + grpcTraceBinContextPropagator.inject(context, carrier, MetadataSetter.getInstance()); + assertThat(carrier.keys()).isEmpty(); + } + + @Test + public void inject_metadata_SampledContext() { + verify_inject_metadata(TraceFlags.getSampled(), hexStringToByteArray(TRACE_HEADER_SAMPLED)); + } + + @Test + public void inject_metadataSetter_NotSampledContext() { + verify_inject_metadata(TraceFlags.getDefault(), hexStringToByteArray(TRACE_HEADER_NOT_SAMPLED)); + } + + private void verify_inject_metadata(TraceFlags traceFlags, byte[] bytes) { + Metadata metadata = new Metadata(); + Context context = + withSpanContext( + SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, traceFlags, TraceState.getDefault()), + Context.current()); + 
grpcTraceBinContextPropagator.inject(context, metadata, MetadataSetter.getInstance()); + byte[] injected = metadata.get(Metadata.Key.of( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, Metadata.BINARY_BYTE_MARSHALLER)); + assertTrue(Arrays.equals(injected, bytes)); + } + + @Test + public void extract_metadata_nothing() { + assertThat(grpcTraceBinContextPropagator.extract( + Context.current(), new Metadata(), MetadataGetter.getInstance())) + .isSameInstanceAs(Context.current()); + } + + @Test + public void extract_metadata_nullCarrier() { + assertThat(grpcTraceBinContextPropagator.extract( + Context.current(), null, MetadataGetter.getInstance())) + .isSameInstanceAs(Context.current()); + } + + @Test + public void extract_metadata_SampledContext() { + verify_extract_metadata(TraceFlags.getSampled(), TRACE_HEADER_SAMPLED); + } + + @Test + public void extract_metadataGetter_NotSampledContext() { + verify_extract_metadata(TraceFlags.getDefault(), TRACE_HEADER_NOT_SAMPLED); + } + + private void verify_extract_metadata(TraceFlags traceFlags, String hex) { + Metadata carrier = new Metadata(); + carrier.put(Metadata.Key.of( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, Metadata.BINARY_BYTE_MARSHALLER), + hexStringToByteArray(hex)); + Context result = grpcTraceBinContextPropagator.extract(Context.current(), carrier, + MetadataGetter.getInstance()); + assertThat(getSpanContext(result)).isEqualTo(SpanContext.create( + TRACE_ID_BASE16, SPAN_ID_BASE16, traceFlags, TraceState.getDefault())); + } + + @Test + public void extract_metadata_invalidBinaryFormat() { + GrpcTraceBinContextPropagator propagator = new GrpcTraceBinContextPropagator( + new Metadata.BinaryMarshaller() { + @Override + public byte[] toBytes(SpanContext value) { + return new byte[0]; + } + + @Override + public SpanContext parseBytes(byte[] serialized) { + throw new IllegalArgumentException("failed to byte"); + } + }); + Metadata carrier = new Metadata(); + carrier.put(Metadata.Key.of( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, Metadata.BINARY_BYTE_MARSHALLER), + hexStringToByteArray(TRACE_HEADER_SAMPLED)); + assertThat(propagator.extract(Context.current(), carrier, MetadataGetter.getInstance())) + .isSameInstanceAs(Context.current()); + } + + @Test + public void extract_metadata_invalidBinaryFormatVersion() { + Metadata carrier = new Metadata(); + carrier.put(Metadata.Key.of( + GrpcTraceBinContextPropagator.GRPC_TRACE_BIN_HEADER, Metadata.BINARY_BYTE_MARSHALLER), + hexStringToByteArray("0100" + TRACE_ID_BASE16 + "01" + SPAN_ID_BASE16 + "0201")); + assertThat(grpcTraceBinContextPropagator.extract( + Context.current(), carrier, MetadataGetter.getInstance())) + .isSameInstanceAs(Context.current()); + } + + private static String encode(String hex) { + return BASE64_ENCODING_OMIT_PADDING.encode(hexStringToByteArray(hex)); + } + + private static byte[] hexStringToByteArray(String s) { + int len = s.length(); + byte[] data = new byte[len / 2]; + for (int i = 0; i < len; i += 2) { + data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4) + + Character.digit(s.charAt(i + 1), 16)); + } + return data; + } +} diff --git a/opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataGetterTest.java b/opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataGetterTest.java new file mode 100644 index 00000000000..5934240e5c2 --- /dev/null +++ b/opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataGetterTest.java @@ -0,0 +1,96 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.opentelemetry; + +import static io.grpc.InternalMetadata.BASE64_ENCODING_OMIT_PADDING; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import io.grpc.Metadata; +import java.nio.charset.Charset; +import java.util.Iterator; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MetadataGetterTest { + private final MetadataGetter metadataGetter = MetadataGetter.getInstance(); + + @Test + public void getBinaryGrpcTraceBin() { + Metadata metadata = new Metadata(); + byte[] b = "generated".getBytes(Charset.defaultCharset()); + Metadata.Key grpc_trace_bin_key = + Metadata.Key.of("grpc-trace-bin", Metadata.BINARY_BYTE_MARSHALLER); + metadata.put(grpc_trace_bin_key, b); + assertArrayEquals(b, metadataGetter.getBinary(metadata, "grpc-trace-bin")); + } + + @Test + public void getBinaryEmptyMetadata() { + assertNull(metadataGetter.getBinary(new Metadata(), "grpc-trace-bin")); + } + + @Test + public void getBinaryNotGrpcTraceBin() { + Metadata metadata = new Metadata(); + byte[] b = "generated".getBytes(Charset.defaultCharset()); + Metadata.Key grpc_trace_bin_key = + Metadata.Key.of("another-bin", Metadata.BINARY_BYTE_MARSHALLER); + metadata.put(grpc_trace_bin_key, b); + assertNull(metadataGetter.getBinary(metadata, "another-bin")); + } + + @Test + public void getTextEmptyMetadata() { + assertNull(metadataGetter.get(new Metadata(), "a-key")); + } + + @Test + public void getTextBinHeader() { + assertNull(metadataGetter.get(new Metadata(), "a-key-bin")); + } + + @Test + public void getTestGrpcTraceBin() { + Metadata metadata = new Metadata(); + byte[] b = "generated".getBytes(Charset.defaultCharset()); + Metadata.Key grpc_trace_bin_key = + Metadata.Key.of("grpc-trace-bin", Metadata.BINARY_BYTE_MARSHALLER); + metadata.put(grpc_trace_bin_key, b); + assertEquals(BASE64_ENCODING_OMIT_PADDING.encode(b), + metadataGetter.get(metadata, "grpc-trace-bin")); + } + + @Test + public void getText() { + Metadata metadata = new Metadata(); + Metadata.Key other_key = + Metadata.Key.of("other", Metadata.ASCII_STRING_MARSHALLER); + metadata.put(other_key, "header-value"); + assertEquals("header-value", metadataGetter.get(metadata, "other")); + + Iterator iterator = metadataGetter.keys(metadata).iterator(); + assertTrue(iterator.hasNext()); + assertEquals("other", iterator.next()); + assertFalse(iterator.hasNext()); + } +} diff --git a/opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataSetterTest.java b/opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataSetterTest.java new file mode 100644 index 00000000000..fcd85480bb9 --- /dev/null +++ b/opentelemetry/src/test/java/io/grpc/opentelemetry/MetadataSetterTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.opentelemetry; + +import static io.grpc.InternalMetadata.BASE64_ENCODING_OMIT_PADDING; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import io.grpc.Metadata; +import java.nio.charset.Charset; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MetadataSetterTest { + private final MetadataSetter metadataSetter = MetadataSetter.getInstance(); + + @Test + public void setGrpcTraceBin() { + Metadata metadata = new Metadata(); + byte[] b = "generated".getBytes(Charset.defaultCharset()); + Metadata.Key grpc_trace_bin_key = + Metadata.Key.of("grpc-trace-bin", Metadata.BINARY_BYTE_MARSHALLER); + metadataSetter.set(metadata, "grpc-trace-bin", b); + assertArrayEquals(b, metadata.get(grpc_trace_bin_key)); + } + + @Test + public void setOtherBinaryKey() { + Metadata metadata = new Metadata(); + byte[] b = "generated".getBytes(Charset.defaultCharset()); + Metadata.Key other_key = + Metadata.Key.of("for-test-bin", Metadata.BINARY_BYTE_MARSHALLER); + metadataSetter.set(metadata, other_key.name(), b); + assertNull(metadata.get(other_key)); + } + + @Test + public void setText() { + Metadata metadata = new Metadata(); + String v = "generated"; + Metadata.Key textKey = + Metadata.Key.of("text-key", Metadata.ASCII_STRING_MARSHALLER); + metadataSetter.set(metadata, textKey.name(), v); + assertEquals(metadata.get(textKey), v); + } + + @Test + public void setTextBin() { + Metadata metadata = new Metadata(); + Metadata.Key other_key = + Metadata.Key.of("for-test-bin", Metadata.BINARY_BYTE_MARSHALLER); + metadataSetter.set(metadata, other_key.name(), "generated"); + assertNull(metadata.get(other_key)); + } + + @Test + public void setTextGrpcTraceBin() { + Metadata metadata = new Metadata(); + byte[] b = "generated".getBytes(Charset.defaultCharset()); + metadataSetter.set(metadata, "grpc-trace-bin", BASE64_ENCODING_OMIT_PADDING.encode(b)); + + Metadata.Key grpc_trace_bin_key = + Metadata.Key.of("grpc-trace-bin", Metadata.BINARY_BYTE_MARSHALLER); + assertArrayEquals(metadata.get(grpc_trace_bin_key), b); + } +} From 0d2ad890164dd88a5e8819354e3fa6243211002c Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 8 Aug 2024 07:35:01 -0700 Subject: [PATCH 41/53] xds: Remove useless ExperimentalApi for WRR A package-private class isn't visible and `@Internal` is stronger than experimental. The only way users should use WRR is via the weight_round_robin string, and that's already not suffixed with _experimental. 
Closes #9885 --- .../main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java | 2 -- .../io/grpc/xds/WeightedRoundRobinLoadBalancerProvider.java | 2 -- 2 files changed, 4 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index abcb0941fd9..f45bb571a36 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -29,7 +29,6 @@ import io.grpc.Deadline.Ticker; import io.grpc.DoubleHistogramMetricInstrument; import io.grpc.EquivalentAddressGroup; -import io.grpc.ExperimentalApi; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerProvider; import io.grpc.LongCounterMetricInstrument; @@ -87,7 +86,6 @@ * * See related documentation: https://cloud.google.com/service-mesh/legacy/load-balancing-apis/proxyless-configure-advanced-traffic-management#custom-lb-config */ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/9885") final class WeightedRoundRobinLoadBalancer extends MultiChildLoadBalancer { private static final LongCounterMetricInstrument RR_FALLBACK_COUNTER; diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancerProvider.java index 161e7c4ed0c..433ea34b857 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancerProvider.java @@ -18,7 +18,6 @@ import com.google.common.annotations.VisibleForTesting; import io.grpc.Deadline; -import io.grpc.ExperimentalApi; import io.grpc.Internal; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; @@ -32,7 +31,6 @@ /** * Provides a {@link WeightedRoundRobinLoadBalancer}. * */ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/9885") @Internal public final class WeightedRoundRobinLoadBalancerProvider extends LoadBalancerProvider { From 0d47f5bd1baff87d412223c4ac22ea061eafb506 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 12 Aug 2024 11:23:37 -0700 Subject: [PATCH 42/53] xds: WRRPicker must not access unsynchronized data in ChildLbState There was no point to using subchannels as keys to subchannelToReportListenerMap, as the listener is per-child. That meant the keys would be guaranteed to be known ahead-of-time and the unsynchronized getOrCreateOrcaListener() during picking was unnecessary. The picker still stores ChildLbStates to make sure that updating weights uses the correct children, but the picker itself no longer references ChildLbStates except in the constructor. That means weight calculation is moved into the LB policy, as child.getWeight() is unsynchronized, and the picker no longer needs a reference to helper. 
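To make the column-based layout described above concrete, here is a minimal sketch. It is not grpc-java code: ChildPicker, ReportListener, and ParallelListPicker are hypothetical stand-ins, and plain round-robin index selection stands in for the real weighted scheduler. What it illustrates is that everything the pick path needs is copied into immutable, index-aligned lists while still on the LB's synchronization context, so a pick never reads mutable child state and never looks up a listener in a map keyed by subchannel.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-ins for grpc-java's SubchannelPicker and the per-child ORCA report listener.
interface ChildPicker { String pickSubchannel(); }
interface ReportListener { void onLoadReport(double cpuUtilization); }

/** Column-based storage: index i of 'pickers' and 'listeners' refers to the same child. */
final class ParallelListPicker {
  private final List<ChildPicker> pickers;
  private final List<ReportListener> listeners;
  private final AtomicInteger sequence;

  /** Built on the LB's synchronization context; the snapshots are immutable afterwards. */
  ParallelListPicker(List<ChildPicker> pickers, List<ReportListener> listeners,
      AtomicInteger sequence) {
    if (pickers.isEmpty() || pickers.size() != listeners.size()) {
      throw new IllegalArgumentException("parallel lists must be non-empty and the same size");
    }
    this.pickers = Collections.unmodifiableList(new ArrayList<>(pickers));
    this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners));
    this.sequence = sequence;
  }

  /** A pick pairs the delegate's result with the listener captured for the same index. */
  static final class Pick {
    final String subchannel;
    final ReportListener listener;

    Pick(String subchannel, ReportListener listener) {
      this.subchannel = subchannel;
      this.listener = listener;
    }
  }

  /** Hot path: touches only the immutable lists, so no extra synchronization is needed. */
  Pick pick() {
    int index = Math.floorMod(sequence.getAndIncrement(), pickers.size());
    return new Pick(pickers.get(index).pickSubchannel(), listeners.get(index));
  }
}

Because the listener is found by position rather than by looking up the picked subchannel, the unsynchronized getOrCreateOrcaListener() call on the pick path goes away, which is the data race the change removes.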
--- .../xds/WeightedRoundRobinLoadBalancer.java | 132 +++++++++--------- .../WeightedRoundRobinLoadBalancerTest.java | 2 +- 2 files changed, 67 insertions(+), 67 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index f45bb571a36..e4502da8740 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -44,11 +44,10 @@ import io.grpc.xds.orca.OrcaOobUtil.OrcaOobReportListener; import io.grpc.xds.orca.OrcaPerRequestUtil; import io.grpc.xds.orca.OrcaPerRequestUtil.OrcaPerRequestReportListener; +import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.ScheduledExecutorService; @@ -233,9 +232,44 @@ protected void updateOverallBalancingState() { } private SubchannelPicker createReadyPicker(Collection activeList) { - return new WeightedRoundRobinPicker(ImmutableList.copyOf(activeList), - config.enableOobLoadReport, config.errorUtilizationPenalty, sequence, getHelper(), - locality); + WeightedRoundRobinPicker picker = new WeightedRoundRobinPicker(ImmutableList.copyOf(activeList), + config.enableOobLoadReport, config.errorUtilizationPenalty, sequence); + updateWeight(picker); + return picker; + } + + private void updateWeight(WeightedRoundRobinPicker picker) { + Helper helper = getHelper(); + float[] newWeights = new float[picker.children.size()]; + AtomicInteger staleEndpoints = new AtomicInteger(); + AtomicInteger notYetUsableEndpoints = new AtomicInteger(); + for (int i = 0; i < picker.children.size(); i++) { + double newWeight = ((WeightedChildLbState) picker.children.get(i)).getWeight(staleEndpoints, + notYetUsableEndpoints); + helper.getMetricRecorder() + .recordDoubleHistogram(ENDPOINT_WEIGHTS_HISTOGRAM, newWeight, + ImmutableList.of(helper.getChannelTarget()), + ImmutableList.of(locality)); + newWeights[i] = newWeight > 0 ? 
(float) newWeight : 0.0f; + } + + if (staleEndpoints.get() > 0) { + helper.getMetricRecorder() + .addLongCounter(ENDPOINT_WEIGHT_STALE_COUNTER, staleEndpoints.get(), + ImmutableList.of(helper.getChannelTarget()), + ImmutableList.of(locality)); + } + if (notYetUsableEndpoints.get() > 0) { + helper.getMetricRecorder() + .addLongCounter(ENDPOINT_WEIGHT_NOT_YET_USEABLE_COUNTER, notYetUsableEndpoints.get(), + ImmutableList.of(helper.getChannelTarget()), ImmutableList.of(locality)); + } + boolean weightsEffective = picker.updateWeight(newWeights); + if (!weightsEffective) { + helper.getMetricRecorder() + .addLongCounter(RR_FALLBACK_COUNTER, 1, ImmutableList.of(helper.getChannelTarget()), + ImmutableList.of(locality)); + } } private void updateBalancingState(ConnectivityState state, SubchannelPicker picker) { @@ -345,7 +379,7 @@ private final class UpdateWeightTask implements Runnable { @Override public void run() { if (currentPicker != null && currentPicker instanceof WeightedRoundRobinPicker) { - ((WeightedRoundRobinPicker) currentPicker).updateWeight(); + updateWeight((WeightedRoundRobinPicker) currentPicker); } weightUpdateTimer = syncContext.schedule(this, config.weightUpdatePeriodNanos, TimeUnit.NANOSECONDS, timeService); @@ -415,53 +449,50 @@ public void shutdown() { @VisibleForTesting static final class WeightedRoundRobinPicker extends SubchannelPicker { - private final List children; - private final Map subchannelToReportListenerMap = - new HashMap<>(); + // Parallel lists (column-based storage instead of normal row-based storage of List). + // The ith element of children corresponds to the ith element of pickers, listeners, and even + // updateWeight(float[]). + private final List children; // May only be accessed from sync context + private final List pickers; + private final List reportListeners; private final boolean enableOobLoadReport; private final float errorUtilizationPenalty; private final AtomicInteger sequence; private final int hashCode; - private final LoadBalancer.Helper helper; - private final String locality; private volatile StaticStrideScheduler scheduler; WeightedRoundRobinPicker(List children, boolean enableOobLoadReport, - float errorUtilizationPenalty, AtomicInteger sequence, LoadBalancer.Helper helper, - String locality) { + float errorUtilizationPenalty, AtomicInteger sequence) { checkNotNull(children, "children"); Preconditions.checkArgument(!children.isEmpty(), "empty child list"); this.children = children; + List pickers = new ArrayList<>(children.size()); + List reportListeners = new ArrayList<>(children.size()); for (ChildLbState child : children) { WeightedChildLbState wChild = (WeightedChildLbState) child; - for (WrrSubchannel subchannel : wChild.subchannels) { - this.subchannelToReportListenerMap - .put(subchannel, wChild.getOrCreateOrcaListener(errorUtilizationPenalty)); - } + pickers.add(wChild.getCurrentPicker()); + reportListeners.add(wChild.getOrCreateOrcaListener(errorUtilizationPenalty)); } + this.pickers = pickers; + this.reportListeners = reportListeners; this.enableOobLoadReport = enableOobLoadReport; this.errorUtilizationPenalty = errorUtilizationPenalty; this.sequence = checkNotNull(sequence, "sequence"); - this.helper = helper; - this.locality = checkNotNull(locality, "locality"); - // For equality we treat children as a set; use hash code as defined by Set + // For equality we treat pickers as a set; use hash code as defined by Set int sum = 0; - for (ChildLbState child : children) { - sum += child.hashCode(); + for (SubchannelPicker picker 
: pickers) { + sum += picker.hashCode(); } this.hashCode = sum ^ Boolean.hashCode(enableOobLoadReport) ^ Float.hashCode(errorUtilizationPenalty); - - updateWeight(); } @Override public PickResult pickSubchannel(PickSubchannelArgs args) { - ChildLbState childLbState = children.get(scheduler.pick()); - WeightedChildLbState wChild = (WeightedChildLbState) childLbState; - PickResult pickResult = childLbState.getCurrentPicker().pickSubchannel(args); + int pick = scheduler.pick(); + PickResult pickResult = pickers.get(pick).pickSubchannel(args); Subchannel subchannel = pickResult.getSubchannel(); if (subchannel == null) { return pickResult; @@ -469,48 +500,16 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { if (!enableOobLoadReport) { return PickResult.withSubchannel(subchannel, OrcaPerRequestUtil.getInstance().newOrcaClientStreamTracerFactory( - subchannelToReportListenerMap.getOrDefault(subchannel, - wChild.getOrCreateOrcaListener(errorUtilizationPenalty)))); + reportListeners.get(pick))); } else { return PickResult.withSubchannel(subchannel); } } - private void updateWeight() { - float[] newWeights = new float[children.size()]; - AtomicInteger staleEndpoints = new AtomicInteger(); - AtomicInteger notYetUsableEndpoints = new AtomicInteger(); - for (int i = 0; i < children.size(); i++) { - double newWeight = ((WeightedChildLbState) children.get(i)).getWeight(staleEndpoints, - notYetUsableEndpoints); - // TODO: add locality label once available - helper.getMetricRecorder() - .recordDoubleHistogram(ENDPOINT_WEIGHTS_HISTOGRAM, newWeight, - ImmutableList.of(helper.getChannelTarget()), - ImmutableList.of(locality)); - newWeights[i] = newWeight > 0 ? (float) newWeight : 0.0f; - } - if (staleEndpoints.get() > 0) { - // TODO: add locality label once available - helper.getMetricRecorder() - .addLongCounter(ENDPOINT_WEIGHT_STALE_COUNTER, staleEndpoints.get(), - ImmutableList.of(helper.getChannelTarget()), - ImmutableList.of(locality)); - } - if (notYetUsableEndpoints.get() > 0) { - // TODO: add locality label once available - helper.getMetricRecorder() - .addLongCounter(ENDPOINT_WEIGHT_NOT_YET_USEABLE_COUNTER, notYetUsableEndpoints.get(), - ImmutableList.of(helper.getChannelTarget()), ImmutableList.of(locality)); - } - + /** Returns {@code true} if weights are different than round_robin. 
*/ + private boolean updateWeight(float[] newWeights) { this.scheduler = new StaticStrideScheduler(newWeights, sequence); - if (this.scheduler.usesRoundRobin()) { - // TODO: locality label once available - helper.getMetricRecorder() - .addLongCounter(RR_FALLBACK_COUNTER, 1, ImmutableList.of(helper.getChannelTarget()), - ImmutableList.of(locality)); - } + return !this.scheduler.usesRoundRobin(); } @Override @@ -518,7 +517,8 @@ public String toString() { return MoreObjects.toStringHelper(WeightedRoundRobinPicker.class) .add("enableOobLoadReport", enableOobLoadReport) .add("errorUtilizationPenalty", errorUtilizationPenalty) - .add("list", children).toString(); + .add("pickers", pickers) + .toString(); } @VisibleForTesting @@ -545,8 +545,8 @@ public boolean equals(Object o) { && sequence == other.sequence && enableOobLoadReport == other.enableOobLoadReport && Float.compare(errorUtilizationPenalty, other.errorUtilizationPenalty) == 0 - && children.size() == other.children.size() - && new HashSet<>(children).containsAll(other.children); + && pickers.size() == other.pickers.size() + && new HashSet<>(pickers).containsAll(other.pickers); } } diff --git a/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java index dd98f1e1ae6..05ad1f56ece 100644 --- a/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/WeightedRoundRobinLoadBalancerTest.java @@ -244,7 +244,7 @@ public void wrrLifeCycle() { String weightedPickerStr = weightedPicker.toString(); assertThat(weightedPickerStr).contains("enableOobLoadReport=false"); assertThat(weightedPickerStr).contains("errorUtilizationPenalty=1.0"); - assertThat(weightedPickerStr).contains("list="); + assertThat(weightedPickerStr).contains("pickers="); WeightedChildLbState weightedChild1 = (WeightedChildLbState) getChild(weightedPicker, 0); WeightedChildLbState weightedChild2 = (WeightedChildLbState) getChild(weightedPicker, 1); From 4ab34229fb32f00a2146302e16e226080d13a4aa Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 9 Aug 2024 16:21:19 -0700 Subject: [PATCH 43/53] netty: Use DefaultELG with LocalChannel in test LocalChannel is not guaranteed to be compatible with NioEventLoopGroup, and is failing with Netty 4.2.0.Alpha3-SNAPSHOT. 
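The following is a hedged illustration of that compatibility rule, separate from the repository's code: Netty's LocalChannel and LocalServerChannel are in-process transports and need an event loop type that can drive them, such as DefaultEventLoopGroup, whereas NioEventLoopGroup targets NIO socket channels. The address name "demo" and the empty handlers are placeholders.

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.DefaultEventLoopGroup;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.local.LocalAddress;
import io.netty.channel.local.LocalChannel;
import io.netty.channel.local.LocalServerChannel;
import java.util.concurrent.TimeUnit;

public final class LocalChannelDemo {
  public static void main(String[] args) throws Exception {
    // DefaultEventLoopGroup can run non-NIO channel types such as LocalChannel.
    EventLoopGroup group = new DefaultEventLoopGroup(1);
    try {
      Channel server = new ServerBootstrap()
          .group(group)
          .channel(LocalServerChannel.class)
          .childHandler(new ChannelInitializer<LocalChannel>() {
            @Override
            protected void initChannel(LocalChannel ch) {}
          })
          .bind(new LocalAddress("demo"))  // in-process address, no sockets involved
          .sync()
          .channel();

      Channel client = new Bootstrap()
          .group(group)  // swapping in a NioEventLoopGroup here is what cannot be relied on
          .channel(LocalChannel.class)
          .handler(new ChannelInboundHandlerAdapter())
          .connect(server.localAddress())
          .sync()
          .channel();

      client.close().sync();
      server.close().sync();
    } finally {
      group.shutdownGracefully(0, 10, TimeUnit.SECONDS);
    }
  }
}

The test change that follows applies the same idea: it creates a DefaultEventLoopGroup(1) for the LocalChannel-based transport and shuts it down in a finally block.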
See #11447 --- .../grpc/netty/NettyClientTransportTest.java | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java b/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java index f94960cbab3..b40cd9d5607 100644 --- a/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java +++ b/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java @@ -82,6 +82,7 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPromise; +import io.netty.channel.DefaultEventLoopGroup; import io.netty.channel.EventLoopGroup; import io.netty.channel.ReflectiveChannelFactory; import io.netty.channel.local.LocalChannel; @@ -519,15 +520,20 @@ public void channelFactoryShouldSetSocketOptionKeepAlive() throws Exception { @Test public void channelFactoryShouldNNotSetSocketOptionKeepAlive() throws Exception { startServer(); - NettyClientTransport transport = newTransport(newNegotiator(), - DEFAULT_MAX_MESSAGE_SIZE, GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE, "testUserAgent", true, - TimeUnit.SECONDS.toNanos(10L), TimeUnit.SECONDS.toNanos(1L), - new ReflectiveChannelFactory<>(LocalChannel.class), group); + DefaultEventLoopGroup group = new DefaultEventLoopGroup(1); + try { + NettyClientTransport transport = newTransport(newNegotiator(), + DEFAULT_MAX_MESSAGE_SIZE, GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE, "testUserAgent", true, + TimeUnit.SECONDS.toNanos(10L), TimeUnit.SECONDS.toNanos(1L), + new ReflectiveChannelFactory<>(LocalChannel.class), group); - callMeMaybe(transport.start(clientTransportListener)); + callMeMaybe(transport.start(clientTransportListener)); - assertThat(transport.channel().config().getOption(ChannelOption.SO_KEEPALIVE)) - .isNull(); + assertThat(transport.channel().config().getOption(ChannelOption.SO_KEEPALIVE)) + .isNull(); + } finally { + group.shutdownGracefully(0, 10, TimeUnit.SECONDS); + } } @Test From a6f8ebf33dd34e202dcd253f404606f161ddb741 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 25 Jul 2024 15:25:42 -0700 Subject: [PATCH 44/53] Remove implicit requestConnection() on IDLE from MultiChildLB One LB no longer needs to extend ChildLbState and one has to start, so it is a bit of a wash. There are more LBs that need the auto-request logic, but if we have an API where subclasses override it without calling super then we can't change the implementation in the future. Adding behavior on top of a base class allows subclasses to call super, which lets the base class change over time. --- .../io/grpc/util/MultiChildLoadBalancer.java | 5 -- .../io/grpc/util/RoundRobinLoadBalancer.java | 19 +++++++ .../io/grpc/xds/LeastRequestLoadBalancer.java | 13 +++++ .../io/grpc/xds/RingHashLoadBalancer.java | 49 +++---------------- .../xds/WeightedRoundRobinLoadBalancer.java | 8 +++ .../io/grpc/xds/RingHashLoadBalancerTest.java | 6 +-- 6 files changed, 48 insertions(+), 52 deletions(-) diff --git a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java index f2e2cc617ee..951721adced 100644 --- a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java @@ -456,8 +456,6 @@ protected class ChildLbStateHelper extends ForwardingLoadBalancerHelper { /** * Update current state and picker for this child and then use * {@link #updateOverallBalancingState()} for the parent LB. - * - *

Override this if you don't want to automatically request a connection when in IDLE */ @Override public void updateBalancingState(final ConnectivityState newState, @@ -471,9 +469,6 @@ public void updateBalancingState(final ConnectivityState newState, // If we are already in the process of resolving addresses, the overall balancing state // will be updated at the end of it, and we don't need to trigger that update here. if (!resolvingAddresses) { - if (newState == IDLE) { - lb.requestConnection(); - } updateOverallBalancingState(); } } diff --git a/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java b/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java index 7c235bb3640..765e2a4d4b6 100644 --- a/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java @@ -95,6 +95,25 @@ private SubchannelPicker createReadyPicker(Collection children) { return new ReadyPicker(pickerList, sequence); } + @Override + protected ChildLbState createChildLbState(Object key, Object policyConfig, + SubchannelPicker initialPicker, ResolvedAddresses resolvedAddresses) { + return new ChildLbState(key, pickFirstLbProvider, policyConfig, initialPicker) { + @Override + protected ChildLbStateHelper createChildHelper() { + return new ChildLbStateHelper() { + @Override + public void updateBalancingState(ConnectivityState newState, SubchannelPicker newPicker) { + super.updateBalancingState(newState, newPicker); + if (!resolvingAddresses && newState == IDLE) { + getLb().requestConnection(); + } + } + }; + } + }; + } + @VisibleForTesting static class ReadyPicker extends SubchannelPicker { private final List subchannelPickers; // non-empty diff --git a/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java b/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java index f96c171ee9c..a11622d492f 100644 --- a/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java @@ -328,5 +328,18 @@ public LeastRequestLbState(Object key, LoadBalancerProvider policyProvider, int getActiveRequests() { return activeRequests.get(); } + + @Override + protected ChildLbStateHelper createChildHelper() { + return new ChildLbStateHelper() { + @Override + public void updateBalancingState(ConnectivityState newState, SubchannelPicker newPicker) { + super.updateBalancingState(newState, newPicker); + if (!resolvingAddresses && newState == IDLE) { + getLb().requestConnection(); + } + } + }; + } } } diff --git a/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java b/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java index 3b7e451f2a5..72618b7bbaa 100644 --- a/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java @@ -229,7 +229,7 @@ protected void updateOverallBalancingState() { @Override protected ChildLbState createChildLbState(Object key, Object policyConfig, SubchannelPicker initialPicker, ResolvedAddresses resolvedAddresses) { - return new RingHashChildLbState((Endpoint)key); + return new ChildLbState(key, lazyLbFactory, null, EMPTY_PICKER); } private Status validateAddrList(List addrList) { @@ -358,7 +358,7 @@ private RingHashPicker( this.ring = ring; pickableSubchannels = new HashMap<>(subchannels.size()); for (Map.Entry entry : subchannels.entrySet()) { - RingHashChildLbState childLbState = (RingHashChildLbState) entry.getValue(); + ChildLbState childLbState = entry.getValue(); pickableSubchannels.put((Endpoint)entry.getKey(), 
new SubchannelView(childLbState, childLbState.getCurrentState())); } @@ -405,7 +405,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { for (int i = 0; i < ring.size(); i++) { int index = (targetIndex + i) % ring.size(); SubchannelView subchannelView = pickableSubchannels.get(ring.get(index).addrKey); - RingHashChildLbState childLbState = subchannelView.childLbState; + ChildLbState childLbState = subchannelView.childLbState; if (subchannelView.connectivityState == READY) { return childLbState.getCurrentPicker().pickSubchannel(args); @@ -427,7 +427,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { } // return the pick from the original subchannel hit by hash, which is probably an error - RingHashChildLbState originalSubchannel = + ChildLbState originalSubchannel = pickableSubchannels.get(ring.get(targetIndex).addrKey).childLbState; return originalSubchannel.getCurrentPicker().pickSubchannel(args); } @@ -439,10 +439,10 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { * state changes. */ private static final class SubchannelView { - private final RingHashChildLbState childLbState; + private final ChildLbState childLbState; private final ConnectivityState connectivityState; - private SubchannelView(RingHashChildLbState childLbState, ConnectivityState state) { + private SubchannelView(ChildLbState childLbState, ConnectivityState state) { this.childLbState = childLbState; this.connectivityState = state; } @@ -487,41 +487,4 @@ public String toString() { .toString(); } } - - class RingHashChildLbState extends MultiChildLoadBalancer.ChildLbState { - - public RingHashChildLbState(Endpoint key) { - super(key, lazyLbFactory, null, EMPTY_PICKER); - } - - @Override - protected ChildLbStateHelper createChildHelper() { - return new RingHashChildHelper(); - } - - // Need to expose this to the LB class - @Override - protected void shutdown() { - super.shutdown(); - } - - private class RingHashChildHelper extends ChildLbStateHelper { - @Override - public void updateBalancingState(final ConnectivityState newState, - final SubchannelPicker newPicker) { - setCurrentState(newState); - setCurrentPicker(newPicker); - - if (getChildLbState(getKey()) == null) { - return; - } - - // If we are already in the process of resolving addresses, the overall balancing state - // will be updated at the end of it, and we don't need to trigger that update here. 
- if (!resolvingAddresses) { - updateOverallBalancingState(); - } - } - } - } } diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index e4502da8740..388eca2579d 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -340,6 +340,14 @@ final class WrrChildLbStateHelper extends ChildLbStateHelper { public Subchannel createSubchannel(CreateSubchannelArgs args) { return new WrrSubchannel(super.createSubchannel(args), WeightedChildLbState.this); } + + @Override + public void updateBalancingState(ConnectivityState newState, SubchannelPicker newPicker) { + super.updateBalancingState(newState, newPicker); + if (!resolvingAddresses && newState == ConnectivityState.IDLE) { + getLb().requestConnection(); + } + } } final class OrcaReportListener implements OrcaPerRequestReportListener, OrcaOobReportListener { diff --git a/xds/src/test/java/io/grpc/xds/RingHashLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/RingHashLoadBalancerTest.java index de871cdd8f1..047ba71bbe0 100644 --- a/xds/src/test/java/io/grpc/xds/RingHashLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/RingHashLoadBalancerTest.java @@ -66,7 +66,6 @@ import io.grpc.testing.TestMethodDescriptors; import io.grpc.util.AbstractTestHelper; import io.grpc.util.MultiChildLoadBalancer.ChildLbState; -import io.grpc.xds.RingHashLoadBalancer.RingHashChildLbState; import io.grpc.xds.RingHashLoadBalancer.RingHashConfig; import java.lang.Thread.UncaughtExceptionHandler; import java.net.SocketAddress; @@ -177,8 +176,7 @@ public void subchannelNotAutoReconnectAfterReenteringIdle() { assertThat(addressesAcceptanceStatus.isOk()).isTrue(); verify(helper).updateBalancingState(eq(IDLE), pickerCaptor.capture()); - RingHashChildLbState childLbState = - (RingHashChildLbState) loadBalancer.getChildLbStates().iterator().next(); + ChildLbState childLbState = loadBalancer.getChildLbStates().iterator().next(); assertThat(subchannels.get(Collections.singletonList(childLbState.getEag()))).isNull(); // Picking subchannel triggers connection. @@ -422,7 +420,7 @@ public void skipFailingHosts_pickNextNonFailingHost() { assertThat(addressesAcceptanceStatus.isOk()).isTrue(); // Create subchannel for the first address - ((RingHashChildLbState) loadBalancer.getChildLbStateEag(servers.get(0))).getCurrentPicker() + loadBalancer.getChildLbStateEag(servers.get(0)).getCurrentPicker() .pickSubchannel(getDefaultPickSubchannelArgs(hashFunc.hashVoid())); verifyConnection(1); From b5989a54014b9066defb26d4cca79432a5dba1be Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Thu, 25 Jul 2024 15:46:02 -0700 Subject: [PATCH 45/53] util: MultiChildLb children should always start with a NoResult picker That's the obvious default, and all current usages use (something equivalent to) that default. 
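As a reference for what that default means in practice, here is a small sketch. It is not part of the patch; it only spells out the semantics using names that already appear in the diff below, FixedResultPicker and PickResult.withNoResult(): until a child publishes a real picker, every pick reports that no decision is available yet, so the channel buffers the RPC instead of failing it or routing it.

import io.grpc.LoadBalancer;
import io.grpc.LoadBalancer.PickResult;
import io.grpc.LoadBalancer.PickSubchannelArgs;
import io.grpc.LoadBalancer.SubchannelPicker;

final class NoResultPickerSketch {
  // The default every child now starts with (the same expression used in the diff below).
  static final SubchannelPicker INITIAL =
      new LoadBalancer.FixedResultPicker(PickResult.withNoResult());

  // A behaviorally equivalent hand-written picker, shown only to make the semantics explicit:
  // withNoResult() is neither a subchannel nor an error, so the RPC stays buffered until
  // updateBalancingState() supplies a real picker.
  static final SubchannelPicker EQUIVALENT = new SubchannelPicker() {
    @Override
    public PickResult pickSubchannel(PickSubchannelArgs args) {
      return PickResult.withNoResult();
    }
  };
}

Since every current subclass wanted exactly this behavior, the separate getInitialPicker() hook removed below had nothing left to customize.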
--- .../io/grpc/util/MultiChildLoadBalancer.java | 22 +++++-------------- .../io/grpc/util/RoundRobinLoadBalancer.java | 4 ++-- .../grpc/xds/ClusterManagerLoadBalancer.java | 6 ++--- .../io/grpc/xds/LeastRequestLoadBalancer.java | 8 +++---- .../io/grpc/xds/RingHashLoadBalancer.java | 4 ++-- .../xds/WeightedRoundRobinLoadBalancer.java | 11 +++++----- 6 files changed, 21 insertions(+), 34 deletions(-) diff --git a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java index 951721adced..51144b7c017 100644 --- a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java @@ -91,8 +91,7 @@ protected Map createChildLbMap(ResolvedAddresses resolvedA if (existingChildLbState != null) { childLbMap.put(endpoint, existingChildLbState); } else { - childLbMap.put(endpoint, - createChildLbState(endpoint, null, getInitialPicker(), resolvedAddresses)); + childLbMap.put(endpoint, createChildLbState(endpoint, null, resolvedAddresses)); } } return childLbMap; @@ -102,8 +101,8 @@ protected Map createChildLbMap(ResolvedAddresses resolvedA * Override to create an instance of a subclass. */ protected ChildLbState createChildLbState(Object key, Object policyConfig, - SubchannelPicker initialPicker, ResolvedAddresses resolvedAddresses) { - return new ChildLbState(key, pickFirstLbProvider, policyConfig, initialPicker); + ResolvedAddresses resolvedAddresses) { + return new ChildLbState(key, pickFirstLbProvider, policyConfig); } /** @@ -187,15 +186,6 @@ protected void handleNameResolutionError(ChildLbState child, Status error) { child.lb.handleNameResolutionError(error); } - /** - * Creates a picker representing the state before any connections have been established. - * - *

Override to produce a custom picker. - */ - protected SubchannelPicker getInitialPicker() { - return new FixedResultPicker(PickResult.withNoResult()); - } - /** * Creates a new picker representing an error status. * @@ -365,12 +355,10 @@ public class ChildLbState { private final LoadBalancer lb; private ConnectivityState currentState; - private SubchannelPicker currentPicker; + private SubchannelPicker currentPicker = new FixedResultPicker(PickResult.withNoResult()); - public ChildLbState(Object key, LoadBalancer.Factory policyFactory, Object childConfig, - SubchannelPicker initialPicker) { + public ChildLbState(Object key, LoadBalancer.Factory policyFactory, Object childConfig) { this.key = key; - this.currentPicker = initialPicker; this.config = childConfig; this.lb = policyFactory.newLoadBalancer(createChildHelper()); this.currentState = CONNECTING; diff --git a/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java b/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java index 765e2a4d4b6..a8d829cfb8d 100644 --- a/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java @@ -97,8 +97,8 @@ private SubchannelPicker createReadyPicker(Collection children) { @Override protected ChildLbState createChildLbState(Object key, Object policyConfig, - SubchannelPicker initialPicker, ResolvedAddresses resolvedAddresses) { - return new ChildLbState(key, pickFirstLbProvider, policyConfig, initialPicker) { + ResolvedAddresses resolvedAddresses) { + return new ChildLbState(key, pickFirstLbProvider, policyConfig) { @Override protected ChildLbStateHelper createChildHelper() { return new ChildLbStateHelper() { diff --git a/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java b/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java index 9e9ca5e1da3..50669cfeeb3 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java @@ -85,7 +85,7 @@ protected Map createChildLbMap(ResolvedAddresses resolvedA ChildLbState child = getChildLbState(entry.getKey()); if (child == null) { child = new ClusterManagerLbState(entry.getKey(), - entry.getValue().getProvider(), entry.getValue().getConfig(), getInitialPicker()); + entry.getValue().getProvider(), entry.getValue().getConfig()); } newChildPolicies.put(entry.getKey(), child); } @@ -202,8 +202,8 @@ private class ClusterManagerLbState extends ChildLbState { ScheduledHandle deletionTimer; public ClusterManagerLbState(Object key, LoadBalancerProvider policyProvider, - Object childConfig, SubchannelPicker initialPicker) { - super(key, policyProvider, childConfig, initialPicker); + Object childConfig) { + super(key, policyProvider, childConfig); } @Override diff --git a/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java b/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java index a11622d492f..52fa1298e60 100644 --- a/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/LeastRequestLoadBalancer.java @@ -127,8 +127,8 @@ protected void updateOverallBalancingState() { @Override protected ChildLbState createChildLbState(Object key, Object policyConfig, - SubchannelPicker initialPicker, ResolvedAddresses unused) { - return new LeastRequestLbState(key, pickFirstLbProvider, policyConfig, initialPicker); + ResolvedAddresses unused) { + return new LeastRequestLbState(key, pickFirstLbProvider, policyConfig); } private void 
updateBalancingState(ConnectivityState state, SubchannelPicker picker) { @@ -321,8 +321,8 @@ protected class LeastRequestLbState extends ChildLbState { private final AtomicInteger activeRequests = new AtomicInteger(0); public LeastRequestLbState(Object key, LoadBalancerProvider policyProvider, - Object childConfig, SubchannelPicker initialPicker) { - super(key, policyProvider, childConfig, initialPicker); + Object childConfig) { + super(key, policyProvider, childConfig); } int getActiveRequests() { diff --git a/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java b/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java index 72618b7bbaa..4e380dcd3d3 100644 --- a/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java @@ -228,8 +228,8 @@ protected void updateOverallBalancingState() { @Override protected ChildLbState createChildLbState(Object key, Object policyConfig, - SubchannelPicker initialPicker, ResolvedAddresses resolvedAddresses) { - return new ChildLbState(key, lazyLbFactory, null, EMPTY_PICKER); + ResolvedAddresses resolvedAddresses) { + return new ChildLbState(key, lazyLbFactory, null); } private Status validateAddrList(List addrList) { diff --git a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java index 388eca2579d..65cd146fdd3 100644 --- a/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java @@ -149,9 +149,8 @@ public WeightedRoundRobinLoadBalancer(Helper helper, Ticker ticker) { @Override protected ChildLbState createChildLbState(Object key, Object policyConfig, - SubchannelPicker initialPicker, ResolvedAddresses unused) { - ChildLbState childLbState = new WeightedChildLbState(key, pickFirstLbProvider, policyConfig, - initialPicker); + ResolvedAddresses unused) { + ChildLbState childLbState = new WeightedChildLbState(key, pickFirstLbProvider, policyConfig); return childLbState; } @@ -290,9 +289,9 @@ final class WeightedChildLbState extends ChildLbState { private OrcaReportListener orcaReportListener; - public WeightedChildLbState(Object key, LoadBalancerProvider policyProvider, Object childConfig, - SubchannelPicker initialPicker) { - super(key, policyProvider, childConfig, initialPicker); + public WeightedChildLbState( + Object key, LoadBalancerProvider policyProvider, Object childConfig) { + super(key, policyProvider, childConfig); } @Override From fd8734f341a12999e7dd5c14aed744768775c855 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 26 Jul 2024 08:53:46 -0700 Subject: [PATCH 46/53] xds: Delegate more RingHashLB address updates to MultiChildLB Since 04474970 RingHashLB has not used acceptResolvedAddressesInternal(). At the time that was needed because deactivated children were part of MultiChildLB. But in 9de8e443, the logic of RingHashLB and MultiChildLB.acceptResolvedAddressesInternal() converged, so it can now swap back to using the base class for more logic. 
--- .../io/grpc/util/MultiChildLoadBalancer.java | 6 +++--- .../java/io/grpc/xds/RingHashLoadBalancer.java | 16 ++++------------ 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java index 51144b7c017..02ed6a00cca 100644 --- a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java @@ -231,7 +231,7 @@ protected final AcceptResolvedAddrRetVal acceptResolvedAddressesInternal( return new AcceptResolvedAddrRetVal(Status.OK, getRemovedChildren(newChildren.keySet())); } - protected final void addMissingChildren(Map newChildren) { + private void addMissingChildren(Map newChildren) { // Do adds and identify reused children for (Map.Entry entry : newChildren.entrySet()) { final Object key = entry.getKey(); @@ -241,7 +241,7 @@ protected final void addMissingChildren(Map newChildren) { } } - protected final void updateChildrenWithResolvedAddresses(ResolvedAddresses resolvedAddresses, + private void updateChildrenWithResolvedAddresses(ResolvedAddresses resolvedAddresses, Map newChildren) { for (Map.Entry entry : newChildren.entrySet()) { Object childConfig = entry.getValue().getConfig(); @@ -256,7 +256,7 @@ protected final void updateChildrenWithResolvedAddresses(ResolvedAddresses resol /** * Identifies which children have been removed (are not part of the newChildKeys). */ - protected final List getRemovedChildren(Set newChildKeys) { + private List getRemovedChildren(Set newChildKeys) { List removedChildren = new ArrayList<>(); // Do removals for (Object key : ImmutableList.copyOf(childLbStates.keySet())) { diff --git a/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java b/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java index 4e380dcd3d3..e2f9a00f25b 100644 --- a/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/RingHashLoadBalancer.java @@ -89,19 +89,11 @@ public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) { try { resolvingAddresses = true; - // Subclass handles any special manipulation to create appropriate types of ChildLbStates - Map newChildren = createChildLbMap(resolvedAddresses); - - if (newChildren.isEmpty()) { - addressValidityStatus = Status.UNAVAILABLE.withDescription( - "Ring hash lb error: EDS resolution was successful, but there were no valid addresses"); - handleNameResolutionError(addressValidityStatus); - return addressValidityStatus; + AcceptResolvedAddrRetVal acceptRetVal = acceptResolvedAddressesInternal(resolvedAddresses); + if (!acceptRetVal.status.isOk()) { + return acceptRetVal.status; } - addMissingChildren(newChildren); - updateChildrenWithResolvedAddresses(resolvedAddresses, newChildren); - // Now do the ringhash specific logic with weights and building the ring RingHashConfig config = (RingHashConfig) resolvedAddresses.getLoadBalancingPolicyConfig(); if (config == null) { @@ -145,7 +137,7 @@ public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) { // clusters and resolver can remove them in service config. 
updateOverallBalancingState(); - shutdownRemoved(getRemovedChildren(newChildren.keySet())); + shutdownRemoved(acceptRetVal.removedChildren); } finally { this.resolvingAddresses = false; } From c2eccca3bc1037be121b299ec3a3a4c47f6ee506 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 13 Aug 2024 10:27:48 -0700 Subject: [PATCH 47/53] cronet: Add internal API to specify Network cl/661194496 --- .../io/grpc/cronet/CronetChannelBuilder.java | 28 +++++++++++++++++-- .../cronet/InternalCronetChannelBuilder.java | 7 +++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/cronet/src/main/java/io/grpc/cronet/CronetChannelBuilder.java b/cronet/src/main/java/io/grpc/cronet/CronetChannelBuilder.java index 1c60f82846d..f42dabdd55a 100644 --- a/cronet/src/main/java/io/grpc/cronet/CronetChannelBuilder.java +++ b/cronet/src/main/java/io/grpc/cronet/CronetChannelBuilder.java @@ -20,9 +20,12 @@ import static com.google.common.base.Preconditions.checkNotNull; import static io.grpc.internal.GrpcUtil.DEFAULT_MAX_MESSAGE_SIZE; +import android.net.Network; +import android.os.Build; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.MoreExecutors; +import com.google.errorprone.annotations.CanIgnoreReturnValue; import com.google.errorprone.annotations.DoNotCall; import io.grpc.ChannelCredentials; import io.grpc.ChannelLogger; @@ -105,6 +108,7 @@ public static CronetChannelBuilder forAddress(String name, int port) { private int trafficStatsTag; private boolean trafficStatsUidSet; private int trafficStatsUid; + private Network network; private CronetChannelBuilder(String host, int port, CronetEngine cronetEngine) { final class CronetChannelTransportFactoryBuilder implements ClientTransportFactoryBuilder { @@ -190,6 +194,13 @@ CronetChannelBuilder setTrafficStatsUid(int uid) { return this; } + /** Sets the network ID to use for this channel traffic. */ + @CanIgnoreReturnValue + CronetChannelBuilder bindToNetwork(@Nullable Network network) { + this.network = network; + return this; + } + /** * Provides a custom scheduled executor service. 
* @@ -210,7 +221,12 @@ public CronetChannelBuilder scheduledExecutorService( ClientTransportFactory buildTransportFactory() { return new CronetTransportFactory( new TaggingStreamFactory( - cronetEngine, trafficStatsTagSet, trafficStatsTag, trafficStatsUidSet, trafficStatsUid), + cronetEngine, + trafficStatsTagSet, + trafficStatsTag, + trafficStatsUidSet, + trafficStatsUid, + network), MoreExecutors.directExecutor(), scheduledExecutorService, maxMessageSize, @@ -294,18 +310,21 @@ private static class TaggingStreamFactory extends StreamBuilderFactory { private final int trafficStatsTag; private final boolean trafficStatsUidSet; private final int trafficStatsUid; + private final Network network; TaggingStreamFactory( CronetEngine cronetEngine, boolean trafficStatsTagSet, int trafficStatsTag, boolean trafficStatsUidSet, - int trafficStatsUid) { + int trafficStatsUid, + Network network) { this.cronetEngine = cronetEngine; this.trafficStatsTagSet = trafficStatsTagSet; this.trafficStatsTag = trafficStatsTag; this.trafficStatsUidSet = trafficStatsUidSet; this.trafficStatsUid = trafficStatsUid; + this.network = network; } @Override @@ -320,6 +339,11 @@ public BidirectionalStream.Builder newBidirectionalStreamBuilder( if (trafficStatsUidSet) { builder.setTrafficStatsUid(trafficStatsUid); } + if (network != null) { + if (Build.VERSION.SDK_INT >= 23) { + builder.bindToNetwork(network.getNetworkHandle()); + } + } return builder; } } diff --git a/cronet/src/main/java/io/grpc/cronet/InternalCronetChannelBuilder.java b/cronet/src/main/java/io/grpc/cronet/InternalCronetChannelBuilder.java index 2954f1eee81..7e5e610ca67 100644 --- a/cronet/src/main/java/io/grpc/cronet/InternalCronetChannelBuilder.java +++ b/cronet/src/main/java/io/grpc/cronet/InternalCronetChannelBuilder.java @@ -16,7 +16,9 @@ package io.grpc.cronet; +import android.net.Network; import io.grpc.Internal; +import org.checkerframework.checker.nullness.qual.Nullable; /** * Internal {@link CronetChannelBuilder} accessor. This is intended for usage internal to the gRPC @@ -58,4 +60,9 @@ public static void setTrafficStatsTag(CronetChannelBuilder builder, int tag) { public static void setTrafficStatsUid(CronetChannelBuilder builder, int uid) { builder.setTrafficStatsUid(uid); } + + /** Sets the network {@link android.net.Network} to use when relying traffic by this channel. */ + public static void bindToNetwork(CronetChannelBuilder builder, @Nullable Network network) { + builder.bindToNetwork(network); + } } From d5840448d4145e7b9d649a484628682781fd3318 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Tue, 13 Aug 2024 23:52:35 +0530 Subject: [PATCH 48/53] Update README etc to reference 1.66.0 (#11472) --- README.md | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index fef37c1c3bb..cb38ad66394 100644 --- a/README.md +++ b/README.md @@ -44,8 +44,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/languages/java/quickstart) or the more explanatory [gRPC basics](https://grpc.io/docs/languages/java/basics). -The [examples](https://github.com/grpc/grpc-java/tree/v1.65.0/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.65.0/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.66.0/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.66.0/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -56,18 +56,18 @@ Download [the JARs][]. 
Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.65.0 + 1.66.0 runtime io.grpc grpc-protobuf - 1.65.0 + 1.66.0 io.grpc grpc-stub - 1.65.0 + 1.66.0 org.apache.tomcat @@ -79,18 +79,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: Or for Gradle with non-Android, add to your dependencies: ```gradle -runtimeOnly 'io.grpc:grpc-netty-shaded:1.65.0' -implementation 'io.grpc:grpc-protobuf:1.65.0' -implementation 'io.grpc:grpc-stub:1.65.0' +runtimeOnly 'io.grpc:grpc-netty-shaded:1.66.0' +implementation 'io.grpc:grpc-protobuf:1.66.0' +implementation 'io.grpc:grpc-stub:1.66.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.65.0' -implementation 'io.grpc:grpc-protobuf-lite:1.65.0' -implementation 'io.grpc:grpc-stub:1.65.0' +implementation 'io.grpc:grpc-okhttp:1.66.0' +implementation 'io.grpc:grpc-protobuf-lite:1.66.0' +implementation 'io.grpc:grpc-stub:1.66.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` @@ -99,7 +99,7 @@ For [Bazel](https://bazel.build), you can either (with the GAVs from above), or use `@io_grpc_grpc_java//api` et al (see below). [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.65.0 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.66.0 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). @@ -129,9 +129,9 @@ For protobuf-based codegen integrated with the Maven build system, you can use protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:3.25.1:exe:${os.detected.classifier} + com.google.protobuf:protoc:3.25.3:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.65.0:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.66.0:exe:${os.detected.classifier} @@ -157,11 +157,11 @@ plugins { protobuf { protoc { - artifact = "com.google.protobuf:protoc:3.25.1" + artifact = "com.google.protobuf:protoc:3.25.3" } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.65.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0' } } generateProtoTasks { @@ -190,11 +190,11 @@ plugins { protobuf { protoc { - artifact = "com.google.protobuf:protoc:3.25.1" + artifact = "com.google.protobuf:protoc:3.25.3" } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.65.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.66.0' } } generateProtoTasks { From 75012a5be2e8dbcbe171ac119434239a30bee385 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 13 Aug 2024 16:43:44 -0700 Subject: [PATCH 49/53] examples: Upgrade Maven plugin versions Upgrade Maven to 3.8.8, the oldest supported version, as the plugins required a newer version. 
--- buildscripts/grpc-java-artifacts/Dockerfile | 4 ++-- examples/example-debug/pom.xml | 2 +- examples/example-dualstack/pom.xml | 2 +- examples/example-gauth/pom.xml | 2 +- examples/example-hostname/pom.xml | 2 +- examples/example-jwt-auth/pom.xml | 4 ++-- examples/example-oauth/pom.xml | 4 ++-- examples/example-tls/pom.xml | 2 +- examples/pom.xml | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/buildscripts/grpc-java-artifacts/Dockerfile b/buildscripts/grpc-java-artifacts/Dockerfile index 97c152780a3..736babe9d8e 100644 --- a/buildscripts/grpc-java-artifacts/Dockerfile +++ b/buildscripts/grpc-java-artifacts/Dockerfile @@ -28,6 +28,6 @@ RUN mkdir -p "$ANDROID_HOME/cmdline-tools" && \ yes | "$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager" --licenses # Install Maven -RUN curl -Ls https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.3.9/apache-maven-3.3.9-bin.tar.gz | \ +RUN curl -Ls https://dlcdn.apache.org/maven/maven-3/3.8.8/binaries/apache-maven-3.8.8-bin.tar.gz | \ tar xz -C /var/local -ENV PATH /var/local/apache-maven-3.3.9/bin:$PATH +ENV PATH /var/local/apache-maven-3.8.8/bin:$PATH diff --git a/examples/example-debug/pom.xml b/examples/example-debug/pom.xml index 10ccf834d86..7365b35daab 100644 --- a/examples/example-debug/pom.xml +++ b/examples/example-debug/pom.xml @@ -98,7 +98,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/example-dualstack/pom.xml b/examples/example-dualstack/pom.xml index 710b48ee617..f955cada0f6 100644 --- a/examples/example-dualstack/pom.xml +++ b/examples/example-dualstack/pom.xml @@ -102,7 +102,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index 1e58e21e975..2512cb37ea5 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -96,7 +96,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index 19b5f8b3c20..443ecb85ef2 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -98,7 +98,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/example-jwt-auth/pom.xml b/examples/example-jwt-auth/pom.xml index ad530e33aa7..f2e92e28c84 100644 --- a/examples/example-jwt-auth/pom.xml +++ b/examples/example-jwt-auth/pom.xml @@ -94,7 +94,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - 0.5.1 + 0.6.1 com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} @@ -116,7 +116,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/example-oauth/pom.xml b/examples/example-oauth/pom.xml index 2c38a05b3e4..4e284b5f248 100644 --- a/examples/example-oauth/pom.xml +++ b/examples/example-oauth/pom.xml @@ -99,7 +99,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - 0.5.1 + 0.6.1 com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} @@ -121,7 +121,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index bc9c0a7a8ee..eab02cb5919 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -82,7 +82,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce diff --git a/examples/pom.xml b/examples/pom.xml index 2b25d13b50c..fdf92e9eca1 100644 --- 
a/examples/pom.xml +++ b/examples/pom.xml @@ -110,7 +110,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4.1 + 3.5.0 enforce From 909c4bc382c653399eddff25bb5cc9357a166713 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Sat, 27 Jul 2024 12:50:03 -0700 Subject: [PATCH 50/53] util: Remove minor convenience functions from MultiChildLB These were once needed to be overridden (e.g., by RoundRobinLB), but now nothing overrides them and MultiChildLB doesn't even call one of them. --- .../io/grpc/util/MultiChildLoadBalancer.java | 21 ++----------------- .../grpc/xds/ClusterManagerLoadBalancer.java | 5 +++-- 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java index 02ed6a00cca..748f58924e1 100644 --- a/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java +++ b/util/src/main/java/io/grpc/util/MultiChildLoadBalancer.java @@ -173,28 +173,11 @@ protected ResolvedAddresses getChildAddresses(Object key, ResolvedAddresses reso @Override public void handleNameResolutionError(Status error) { if (currentConnectivityState != READY) { - helper.updateBalancingState(TRANSIENT_FAILURE, getErrorPicker(error)); + helper.updateBalancingState( + TRANSIENT_FAILURE, new FixedResultPicker(PickResult.withError(error))); } } - /** - * Handle the name resolution error only for the specified child. - * - *
Override if you need special handling. - */ - protected void handleNameResolutionError(ChildLbState child, Status error) { - child.lb.handleNameResolutionError(error); - } - - /** - * Creates a new picker representing an error status. - * - *
Override to produce a custom picker when there are errors. - */ - protected SubchannelPicker getErrorPicker(Status error) { - return new FixedResultPicker(PickResult.withError(error)); - } - @Override public void shutdown() { logger.log(Level.FINE, "Shutdown"); diff --git a/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java b/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java index 50669cfeeb3..6b6d2b81352 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancer.java @@ -183,11 +183,12 @@ public void handleNameResolutionError(Status error) { for (ChildLbState state : getChildLbStates()) { if (((ClusterManagerLbState) state).deletionTimer == null) { gotoTransientFailure = false; - handleNameResolutionError(state, error); + state.getLb().handleNameResolutionError(error); } } if (gotoTransientFailure) { - getHelper().updateBalancingState(TRANSIENT_FAILURE, getErrorPicker(error)); + getHelper().updateBalancingState( + TRANSIENT_FAILURE, new FixedResultPicker(PickResult.withError(error))); } } From ff8e4137603b88bf2dcc43b502554c4fae437dac Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 13 Aug 2024 21:33:55 -0700 Subject: [PATCH 51/53] Remove direct dependency on j2objc Bazel had the dependency added because of #5046, where Guava was depending on it as compile-only and Bazel build have "unknown enum constant" warnings. Guava now has a compile dependency on j2objc, so this workaround is no longer needed. There are currently no version skew issues in Gradle, which was the only usage. --- alts/BUILD.bazel | 2 -- api/BUILD.bazel | 1 - auth/BUILD.bazel | 1 - core/BUILD.bazel | 1 - examples/pom.xml | 5 +++++ gcp-observability/build.gradle | 3 +-- gradle/libs.versions.toml | 1 - grpclb/BUILD.bazel | 1 - grpclb/build.gradle | 4 ++-- inprocess/BUILD.bazel | 1 - netty/BUILD.bazel | 1 - okhttp/BUILD.bazel | 1 - protobuf-lite/BUILD.bazel | 1 - protobuf/BUILD.bazel | 1 - services/build.gradle | 5 ++--- stub/BUILD.bazel | 1 - testing/BUILD.bazel | 1 - util/BUILD.bazel | 1 - xds/build.gradle | 1 + 19 files changed, 11 insertions(+), 22 deletions(-) diff --git a/alts/BUILD.bazel b/alts/BUILD.bazel index 819daedcc82..73420e11053 100644 --- a/alts/BUILD.bazel +++ b/alts/BUILD.bazel @@ -19,7 +19,6 @@ java_library( "@com_google_protobuf//:protobuf_java_util", artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("io.netty:netty-buffer"), artifact("io.netty:netty-codec"), artifact("io.netty:netty-common"), @@ -45,7 +44,6 @@ java_library( artifact("com.google.auth:google-auth-library-oauth2-http"), artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("io.netty:netty-common"), artifact("io.netty:netty-handler"), artifact("io.netty:netty-transport"), diff --git a/api/BUILD.bazel b/api/BUILD.bazel index 07be1d58dc7..6bf3375e9f0 100644 --- a/api/BUILD.bazel +++ b/api/BUILD.bazel @@ -13,6 +13,5 @@ java_library( artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:failureaccess"), # future transitive dep of Guava. 
See #5214 artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ], ) diff --git a/auth/BUILD.bazel b/auth/BUILD.bazel index 095fae5af8b..a19562fa7f7 100644 --- a/auth/BUILD.bazel +++ b/auth/BUILD.bazel @@ -11,6 +11,5 @@ java_library( artifact("com.google.auth:google-auth-library-credentials"), artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ], ) diff --git a/core/BUILD.bazel b/core/BUILD.bazel index a1d3d19e828..35c20628d0b 100644 --- a/core/BUILD.bazel +++ b/core/BUILD.bazel @@ -30,7 +30,6 @@ java_library( artifact("com.google.code.findbugs:jsr305"), artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("io.perfmark:perfmark-api"), artifact("org.codehaus.mojo:animal-sniffer-annotations"), ], diff --git a/examples/pom.xml b/examples/pom.xml index fdf92e9eca1..5cd721b50d1 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -55,6 +55,11 @@ protobuf-java-util ${protobuf.version} + + com.google.j2objc + j2objc-annotations + 3.0.0 + org.apache.tomcat annotations-api diff --git a/gcp-observability/build.gradle b/gcp-observability/build.gradle index 0de7f8363bc..f869bd61a76 100644 --- a/gcp-observability/build.gradle +++ b/gcp-observability/build.gradle @@ -65,8 +65,7 @@ dependencies { libraries.auto.value.annotations, // Use our newer version libraries.guava.jre, // Use our newer version libraries.protobuf.java.util, // Use our newer version - libraries.re2j, // Use our newer version - libraries.j2objc.annotations // Explicit dependency to keep in step with version used by guava + libraries.re2j // Use our newer version testImplementation testFixtures(project(':grpc-api')), project(':grpc-testing'), diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 299ca60ab4b..488ead9ad86 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -42,7 +42,6 @@ guava-testlib = "com.google.guava:guava-testlib:33.2.1-android" # May be different from the -android version. 
guava-jre = "com.google.guava:guava:33.2.1-jre" hdrhistogram = "org.hdrhistogram:HdrHistogram:2.2.2" -j2objc-annotations = " com.google.j2objc:j2objc-annotations:3.0.0" jakarta-servlet-api = "jakarta.servlet:jakarta.servlet-api:5.0.0" javax-annotation = "org.apache.tomcat:annotations-api:6.0.53" javax-servlet-api = "javax.servlet:javax.servlet-api:4.0.1" diff --git a/grpclb/BUILD.bazel b/grpclb/BUILD.bazel index 517155bbfc1..2dd24bb52a2 100644 --- a/grpclb/BUILD.bazel +++ b/grpclb/BUILD.bazel @@ -21,7 +21,6 @@ java_library( "@io_grpc_grpc_proto//:grpclb_load_balancer_java_proto", artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ], ) diff --git a/grpclb/build.gradle b/grpclb/build.gradle index cea599828f5..93331053b09 100644 --- a/grpclb/build.gradle +++ b/grpclb/build.gradle @@ -19,9 +19,9 @@ dependencies { implementation project(':grpc-core'), project(':grpc-protobuf'), project(':grpc-stub'), + libraries.guava, libraries.protobuf.java, - libraries.protobuf.java.util, - libraries.guava + libraries.protobuf.java.util runtimeOnly libraries.errorprone.annotations compileOnly libraries.javax.annotation testImplementation libraries.truth, diff --git a/inprocess/BUILD.bazel b/inprocess/BUILD.bazel index aa614df654c..bef38612713 100644 --- a/inprocess/BUILD.bazel +++ b/inprocess/BUILD.bazel @@ -13,6 +13,5 @@ java_library( artifact("com.google.code.findbugs:jsr305"), artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ], ) diff --git a/netty/BUILD.bazel b/netty/BUILD.bazel index daf2e83e59a..9fe52ea5868 100644 --- a/netty/BUILD.bazel +++ b/netty/BUILD.bazel @@ -15,7 +15,6 @@ java_library( artifact("com.google.code.findbugs:jsr305"), artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("io.netty:netty-buffer"), artifact("io.netty:netty-codec"), artifact("io.netty:netty-codec-http"), diff --git a/okhttp/BUILD.bazel b/okhttp/BUILD.bazel index 7cf1775da2c..80068c9bb5b 100644 --- a/okhttp/BUILD.bazel +++ b/okhttp/BUILD.bazel @@ -17,7 +17,6 @@ java_library( artifact("com.google.code.findbugs:jsr305"), artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("com.squareup.okhttp:okhttp"), artifact("com.squareup.okio:okio"), artifact("io.perfmark:perfmark-api"), diff --git a/protobuf-lite/BUILD.bazel b/protobuf-lite/BUILD.bazel index 087723e95fb..dad794e8b58 100644 --- a/protobuf-lite/BUILD.bazel +++ b/protobuf-lite/BUILD.bazel @@ -10,7 +10,6 @@ java_library( "//api", artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ] + select({ ":android": ["@com_google_protobuf//:protobuf_javalite"], "//conditions:default": ["@com_google_protobuf//:protobuf_java"], diff --git a/protobuf/BUILD.bazel b/protobuf/BUILD.bazel index 47cc8f9d032..724c78ca6ee 100644 --- a/protobuf/BUILD.bazel +++ b/protobuf/BUILD.bazel @@ -13,6 +13,5 @@ java_library( artifact("com.google.api.grpc:proto-google-common-protos"), artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ], ) diff --git a/services/build.gradle b/services/build.gradle index de716c9fa1d..fade7aef3fb 100644 --- a/services/build.gradle +++ 
b/services/build.gradle @@ -27,11 +27,10 @@ dependencies { implementation project(':grpc-core'), project(':grpc-protobuf'), project(':grpc-util'), - libraries.protobuf.java.util, - libraries.guava.jre // JRE required by protobuf-java-util + libraries.guava.jre, // JRE required by protobuf-java-util + libraries.protobuf.java.util runtimeOnly libraries.errorprone.annotations, - libraries.j2objc.annotations, // Explicit dependency to keep in step with version used by guava libraries.gson // to fix checkUpperBoundDeps error here compileOnly libraries.javax.annotation testImplementation project(':grpc-testing'), diff --git a/stub/BUILD.bazel b/stub/BUILD.bazel index 8950a1cfd3f..6d06e01f918 100644 --- a/stub/BUILD.bazel +++ b/stub/BUILD.bazel @@ -12,7 +12,6 @@ java_library( artifact("com.google.code.findbugs:jsr305"), artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), ], ) diff --git a/testing/BUILD.bazel b/testing/BUILD.bazel index 668a666c2fe..78f9b840754 100644 --- a/testing/BUILD.bazel +++ b/testing/BUILD.bazel @@ -18,7 +18,6 @@ java_library( "//util", artifact("com.google.code.findbugs:jsr305"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("com.google.truth:truth"), artifact("junit:junit"), ], diff --git a/util/BUILD.bazel b/util/BUILD.bazel index 7a38063a983..8fb00e21d56 100644 --- a/util/BUILD.bazel +++ b/util/BUILD.bazel @@ -15,7 +15,6 @@ java_library( artifact("com.google.code.findbugs:jsr305"), artifact("com.google.errorprone:error_prone_annotations"), artifact("com.google.guava:guava"), - artifact("com.google.j2objc:j2objc-annotations"), artifact("org.codehaus.mojo:animal-sniffer-annotations"), ], ) diff --git a/xds/build.gradle b/xds/build.gradle index a1d5aa753cb..a738145a2a0 100644 --- a/xds/build.gradle +++ b/xds/build.gradle @@ -52,6 +52,7 @@ dependencies { project(':grpc-services'), project(':grpc-auth'), project(path: ':grpc-alts', configuration: 'shadow'), + libraries.guava, libraries.gson, libraries.re2j, libraries.auto.value.annotations, From 6a9bc3ba1761ffd27961d8aa4f12a44aa45a3e6a Mon Sep 17 00:00:00 2001 From: sunpe Date: Tue, 13 Aug 2024 22:31:32 +0800 Subject: [PATCH 52/53] example: delete duplicate and unused code in KeepAliveClient.java --- .../main/java/io/grpc/examples/keepalive/KeepAliveClient.java | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/src/main/java/io/grpc/examples/keepalive/KeepAliveClient.java b/examples/src/main/java/io/grpc/examples/keepalive/KeepAliveClient.java index a7c59c3952f..414d92dea4c 100644 --- a/examples/src/main/java/io/grpc/examples/keepalive/KeepAliveClient.java +++ b/examples/src/main/java/io/grpc/examples/keepalive/KeepAliveClient.java @@ -78,7 +78,6 @@ public static void main(String[] args) throws Exception { // frames. // More details see: https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md ManagedChannel channel = Grpc.newChannelBuilder(target, InsecureChannelCredentials.create()) - .keepAliveTime(5, TimeUnit.MINUTES) .keepAliveTime(10, TimeUnit.SECONDS) // Change to a larger value, e.g. 5min. .keepAliveTimeout(1, TimeUnit.SECONDS) // Change to a larger value, e.g. 10s. .keepAliveWithoutCalls(true)// You should normally avoid enabling this. 
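With the duplicate call gone, the example's channel construction reads as a single unambiguous builder chain. The sketch below reassembles it from the context lines above; the `KeepAliveChannels` wrapper is only for illustration, and the values mirror the demo (production code should use larger intervals, as the example's comments note).

```java
import io.grpc.Grpc;
import io.grpc.InsecureChannelCredentials;
import io.grpc.ManagedChannel;
import java.util.concurrent.TimeUnit;

final class KeepAliveChannels {
  // Sketch of the example's channel setup after the unused keepAliveTime(5, MINUTES)
  // call is dropped: the later setter would have overridden it anyway.
  static ManagedChannel create(String target) {
    return Grpc.newChannelBuilder(target, InsecureChannelCredentials.create())
        .keepAliveTime(10, TimeUnit.SECONDS)   // demo value; prefer minutes in production
        .keepAliveTimeout(1, TimeUnit.SECONDS) // demo value; ~10s is more realistic
        .keepAliveWithoutCalls(true)           // normally avoid; enabled here for the demo
        .build();
  }
}
```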
From 6dbd1b9d5a3a0a8d9c64f161e23b7f355a56e588 Mon Sep 17 00:00:00 2001 From: John Cormie Date: Wed, 14 Aug 2024 09:11:49 -0700 Subject: [PATCH 53/53] Add newAttachMetadataServerInterceptor() MetadataUtil (#11458) --- .../main/java/io/grpc/stub/MetadataUtils.java | 64 +++++++ .../java/io/grpc/stub/MetadataUtilsTest.java | 175 ++++++++++++++++++ 2 files changed, 239 insertions(+) create mode 100644 stub/src/test/java/io/grpc/stub/MetadataUtilsTest.java diff --git a/stub/src/main/java/io/grpc/stub/MetadataUtils.java b/stub/src/main/java/io/grpc/stub/MetadataUtils.java index addf54c0f81..4208d3ca652 100644 --- a/stub/src/main/java/io/grpc/stub/MetadataUtils.java +++ b/stub/src/main/java/io/grpc/stub/MetadataUtils.java @@ -22,10 +22,15 @@ import io.grpc.Channel; import io.grpc.ClientCall; import io.grpc.ClientInterceptor; +import io.grpc.ExperimentalApi; import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; import io.grpc.Metadata; import io.grpc.MethodDescriptor; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; import io.grpc.Status; import java.util.concurrent.atomic.AtomicReference; @@ -143,4 +148,63 @@ public void onClose(Status status, Metadata trailers) { } } } + + /** + * Returns a ServerInterceptor that adds the specified Metadata to every response stream, one way + * or another. + * + *
If, absent this interceptor, a stream would have headers, 'extras' will be added to those + * headers. Otherwise, 'extras' will be sent as trailers. This pattern is useful when you have + * some fixed information, server identity say, that should be included no matter how the call + * turns out. The fallback to trailers avoids artificially committing clients to error responses + * that could otherwise be retried (see https://grpc.io/docs/guides/retry/ for more). + * + *
For correct operation, be sure to arrange for this interceptor to run *before* any others + * that might add headers. + * + * @param extras the Metadata to be added to each stream. Caller gives up ownership. + */ + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/11462") + public static ServerInterceptor newAttachMetadataServerInterceptor(Metadata extras) { + return new MetadataAttachingServerInterceptor(extras); + } + + private static final class MetadataAttachingServerInterceptor implements ServerInterceptor { + + private final Metadata extras; + + MetadataAttachingServerInterceptor(Metadata extras) { + this.extras = extras; + } + + @Override + public ServerCall.Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { + return next.startCall(new MetadataAttachingServerCall<>(call), headers); + } + + final class MetadataAttachingServerCall + extends SimpleForwardingServerCall { + boolean headersSent; + + MetadataAttachingServerCall(ServerCall delegate) { + super(delegate); + } + + @Override + public void sendHeaders(Metadata headers) { + headers.merge(extras); + headersSent = true; + super.sendHeaders(headers); + } + + @Override + public void close(Status status, Metadata trailers) { + if (!headersSent) { + trailers.merge(extras); + } + super.close(status, trailers); + } + } + } } diff --git a/stub/src/test/java/io/grpc/stub/MetadataUtilsTest.java b/stub/src/test/java/io/grpc/stub/MetadataUtilsTest.java new file mode 100644 index 00000000000..f9890ac0433 --- /dev/null +++ b/stub/src/test/java/io/grpc/stub/MetadataUtilsTest.java @@ -0,0 +1,175 @@ +/* + * Copyright 2024 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.grpc.stub; + +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.stub.MetadataUtils.newAttachMetadataServerInterceptor; +import static io.grpc.stub.MetadataUtils.newCaptureMetadataInterceptor; +import static org.junit.Assert.fail; + +import com.google.common.collect.ImmutableList; +import io.grpc.CallOptions; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptors; +import io.grpc.ServerMethodDefinition; +import io.grpc.ServerServiceDefinition; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.grpc.StringMarshaller; +import io.grpc.inprocess.InProcessChannelBuilder; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.testing.GrpcCleanupRule; +import java.io.IOException; +import java.util.Iterator; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MetadataUtilsTest { + + @Rule public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule(); + + private static final String SERVER_NAME = "test"; + private static final Metadata.Key FOO_KEY = + Metadata.Key.of("foo-key", Metadata.ASCII_STRING_MARSHALLER); + + private final MethodDescriptor echoMethod = + MethodDescriptor.newBuilder(StringMarshaller.INSTANCE, StringMarshaller.INSTANCE) + .setFullMethodName("test/echo") + .setType(MethodDescriptor.MethodType.UNARY) + .build(); + + private final ServerCallHandler echoCallHandler = + ServerCalls.asyncUnaryCall( + (req, respObserver) -> { + respObserver.onNext(req); + respObserver.onCompleted(); + }); + + MethodDescriptor echoServerStreamingMethod = + MethodDescriptor.newBuilder(StringMarshaller.INSTANCE, StringMarshaller.INSTANCE) + .setFullMethodName("test/echoStream") + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .build(); + + private final AtomicReference trailersCapture = new AtomicReference<>(); + private final AtomicReference headersCapture = new AtomicReference<>(); + + @Test + public void shouldAttachHeadersToResponse() throws IOException { + Metadata extras = new Metadata(); + extras.put(FOO_KEY, "foo-value"); + + ServerServiceDefinition serviceDef = + ServerInterceptors.intercept( + ServerServiceDefinition.builder("test").addMethod(echoMethod, echoCallHandler).build(), + ImmutableList.of(newAttachMetadataServerInterceptor(extras))); + + grpcCleanup.register(newInProcessServerBuilder().addService(serviceDef).build().start()); + ManagedChannel channel = + grpcCleanup.register( + newInProcessChannelBuilder() + .intercept(newCaptureMetadataInterceptor(headersCapture, trailersCapture)) + .build()); + + String response = + ClientCalls.blockingUnaryCall(channel, echoMethod, CallOptions.DEFAULT, "hello"); + assertThat(response).isEqualTo("hello"); + assertThat(trailersCapture.get() == null || !trailersCapture.get().containsKey(FOO_KEY)) + .isTrue(); + assertThat(headersCapture.get().get(FOO_KEY)).isEqualTo("foo-value"); + } + + @Test + public void shouldAttachTrailersWhenNoResponse() throws IOException { + Metadata extras = new Metadata(); + extras.put(FOO_KEY, "foo-value"); + + ServerServiceDefinition serviceDef = + ServerInterceptors.intercept( + ServerServiceDefinition.builder("test") + .addMethod( + ServerMethodDefinition.create( + echoServerStreamingMethod, + ServerCalls.asyncUnaryCall( + (req, 
respObserver) -> respObserver.onCompleted()))) + .build(), + ImmutableList.of(newAttachMetadataServerInterceptor(extras))); + grpcCleanup.register(newInProcessServerBuilder().addService(serviceDef).build().start()); + + ManagedChannel channel = + grpcCleanup.register( + newInProcessChannelBuilder() + .intercept(newCaptureMetadataInterceptor(headersCapture, trailersCapture)) + .build()); + + Iterator response = + ClientCalls.blockingServerStreamingCall( + channel, echoServerStreamingMethod, CallOptions.DEFAULT, "hello"); + assertThat(response.hasNext()).isFalse(); + assertThat(headersCapture.get() == null || !headersCapture.get().containsKey(FOO_KEY)).isTrue(); + assertThat(trailersCapture.get().get(FOO_KEY)).isEqualTo("foo-value"); + } + + @Test + public void shouldAttachTrailersToErrorResponse() throws IOException { + Metadata extras = new Metadata(); + extras.put(FOO_KEY, "foo-value"); + + ServerServiceDefinition serviceDef = + ServerInterceptors.intercept( + ServerServiceDefinition.builder("test") + .addMethod( + echoMethod, + ServerCalls.asyncUnaryCall( + (req, respObserver) -> + respObserver.onError(Status.INVALID_ARGUMENT.asRuntimeException()))) + .build(), + ImmutableList.of(newAttachMetadataServerInterceptor(extras))); + grpcCleanup.register(newInProcessServerBuilder().addService(serviceDef).build().start()); + + ManagedChannel channel = + grpcCleanup.register( + newInProcessChannelBuilder() + .intercept(newCaptureMetadataInterceptor(headersCapture, trailersCapture)) + .build()); + try { + ClientCalls.blockingUnaryCall(channel, echoMethod, CallOptions.DEFAULT, "hello"); + fail(); + } catch (StatusRuntimeException e) { + assertThat(e.getStatus()).isNotNull(); + assertThat(e.getStatus().getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + assertThat(headersCapture.get() == null || !headersCapture.get().containsKey(FOO_KEY)).isTrue(); + assertThat(trailersCapture.get().get(FOO_KEY)).isEqualTo("foo-value"); + } + + private static InProcessServerBuilder newInProcessServerBuilder() { + return InProcessServerBuilder.forName(SERVER_NAME).directExecutor(); + } + + private static InProcessChannelBuilder newInProcessChannelBuilder() { + return InProcessChannelBuilder.forName(SERVER_NAME).directExecutor(); + } +}
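To round out this last patch, the sketch below shows how a server might wire up the new interceptor, following the same pattern as the tests above. The `"server-id"` key, the `replica-7` value, and the `ServerIdentityExample` wrapper are placeholders; only `MetadataUtils.newAttachMetadataServerInterceptor` is the API introduced here.

```java
import io.grpc.BindableService;
import io.grpc.Grpc;
import io.grpc.InsecureServerCredentials;
import io.grpc.Metadata;
import io.grpc.Server;
import io.grpc.ServerInterceptors;
import io.grpc.stub.MetadataUtils;
import java.io.IOException;

final class ServerIdentityExample {
  // Attaches a fixed "server-id" entry to every response: it rides on the headers
  // when the call sends them, and falls back to the trailers otherwise.
  static Server start(int port, BindableService service) throws IOException {
    Metadata extras = new Metadata();
    extras.put(Metadata.Key.of("server-id", Metadata.ASCII_STRING_MARSHALLER), "replica-7");
    return Grpc.newServerBuilderForPort(port, InsecureServerCredentials.create())
        .addService(ServerInterceptors.intercept(
            service, MetadataUtils.newAttachMetadataServerInterceptor(extras)))
        .build()
        .start();
  }
}
```

Per the javadoc added in this patch, keep this interceptor ahead of any other interceptor that might send headers, so the extras are merged before headers go out.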