diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go
index abf17adbaeb1..71cfe1f766c2 100644
--- a/agent/consul/health_endpoint.go
+++ b/agent/consul/health_endpoint.go
@@ -232,6 +232,8 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
 		// TODO(acl-error-enhancements) Look for ways to percolate this information up to give any feedback to the user.
 		if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
 			// Just return nil, which will return an empty response (tested)
+			// We should be careful to set the index to prevent a busy loop from triggering.
+			reply.Index = 1
 			return nil
 		}
 	}
diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go
index 4b492081c930..1981332d86b4 100644
--- a/agent/consul/health_endpoint_test.go
+++ b/agent/consul/health_endpoint_test.go
@@ -1127,6 +1127,7 @@ node "foo" {
 	var resp structs.IndexedCheckServiceNodes
 	assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
 	assert.Len(t, resp.Nodes, 0)
+	assert.Greater(t, resp.Index, uint64(0))
 
 	// List w/ token. This should work since we're requesting "foo", but should
 	// also only contain the proxies with names that adhere to our ACL.
diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go
index aa11dc43f542..792367b473be 100644
--- a/agent/proxycfg/state_test.go
+++ b/agent/proxycfg/state_test.go
@@ -795,7 +795,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 				fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true),
 				fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()):   genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true),
 				fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true),
-				upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()):                   genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true),
+				upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()):                   genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "dc1", "cluster-01", true),
 				fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()):                                        genVerifyGatewayWatch("dc2"),
 				fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()):                                         genVerifyGatewayWatch("dc1"),
 			},
diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go
index d7fee4a3fcc1..5e42072fac32 100644
--- a/agent/proxycfg/upstreams.go
+++ b/agent/proxycfg/upstreams.go
@@ -316,8 +316,19 @@ func (s *handlerUpstreams) resetWatchesFromChain(
 			watchedChainEndpoints = true
 		}
 
-		opts := targetWatchOpts{upstreamID: uid}
-		opts.fromChainTarget(target)
+		opts := targetWatchOpts{
+			upstreamID: uid,
+			chainID:    target.ID,
+			service:    target.Service,
+			filter:     target.Subset.Filter,
+			datacenter: target.Datacenter,
+			peer:       target.Peer,
+			entMeta:    target.GetEnterpriseMetadata(),
+		}
+		// Peering targets do not set the datacenter field, so we should default it here.
+		if opts.datacenter == "" {
+			opts.datacenter = s.source.Datacenter
+		}
 
 		err := s.watchUpstreamTarget(ctx, snap, opts)
 		if err != nil {
@@ -435,15 +446,6 @@ type targetWatchOpts struct {
 	entMeta    *acl.EnterpriseMeta
 }
 
-func (o *targetWatchOpts) fromChainTarget(t *structs.DiscoveryTarget) {
-	o.chainID = t.ID
-	o.service = t.Service
-	o.filter = t.Subset.Filter
-	o.datacenter = t.Datacenter
-	o.peer = t.Peer
-	o.entMeta = t.GetEnterpriseMetadata()
-}
-
 func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
 	s.logger.Trace("initializing watch of target",
 		"upstream", opts.upstreamID,
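
Context for the `reply.Index = 1` change in health_endpoint.go: Consul blocking queries re-poll with the last index the server returned, and a wait index of zero tells the server to answer immediately rather than block. If an ACL-denied (empty) reply left reply.Index at zero, any watcher would re-issue the query the moment it returned and spin. Below is a minimal sketch of such a client-side blocking loop against the public Go API (github.com/hashicorp/consul/api); the service name "web" and the logging are illustrative assumptions, not part of this change.

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	var lastIndex uint64
	for {
		// WaitIndex = 0 means "answer immediately"; any non-zero value makes
		// the server hold the request until the index changes or WaitTime
		// elapses. This is why the server must never hand back Index == 0
		// for an empty, ACL-filtered result.
		entries, meta, err := client.Health().Service("web", "", true, &api.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  5 * time.Minute,
		})
		if err != nil {
			log.Println("query error:", err)
			time.Sleep(time.Second) // back off instead of hammering the server
			continue
		}
		lastIndex = meta.LastIndex // carry the index into the next blocking call
		log.Printf("%d healthy instances at index %d", len(entries), lastIndex)
	}
}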
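
Context for the datacenter defaulting in upstreams.go (and the "" to "dc1" expectation change in state_test.go): discovery-chain targets that point at a peer leave target.Datacenter empty, so the watch options now fall back to the local source datacenter. A standalone sketch of the rule follows; defaultDatacenter is a hypothetical helper for illustration, not a function introduced by this change.

// defaultDatacenter illustrates the fallback: peered targets carry no
// datacenter, so the local (source) datacenter is used instead.
func defaultDatacenter(targetDC, sourceDC string) string {
	if targetDC == "" {
		return sourceDC
	}
	return targetDC
}

With a local datacenter of "dc1", defaultDatacenter("", "dc1") yields "dc1", which is why the peered-request expectation in state_test.go changes from "" to "dc1".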