From dd9a2e25df498c7f70f2eeebf270745d7520bf1d Mon Sep 17 00:00:00 2001 From: disksing Date: Wed, 24 Nov 2021 22:43:51 +0800 Subject: [PATCH] This is an automated cherry-pick of #4347 Signed-off-by: ti-chi-bot --- server/schedule/checker/replica_strategy.go | 6 ++- server/schedule/region_scatterer.go | 13 ++++++ server/schedulers/shuffle_region.go | 6 ++- server/statistics/store.go | 2 +- server/statistics/store_test.go | 48 +++++++++++++++++++++ 5 files changed, 72 insertions(+), 3 deletions(-) create mode 100644 server/statistics/store_test.go diff --git a/server/schedule/checker/replica_strategy.go b/server/schedule/checker/replica_strategy.go index 1adbf383ad6..0f816df28c6 100644 --- a/server/schedule/checker/replica_strategy.go +++ b/server/schedule/checker/replica_strategy.go @@ -93,8 +93,12 @@ func (s *ReplicaStrategy) SelectStoreToFix(coLocationStores []*core.StoreInfo, o func (s *ReplicaStrategy) SelectStoreToImprove(coLocationStores []*core.StoreInfo, old uint64) uint64 { // trick to avoid creating a slice with `old` removed. 
 	s.swapStoreToFirst(coLocationStores, old)
+	oldStore := s.cluster.GetStore(old)
+	if oldStore == nil {
+		return 0
+	}
 	filters := []filter.Filter{
-		filter.NewLocationImprover(s.checkerName, s.locationLabels, coLocationStores, s.cluster.GetStore(old)),
+		filter.NewLocationImprover(s.checkerName, s.locationLabels, coLocationStores, oldStore),
 	}
 	if len(s.locationLabels) > 0 && s.isolationLevel != "" {
 		filters = append(filters, filter.NewIsolationFilter(s.checkerName, s.isolationLevel, s.locationLabels, coLocationStores[1:]))
diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go
index b98416b0061..037f8abdc06 100644
--- a/server/schedule/region_scatterer.go
+++ b/server/schedule/region_scatterer.go
@@ -278,6 +278,9 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string) *
 	// Group peers by the engine of their stores
 	for _, peer := range region.GetPeers() {
 		store := r.cluster.GetStore(peer.GetStoreId())
+		if store == nil {
+			return nil
+		}
 		if ordinaryFilter.Target(r.cluster.GetOpts(), store) {
 			ordinaryPeers[peer.GetId()] = peer
 		} else {
@@ -406,7 +409,10 @@ func (r *RegionScatterer) selectAvailableLeaderStores(group string, peers map[ui
 	leaderCandidateStores := make([]uint64, 0)
 	for storeID := range peers {
 		store := r.cluster.GetStore(storeID)
+		if store == nil {
+			return 0
+		}
 		engine := store.GetLabelValue(filter.EngineKey)
 		if len(engine) < 1 {
 			leaderCandidateStores = append(leaderCandidateStores, storeID)
 		}
@@ -430,6 +440,9 @@ func (r *RegionScatterer) Put(peers map[uint64]*metapb.Peer, leaderStoreID uint6
 	for _, peer := range peers {
 		storeID := peer.GetStoreId()
 		store := r.cluster.GetStore(storeID)
+		if store == nil {
+			continue
+		}
 		if ordinaryFilter.Target(r.cluster.GetOpts(), store) {
 			r.ordinaryEngine.selectedPeer.Put(storeID, group)
 			scatterDistributionCounter.WithLabelValues(
diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go index 487e3bcaebb..996dc2416a6 100644 --- a/server/schedulers/shuffle_region.go +++ b/server/schedulers/shuffle_region.go @@ -153,7 +153,11 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster opt.Cluster) (*core. } func (s *shuffleRegionScheduler) scheduleAddPeer(cluster opt.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { - scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster, region, cluster.GetStore(oldPeer.GetStoreId())) + store := cluster.GetStore(oldPeer.GetStoreId()) + if store == nil { + return nil + } + scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster, region, store) excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds()) target := filter.NewCandidates(cluster.GetStores()). diff --git a/server/statistics/store.go b/server/statistics/store.go index b1d55605d50..ce5a933f4ef 100644 --- a/server/statistics/store.go +++ b/server/statistics/store.go @@ -94,7 +94,7 @@ func (s *StoresStats) FilterUnhealthyStore(cluster core.StoreSetInformer) { defer s.Unlock() for storeID := range s.rollingStoresStats { store := cluster.GetStore(storeID) - if store.IsTombstone() || store.IsUnhealthy() || store.IsPhysicallyDestroyed() { + if store == nil || store.IsTombstone() || store.IsUnhealthy() || store.IsPhysicallyDestroyed() { delete(s.rollingStoresStats, storeID) } } diff --git a/server/statistics/store_test.go b/server/statistics/store_test.go new file mode 100644 index 00000000000..e3247ea1c46 --- /dev/null +++ b/server/statistics/store_test.go @@ -0,0 +1,48 @@ +// Copyright 2021 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/tikv/pd/server/core" +) + +var _ = Suite(&testStoreSuite{}) + +type testStoreSuite struct{} + +func (s *testStoreSuite) TestFilterUnhealtyStore(c *C) { + stats := NewStoresStats() + cluster := core.NewBasicCluster() + for i := uint64(1); i <= 5; i++ { + cluster.PutStore(core.NewStoreInfo(&metapb.Store{Id: i}, core.SetLastHeartbeatTS(time.Now()))) + stats.Observe(i, &pdpb.StoreStats{}) + } + c.Assert(stats.GetStoresLoads(), HasLen, 5) + + cluster.PutStore(cluster.GetStore(1).Clone(core.SetLastHeartbeatTS(time.Now().Add(-24 * time.Hour)))) + cluster.PutStore(cluster.GetStore(2).Clone(core.TombstoneStore())) + cluster.DeleteStore(cluster.GetStore(3)) + + stats.FilterUnhealthyStore(cluster) + loads := stats.GetStoresLoads() + c.Assert(loads, HasLen, 2) + c.Assert(loads[4], NotNil) + c.Assert(loads[5], NotNil) +}