Skip to content

Commit

Permalink
config: refine config clone (tikv#3116)
Browse files Browse the repository at this point in the history
Signed-off-by: Zheng Xiangsheng <hundundm@gmail.com>
  • Loading branch information
HunDunDM authored Oct 29, 2020
1 parent 6dc2125 commit 90a24a9
Show file tree
Hide file tree
Showing 4 changed files with 53 additions and 69 deletions.
1 change: 0 additions & 1 deletion server/cluster/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,6 @@ func (s *testClusterInfoSuite) TestRegionFlowChanged(c *C) {
newRegion = cluster.GetRegion(region.GetID())
c.Assert(region.GetBytesRead(), Equals, uint64(0))
c.Assert(newRegion.GetBytesRead(), Not(Equals), uint64(0))

}

func (s *testClusterInfoSuite) TestConcurrentRegionHeartbeat(c *C) {
Expand Down
91 changes: 26 additions & 65 deletions server/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -578,9 +578,8 @@ func (c *Config) adjustLog(meta *configMetaData) {

// Clone returns a cloned configuration.
// Clone returns a cloned configuration.
//
// Note: this is a shallow value copy of the whole struct; nested
// reference fields (maps/slices) inside sub-configs still alias the
// original. Callers needing isolation clone the sub-configs too.
func (c *Config) Clone() *Config {
	cfg := *c
	return &cfg
}

func (c *Config) String() string {
Expand Down Expand Up @@ -713,50 +712,19 @@ type ScheduleConfig struct {

// Clone returns a cloned scheduling configuration.
func (c *ScheduleConfig) Clone() *ScheduleConfig {
schedulers := make(SchedulerConfigs, len(c.Schedulers))
copy(schedulers, c.Schedulers)
storeLimit := make(map[uint64]StoreLimitConfig, len(c.StoreLimit))
for k, v := range c.StoreLimit {
storeLimit[k] = v
}
return &ScheduleConfig{
MaxSnapshotCount: c.MaxSnapshotCount,
MaxPendingPeerCount: c.MaxPendingPeerCount,
MaxMergeRegionSize: c.MaxMergeRegionSize,
MaxMergeRegionKeys: c.MaxMergeRegionKeys,
SplitMergeInterval: c.SplitMergeInterval,
PatrolRegionInterval: c.PatrolRegionInterval,
MaxStoreDownTime: c.MaxStoreDownTime,
LeaderScheduleLimit: c.LeaderScheduleLimit,
LeaderSchedulePolicy: c.LeaderSchedulePolicy,
RegionScheduleLimit: c.RegionScheduleLimit,
ReplicaScheduleLimit: c.ReplicaScheduleLimit,
MergeScheduleLimit: c.MergeScheduleLimit,
EnableOneWayMerge: c.EnableOneWayMerge,
EnableCrossTableMerge: c.EnableCrossTableMerge,
HotRegionScheduleLimit: c.HotRegionScheduleLimit,
HotRegionCacheHitsThreshold: c.HotRegionCacheHitsThreshold,
StoreLimit: storeLimit,
TolerantSizeRatio: c.TolerantSizeRatio,
LowSpaceRatio: c.LowSpaceRatio,
HighSpaceRatio: c.HighSpaceRatio,
SchedulerMaxWaitingOperator: c.SchedulerMaxWaitingOperator,
DisableLearner: c.DisableLearner,
DisableRemoveDownReplica: c.DisableRemoveDownReplica,
DisableReplaceOfflineReplica: c.DisableReplaceOfflineReplica,
DisableMakeUpReplica: c.DisableMakeUpReplica,
DisableRemoveExtraReplica: c.DisableRemoveExtraReplica,
DisableLocationReplacement: c.DisableLocationReplacement,
EnableRemoveDownReplica: c.EnableRemoveDownReplica,
EnableReplaceOfflineReplica: c.EnableReplaceOfflineReplica,
EnableMakeUpReplica: c.EnableMakeUpReplica,
EnableRemoveExtraReplica: c.EnableRemoveExtraReplica,
EnableLocationReplacement: c.EnableLocationReplacement,
EnableDebugMetrics: c.EnableDebugMetrics,
EnableJointConsensus: c.EnableJointConsensus,
StoreLimitMode: c.StoreLimitMode,
Schedulers: schedulers,
schedulers := append(c.Schedulers[:0:0], c.Schedulers...)
var storeLimit map[uint64]StoreLimitConfig
if c.StoreLimit != nil {
storeLimit = make(map[uint64]StoreLimitConfig, len(c.StoreLimit))
for k, v := range c.StoreLimit {
storeLimit[k] = v
}
}
cfg := *c
cfg.StoreLimit = storeLimit
cfg.Schedulers = schedulers
cfg.SchedulersPayload = nil
return &cfg
}

const (
Expand Down Expand Up @@ -852,6 +820,10 @@ func (c *ScheduleConfig) adjust(meta *configMetaData) error {
c.StoreBalanceRate = 0
}

if c.StoreLimit == nil {
c.StoreLimit = make(map[uint64]StoreLimitConfig)
}

return c.Validate()
}

Expand Down Expand Up @@ -1013,15 +985,10 @@ type ReplicationConfig struct {

// Clone makes a deep copy of the config.
// Clone makes a deep copy of the config.
//
// A value copy covers the scalar fields; LocationLabels is the only
// reference-typed field, so it is copied into a fresh backing array
// (nil input stays nil thanks to the zero-capacity append idiom).
func (c *ReplicationConfig) Clone() *ReplicationConfig {
	locationLabels := append(c.LocationLabels[:0:0], c.LocationLabels...)
	cfg := *c
	cfg.LocationLabels = locationLabels
	return &cfg
}

// Validate is used to validate if some replication configurations are right.
Expand Down Expand Up @@ -1099,16 +1066,10 @@ func (c *PDServerConfig) adjust(meta *configMetaData) error {

// Clone returns a cloned PD server config.
// Clone returns a cloned PD server config.
//
// A value copy covers the scalar fields; RuntimeServices is the only
// reference-typed field, so it is copied into a fresh backing array
// (nil input stays nil thanks to the zero-capacity append idiom).
func (c *PDServerConfig) Clone() *PDServerConfig {
	runtimeServices := append(c.RuntimeServices[:0:0], c.RuntimeServices...)
	cfg := *c
	cfg.RuntimeServices = runtimeServices
	return &cfg
}

// Validate is used to validate if some pd-server configurations are right.
Expand Down
24 changes: 24 additions & 0 deletions server/config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -434,3 +434,27 @@ wait-store-timeout = "120s"
c.Assert(err, IsNil)
c.Assert(cfg.ReplicationMode.ReplicationMode, Equals, "majority")
}

// TestConfigClone checks that each config type's Clone produces a value
// deep-equal to its source after defaults have been applied.
func (s *testConfigSuite) TestConfigClone(c *C) {
	base := &Config{}
	base.Adjust(nil)
	c.Assert(base.Clone(), DeepEquals, base)

	meta := newConfigMetadata(nil)

	sc := &ScheduleConfig{}
	sc.adjust(meta)
	c.Assert(sc.Clone(), DeepEquals, sc)

	rep := &ReplicationConfig{}
	rep.adjust(meta)
	c.Assert(rep.Clone(), DeepEquals, rep)

	srv := &PDServerConfig{}
	srv.adjust(meta)
	c.Assert(srv.Clone(), DeepEquals, srv)

	mode := &ReplicationModeConfig{}
	mode.adjust(meta)
	c.Assert(mode.Clone(), DeepEquals, mode)
}
6 changes: 3 additions & 3 deletions server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -729,9 +729,9 @@ func (s *Server) StartTimestamp() int64 {
// GetConfig gets the config information.
func (s *Server) GetConfig() *config.Config {
cfg := s.cfg.Clone()
cfg.Schedule = *s.persistOptions.GetScheduleConfig()
cfg.Replication = *s.persistOptions.GetReplicationConfig()
cfg.PDServerCfg = *s.persistOptions.GetPDServerConfig()
cfg.Schedule = *s.persistOptions.GetScheduleConfig().Clone()
cfg.Replication = *s.persistOptions.GetReplicationConfig().Clone()
cfg.PDServerCfg = *s.persistOptions.GetPDServerConfig().Clone()
cfg.ReplicationMode = *s.persistOptions.GetReplicationModeConfig()
cfg.LabelProperty = s.persistOptions.GetLabelPropertyConfig().Clone()
cfg.ClusterVersion = *s.persistOptions.GetClusterVersion()
Expand Down

0 comments on commit 90a24a9

Please sign in to comment.