Skip to content

Commit

Permalink
*: address comments
Browse files Browse the repository at this point in the history
  • Loading branch information
zimulala committed Feb 1, 2016
1 parent 7a3daf5 commit 8b362f3
Show file tree
Hide file tree
Showing 6 changed files with 26 additions and 23 deletions.
8 changes: 3 additions & 5 deletions ddl/bg_worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,6 @@ func (d *ddl) runBgJob(t *meta.Meta, job *model.Job) {
case model.ActionDropTable:
err = d.delReorgTable(t, job)
default:

job.State = model.JobCancelled
err = errors.Errorf("invalid background job %v", job)

Expand Down Expand Up @@ -155,10 +154,9 @@ func (d *ddl) finishBgJob(t *meta.Meta, job *model.Job) error {
func (d *ddl) onBackgroundWorker() {
defer d.wait.Done()

// for a ddl drop job from start to end, its state will go public -> write only -> delete only -> none.
// for every state change, we will wait at least 2 * lease time.
// so here the ticker check is 8 * lease to ensure that ddl job have converted to background job.
checkTime := chooseLeaseTime(8*d.lease, 10*time.Second)
// we use 4 * lease time to check the owner's timeout, so here we update the owner's status
// every 2 * lease time. If lease is 0, we use the default of 10s.
checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)

ticker := time.NewTicker(checkTime)
defer ticker.Stop()
Expand Down
2 changes: 1 addition & 1 deletion ddl/ddl.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ type ddl struct {
uuid string
ddlJobCh chan struct{}
ddlJobDoneCh chan struct{}
// drop database/table job runs in the background to preform.
// drop database/table job runs in the background.
bgJobCh chan struct{}
// reorgDoneCh is for reorganization, if the reorganization job is done,
// we will use this channel to notify outer.
Expand Down
30 changes: 20 additions & 10 deletions ddl/ddl_worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ func asyncNotify(ch chan struct{}) {
}
}

func (d *ddl) checkOwner(t *meta.Meta, flag string) (*model.Owner, error) {
func (d *ddl) checkOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
var owner *model.Owner
var err error

Expand All @@ -115,7 +115,7 @@ func (d *ddl) checkOwner(t *meta.Meta, flag string) (*model.Owner, error) {
case bgJobFlag:
owner, err = t.GetBgJobOwner()
default:
err = errors.Errorf("invalid ddl flag %v", flag)
err = errors.Errorf("invalid ddl flag %v", flag.String())
}
if err != nil {
return nil, errors.Trace(err)
Expand All @@ -132,10 +132,6 @@ func (d *ddl) checkOwner(t *meta.Meta, flag string) (*model.Owner, error) {
// the owner will update its owner status every 2 * lease time, so here we use
// 4 * lease to check its timeout.
maxTimeout := int64(4 * d.lease)
if flag == bgJobFlag {
// background job doesn't need to guarantee other servers update the schema.
maxTimeout = int64(2 * d.lease)
}
if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
owner.OwnerID = d.uuid
owner.LastUpdateTS = now
Expand All @@ -149,11 +145,11 @@ func (d *ddl) checkOwner(t *meta.Meta, flag string) (*model.Owner, error) {
if err != nil {
return nil, errors.Trace(err)
}
log.Debugf("[ddl] become %s job owner %s", flag, owner.OwnerID)
log.Debugf("[ddl] become %s job owner %s", flag.String(), owner.OwnerID)
}

if owner.OwnerID != d.uuid {
log.Debugf("[ddl] not %s job owner, owner is %s", flag, owner.OwnerID)
log.Debugf("[ddl] not %s job owner, owner is %s", flag.String(), owner.OwnerID)
return nil, errors.Trace(ErrNotOwner)
}

Expand Down Expand Up @@ -195,11 +191,25 @@ var ErrNotOwner = errors.New("DDL: not owner")
// ErrWorkerClosed means we have already closed the DDL worker.
var ErrWorkerClosed = errors.New("DDL: worker is closed")

// JobType is the type of a job, distinguishing DDL jobs from background jobs.
type JobType int

// Job type values. Starting at iota + 1 keeps the zero value invalid,
// so an uninitialized JobType cannot be mistaken for a real flag.
const (
	ddlJobFlag JobType = iota + 1
	bgJobFlag
)

// String implements fmt.Stringer, returning a human-readable name for
// the job type. Unrecognized values yield "unknown".
func (j JobType) String() string {
	switch j {
	case ddlJobFlag:
		return "ddl"
	case bgJobFlag:
		return "background"
	}

	return "unknown"
}

func (d *ddl) handleDDLJobQueue() error {
for {
if d.isClosed() {
Expand Down
2 changes: 1 addition & 1 deletion ddl/ddl_worker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ func testCreateStore(c *C, name string) kv.Storage {
type testDDLSuite struct {
}

func testCheckOwner(c *C, d *ddl, isOwner bool, flag string) {
func testCheckOwner(c *C, d *ddl, isOwner bool, flag JobType) {
err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
t := meta.NewMeta(txn)
_, err := d.checkOwner(t, flag)
Expand Down
2 changes: 0 additions & 2 deletions ddl/schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,10 +129,8 @@ func (d *ddl) onDropSchema(t *meta.Meta, job *model.Job) error {
dbInfo.State = model.StateDeleteOnly
err = t.UpdateDatabase(dbInfo)
case model.StateDeleteOnly:
// delete only -> reorganization
dbInfo.State = model.StateDeleteReorganization
err = t.UpdateDatabase(dbInfo)
// all reorganization jobs done, drop this database
if err = t.DropDatabase(dbInfo.ID); err != nil {
break
}
Expand Down
5 changes: 1 addition & 4 deletions ddl/table.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,6 @@ func (d *ddl) onCreateTable(t *meta.Meta, job *model.Job) error {
}

func (d *ddl) delReorgTable(t *meta.Meta, job *model.Job) error {
// reorganization -> absent
tblInfo := &model.TableInfo{}
err := job.DecodeArgs(tblInfo)
if err != nil {
Expand Down Expand Up @@ -137,10 +136,8 @@ func (d *ddl) onDropTable(t *meta.Meta, job *model.Job) error {
tblInfo.State = model.StateDeleteOnly
err = t.UpdateTable(schemaID, tblInfo)
case model.StateDeleteOnly:
// delete only -> reorganization
tblInfo.State = model.StateDeleteReorganization
tblInfo.State = model.StateNone
err = t.UpdateTable(schemaID, tblInfo)
// all reorganization jobs done, drop this table.
if err = t.DropTable(job.SchemaID, job.TableID); err != nil {
break
}
Expand Down

0 comments on commit 8b362f3

Please sign in to comment.