ddl: fix the ddl txn commit may be conflict with reorg txn #24668

Closed · wants to merge 5 commits

Changes from 4 commits
7 changes: 3 additions & 4 deletions ddl/column.go
@@ -1011,10 +1011,10 @@ func (w *worker) doModifyColumnTypeWithData(
// enable: curl -X PUT -d "pause" "http://127.0.0.1:10080/fail/github.com/pingcap/tidb/ddl/mockDelayInModifyColumnTypeWithData".
// disable: curl -X DELETE "http://127.0.0.1:10080/fail/github.com/pingcap/tidb/ddl/mockDelayInModifyColumnTypeWithData"
failpoint.Inject("mockDelayInModifyColumnTypeWithData", func() {})
err = w.runReorgJob(t, reorgInfo, tbl.Meta(), d.lease, func() (addIndexErr error) {
err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (reorgErr error) {
defer util.Recover(metrics.LabelDDL, "onModifyColumn",
func() {
addIndexErr = errCancelledDDLJob.GenWithStack("modify table `%v` column `%v` panic", tblInfo.Name, oldCol.Name)
reorgErr = errCancelledDDLJob.GenWithStack("modify table `%v` column `%v` panic", tblInfo.Name, oldCol.Name)
}, false)
return w.updateColumnAndIndexes(tbl, oldCol, changingCol, changingIdxs, reorgInfo)
})
@@ -1024,10 +1024,9 @@ func (w *worker) doModifyColumnTypeWithData(
return ver, nil
}
if needRollbackData(err) {
if err1 := t.RemoveDDLReorgHandle(job, reorgInfo.elements); err1 != nil {
if err1 := reorgInfo.CleanReorgMeta(); err1 != nil {
logutil.BgLogger().Warn("[ddl] run modify column job failed, RemoveDDLReorgHandle failed, can't convert job to rollback",
Review comment (Contributor): Do we need to update this log?

zap.String("job", job.String()), zap.Error(err1))
return ver, errors.Trace(err)
}
logutil.BgLogger().Warn("[ddl] run modify column job failed, convert job to rollback", zap.String("job", job.String()), zap.Error(err))
// When encounter these error above, we change the job to rolling back job directly.
17 changes: 17 additions & 0 deletions ddl/column_type_change_test.go
@@ -1797,3 +1797,20 @@ func (s *testColumnTypeChangeSuite) TestChangeIntToBitWillPanicInBackfillIndexes
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustQuery("select * from t").Check(testkit.Rows("\x13 1 1.00", "\x11 2 2.00"))
}

// Close issue #24427
func (s *testColumnTypeChangeSuite) TestFixDDLTxnWillConflictWithReorgTxn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
// Enable column change variable.
tk.Se.GetSessionVars().EnableChangeColumnType = true

tk.MustExec("create table t(a int)")
tk.MustExec("alter table t add index(a)")
tk.MustExec("set @@sql_mode=\"\"")
tk.MustExec("insert into t values(128),(129)")
tk.MustExec("set @@tidb_enable_change_column_type=1")
tk.MustExec("alter table t modify column a tinyint")

tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1690 constant 128 overflows tinyint", "Warning 1690 constant 128 overflows tinyint"))
}
4 changes: 2 additions & 2 deletions ddl/index.go
@@ -546,7 +546,7 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo
return ver, errors.Trace(err)
}

err = w.runReorgJob(t, reorgInfo, tbl.Meta(), d.lease, func() (addIndexErr error) {
err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (addIndexErr error) {
defer util.Recover(metrics.LabelDDL, "onCreateIndex",
func() {
addIndexErr = errCancelledDDLJob.GenWithStack("add table `%v` index `%v` panic", tblInfo.Name, indexInfo.Name)
@@ -561,7 +561,7 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo
if kv.ErrKeyExists.Equal(err) || errCancelledDDLJob.Equal(err) || errCantDecodeRecord.Equal(err) {
logutil.BgLogger().Warn("[ddl] run add index job failed, convert job to rollback", zap.String("job", job.String()), zap.Error(err))
ver, err = convertAddIdxJob2RollbackJob(t, job, tblInfo, indexInfo, err)
if err1 := t.RemoveDDLReorgHandle(job, reorgInfo.elements); err1 != nil {
if err1 := reorgInfo.CleanReorgMeta(); err1 != nil {
logutil.BgLogger().Warn("[ddl] run add index job failed, convert job to rollback, RemoveDDLReorgHandle failed", zap.String("job", job.String()), zap.Error(err1))
Review comment (Contributor): Ditto.

}
}
2 changes: 1 addition & 1 deletion ddl/partition.go
@@ -1006,7 +1006,7 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (
// and then run the reorg next time.
return ver, errors.Trace(err)
}
err = w.runReorgJob(t, reorgInfo, tbl.Meta(), d.lease, func() (dropIndexErr error) {
err = w.runReorgJob(reorgInfo, tbl.Meta(), d.lease, func() (dropIndexErr error) {
defer tidbutil.Recover(metrics.LabelDDL, "onDropTablePartition",
func() {
dropIndexErr = errCancelledDDLJob.GenWithStack("drop partition panic")
36 changes: 32 additions & 4 deletions ddl/reorg.go
@@ -156,7 +156,24 @@ func (rc *reorgCtx) clean() {
rc.doneCh = nil
}

func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model.TableInfo, lease time.Duration, f func() error) error {
// runReorgJob only performs `READ` actions on the reorg info inside the ddl txn.
// If we want to do a `WRITE` action that records a reorg handle, we need an additional kv txn.
// Otherwise, the write conflict shown below will occur.
//
//  ddl txn -------------+--------------------------------+
//  (RunInKVTxn: start)  |                                | (ddl done successfully)
//               (Write) |                                | (RunInKVTxn commit fails: write conflict)
//                       V                                V
//                            reorg handle
//                       ^
//                       | (Update handle, e.g. change element)
//                       | (RunInKVTxn: committed instantly)
//  reorg txn -----------+
//
// For this case:
// Treat the ddl txn as a daemon thread: it is not responsible for writing the reorg handle down, it only cares
// about reading the reorg result, and a read won't conflict with any write action of the reorg txn.
func (w *worker) runReorgJob(reorgInfo *reorgInfo, tblInfo *model.TableInfo, lease time.Duration, f func() error) error {
// lease = 0 means it's in an integration test. In this case we don't delay so the test won't run too slowly.
if lease > 0 {
delayForAsyncCommit()
@@ -220,7 +237,7 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model.
case model.ActionModifyColumn:
metrics.GetBackfillProgressByLabel(metrics.LblModifyColumn).Set(100)
}
if err1 := t.RemoveDDLReorgHandle(job, reorgInfo.elements); err1 != nil {
if err1 := reorgInfo.CleanReorgMeta(); err1 != nil {
Review comment (Contributor): This txn and the DDL txn are not the same txn, so the data and the schema may get out of sync. I think this plan can be further optimized.

logutil.BgLogger().Warn("[ddl] run reorg job done, removeDDLReorgHandle failed", zap.Error(err1))
return errors.Trace(err1)
}
@@ -245,7 +262,7 @@ func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model.
// Update a reorgInfo's handle.
// Since the daemon-worker is triggered by a timer to store the info half-way,
// you should keep these infos read-only (like job) / atomic (like doneKey & element) / concurrent safe.
err := t.UpdateDDLReorgStartHandle(job, currentElement, doneKey)
err := reorgInfo.UpdateReorgMeta(doneKey)

logutil.BgLogger().Info("[ddl] run reorg job wait timeout",
zap.Duration("waitTime", waitTimeout),
@@ -566,7 +583,7 @@ func getReorgInfo(d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, elem
})

info.first = true
// get the current version for reorganization if we don't have
// get the current version for reorganization if we don't have one.
ver, err := getValidCurrentVersion(d.store)
if err != nil {
return nil, errors.Trace(err)
@@ -707,3 +724,14 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key) error {
}
return nil
}

func (r *reorgInfo) CleanReorgMeta() error {
Review comment (Contributor): It seems that this function is similar to RemoveDDLReorgHandle; could we use a similar name?

err := kv.RunInNewTxn(context.Background(), r.d.store, true, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
return errors.Trace(t.RemoveDDLReorgHandle(r.Job, r.elements))
})
if err != nil {
return errors.Trace(err)
}
return nil
}
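
For reference, both reorg-handle writes now follow the pattern CleanReorgMeta shows above: the write runs in its own short-lived KV transaction that commits immediately, so the long-running DDL transaction only reads the handle and avoids the write conflict described in the runReorgJob comment. Below is a minimal sketch of that pattern applied to updating the start handle; it assumes the imports already used in ddl/reorg.go (context, kv, meta, errors), the helper name is hypothetical, and the real UpdateReorgMeta in this PR may be implemented differently.

func (r *reorgInfo) updateReorgStartHandleSketch(startKey kv.Key) error {
	// Run the write in a fresh txn so it commits immediately and never conflicts
	// with the DDL job's own long-lived meta txn, which only reads the handle.
	err := kv.RunInNewTxn(context.Background(), r.d.store, true, func(ctx context.Context, txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		// Persist where the backfill should resume from for the current element.
		return errors.Trace(t.UpdateDDLReorgStartHandle(r.Job, r.currElement, startKey))
	})
	return errors.Trace(err)
}

Either way, the key point of the fix is that runReorgJob no longer receives a *meta.Meta handle for writes: every mutation of the reorg handle (UpdateReorgMeta, CleanReorgMeta) goes through its own transaction.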
15 changes: 10 additions & 5 deletions ddl/reorg_test.go
@@ -93,20 +93,25 @@ func (s *testDDLSuite) TestReorg(c *C) {
c.Assert(err, IsNil)
txn, err = ctx.Txn(true)
c.Assert(err, IsNil)
m := meta.NewMeta(txn)
e := &meta.Element{ID: 333, TypeKey: meta.IndexElementKey}
dCtx := &ddlCtx{
uuid: d.uuid,
store: d.store,
}
rInfo := &reorgInfo{
Job: job,
currElement: e,
// reorgJob depends on ddlCtx's store to do the extra txn.
d: dCtx,
}
mockTbl := tables.MockTableFromMeta(&model.TableInfo{IsCommonHandle: s.IsCommonHandle, CommonHandleVersion: 1})
err = d.generalWorker().runReorgJob(m, rInfo, mockTbl.Meta(), d.lease, f)
err = d.generalWorker().runReorgJob(rInfo, mockTbl.Meta(), d.lease, f)
c.Assert(err, NotNil)

// Wait at most 5 seconds to make sure the function f has returned.
for i := 0; i < 1000; i++ {
time.Sleep(5 * time.Millisecond)
err = d.generalWorker().runReorgJob(m, rInfo, mockTbl.Meta(), d.lease, f)
err = d.generalWorker().runReorgJob(rInfo, mockTbl.Meta(), d.lease, f)
if err == nil {
c.Assert(job.RowCount, Equals, rowCount)
c.Assert(d.generalWorker().reorgCtx.rowCount, Equals, int64(0))
@@ -117,7 +122,7 @@ func (s *testDDLSuite) TestReorg(c *C) {
err = ctx.NewTxn(context.Background())
c.Assert(err, IsNil)

m = meta.NewMeta(txn)
m := meta.NewMeta(txn)
info, err1 := getReorgInfo(d.ddlCtx, m, job, mockTbl, nil)
c.Assert(err1, IsNil)
c.Assert(info.StartKey, DeepEquals, kv.Key(handle.Encoded()))
@@ -172,7 +177,7 @@ func (s *testDDLSuite) TestReorg(c *C) {

err = d.Stop()
c.Assert(err, IsNil)
err = d.generalWorker().runReorgJob(m, rInfo, mockTbl.Meta(), d.lease, func() error {
err = d.generalWorker().runReorgJob(rInfo, mockTbl.Meta(), d.lease, func() error {
time.Sleep(4 * testLease)
return nil
})