From 636afc862bb7e58a11a0e140d2cec88c701fd099 Mon Sep 17 00:00:00 2001 From: tangenta Date: Sun, 4 Oct 2020 12:59:40 +0800 Subject: [PATCH 1/4] cherry pick #18036 to release-4.0 Signed-off-by: ti-srebot --- ddl/column.go | 293 +++++++++++++++++++++++++++++++++++++ ddl/db_integration_test.go | 12 ++ ddl/ddl_api.go | 37 ++++- planner/core/plan_to_pb.go | 2 +- statistics/handle/ddl.go | 7 + table/column.go | 8 +- table/tables/tables.go | 2 +- util/admin/admin.go | 2 +- 8 files changed, 346 insertions(+), 17 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index f7f1675b4ec1e..3e6ca0f4b0671 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -229,6 +229,299 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } +<<<<<<< HEAD +======= +func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, []bool, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Trace(err) + } + columns := []*model.ColumnInfo{} + positions := []*ast.ColumnPosition{} + offsets := []int{} + ifNotExists := []bool{} + err = job.DecodeArgs(&columns, &positions, &offsets, &ifNotExists) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, nil, nil, nil, nil, errors.Trace(err) + } + + columnInfos := make([]*model.ColumnInfo, 0, len(columns)) + newColumns := make([]*model.ColumnInfo, 0, len(columns)) + newPositions := make([]*ast.ColumnPosition, 0, len(columns)) + newOffsets := make([]int, 0, len(columns)) + newIfNotExists := make([]bool, 0, len(columns)) + for i, col := range columns { + columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) + if columnInfo != nil { + if columnInfo.State == model.StatePublic { + // We already have a column with the same column name. + if ifNotExists[i] { + // TODO: Should return a warning. + logutil.BgLogger().Warn("[ddl] check add columns, duplicate column", zap.Stringer("col", col.Name)) + continue + } + job.State = model.JobStateCancelled + return nil, nil, nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) + } + columnInfos = append(columnInfos, columnInfo) + } + newColumns = append(newColumns, columns[i]) + newPositions = append(newPositions, positions[i]) + newOffsets = append(newOffsets, offsets[i]) + newIfNotExists = append(newIfNotExists, ifNotExists[i]) + } + return tblInfo, columnInfos, newColumns, newPositions, newOffsets, newIfNotExists, nil +} + +func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { + for i := range columnInfos { + columnInfos[i].State = state + } +} + +func setIndicesState(indexInfos []*model.IndexInfo, state model.SchemaState) { + for _, indexInfo := range indexInfos { + indexInfo.State = state + } +} + +func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { + // Handle the rolling back job. 
+ if job.IsRollingback() { + ver, err = onDropColumns(t, job) + if err != nil { + return ver, errors.Trace(err) + } + return ver, nil + } + + failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(ver, errors.New("occur an error before decode args")) + } + }) + + tblInfo, columnInfos, columns, positions, offsets, ifNotExists, err := checkAddColumns(t, job) + if err != nil { + return ver, errors.Trace(err) + } + if len(columnInfos) == 0 { + if len(columns) == 0 { + job.State = model.JobStateCancelled + return ver, nil + } + for i := range columns { + columnInfo, pos, offset, err := createColumnInfo(tblInfo, columns[i], positions[i]) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset)) + positions[i] = pos + offsets[i] = offset + if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + columnInfos = append(columnInfos, columnInfo) + } + // Set arg to job. + job.Args = []interface{}{columnInfos, positions, offsets, ifNotExists} + } + + originalState := columnInfos[0].State + switch columnInfos[0].State { + case model.StateNone: + // none -> delete only + job.SchemaState = model.StateDeleteOnly + setColumnsState(columnInfos, model.StateDeleteOnly) + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != columnInfos[0].State) + case model.StateDeleteOnly: + // delete only -> write only + job.SchemaState = model.StateWriteOnly + setColumnsState(columnInfos, model.StateWriteOnly) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + case model.StateWriteOnly: + // write only -> reorganization + job.SchemaState = model.StateWriteReorganization + setColumnsState(columnInfos, model.StateWriteReorganization) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + case model.StateWriteReorganization: + // reorganization -> public + // Adjust table column offsets. + oldCols := tblInfo.Columns[:len(tblInfo.Columns)-len(offsets)] + newCols := tblInfo.Columns[len(tblInfo.Columns)-len(offsets):] + tblInfo.Columns = oldCols + for i := range offsets { + // For multiple columns with after position, should adjust offsets. + // e.g. create table t(a int); + // alter table t add column b int after a, add column c int after a; + // alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c; + // alter table t add column a1 int after a, add column b1 int first; + if positions[i].Tp == ast.ColumnPositionAfter { + for j := 0; j < i; j++ { + if (positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i]) || positions[j].Tp == ast.ColumnPositionFirst { + offsets[i]++ + } + } + } + tblInfo.Columns = append(tblInfo.Columns, newCols[i]) + adjustColumnInfoInAddColumn(tblInfo, offsets[i]) + } + setColumnsState(columnInfos, model.StatePublic) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + if err != nil { + return ver, errors.Trace(err) + } + // Finish this job. 
+ job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + asyncNotifyEvent(d, &ddlutil.Event{Tp: model.ActionAddColumns, TableInfo: tblInfo, ColumnInfos: columnInfos}) + default: + err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfos[0].State) + } + + return ver, errors.Trace(err) +} + +func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) { + tblInfo, colInfos, delCount, idxInfos, err := checkDropColumns(t, job) + if err != nil { + return ver, errors.Trace(err) + } + if len(colInfos) == 0 { + job.State = model.JobStateCancelled + return ver, nil + } + + originalState := colInfos[0].State + switch colInfos[0].State { + case model.StatePublic: + // public -> write only + job.SchemaState = model.StateWriteOnly + setColumnsState(colInfos, model.StateWriteOnly) + setIndicesState(idxInfos, model.StateWriteOnly) + for _, colInfo := range colInfos { + err = checkDropColumnForStatePublic(tblInfo, colInfo) + if err != nil { + return ver, errors.Trace(err) + } + } + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfos[0].State) + case model.StateWriteOnly: + // write only -> delete only + job.SchemaState = model.StateDeleteOnly + setColumnsState(colInfos, model.StateDeleteOnly) + setIndicesState(idxInfos, model.StateDeleteOnly) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) + case model.StateDeleteOnly: + // delete only -> reorganization + job.SchemaState = model.StateDeleteReorganization + setColumnsState(colInfos, model.StateDeleteReorganization) + setIndicesState(idxInfos, model.StateDeleteReorganization) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) + case model.StateDeleteReorganization: + // reorganization -> absent + // All reorganization jobs are done, drop this column. + if len(idxInfos) > 0 { + newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) + for _, idx := range tblInfo.Indices { + if !indexInfoContains(idx.ID, idxInfos) { + newIndices = append(newIndices, idx) + } + } + tblInfo.Indices = newIndices + } + + indexIDs := indexInfosToIDList(idxInfos) + tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-delCount] + setColumnsState(colInfos, model.StateNone) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) + if err != nil { + return ver, errors.Trace(err) + } + + // Finish this job. 
+ if job.IsRollingback() { + job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) + } else { + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + job.Args = append(job.Args, indexIDs, getPartitionIDs(tblInfo)) + } + default: + err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State) + } + return ver, errors.Trace(err) +} + +func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, int, []*model.IndexInfo, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, 0, nil, errors.Trace(err) + } + + var colNames []model.CIStr + var ifExists []bool + err = job.DecodeArgs(&colNames, &ifExists) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, 0, nil, errors.Trace(err) + } + + newColNames := make([]model.CIStr, 0, len(colNames)) + colInfos := make([]*model.ColumnInfo, 0, len(colNames)) + newIfExists := make([]bool, 0, len(colNames)) + indexInfos := make([]*model.IndexInfo, 0) + for i, colName := range colNames { + colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) + if colInfo == nil || colInfo.Hidden { + if ifExists[i] { + // TODO: Should return a warning. + logutil.BgLogger().Warn(fmt.Sprintf("column %s doesn't exist", colName)) + continue + } + job.State = model.JobStateCancelled + return nil, nil, 0, nil, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) + } + if err = isDroppableColumn(tblInfo, colName); err != nil { + job.State = model.JobStateCancelled + return nil, nil, 0, nil, errors.Trace(err) + } + newColNames = append(newColNames, colName) + newIfExists = append(newIfExists, ifExists[i]) + colInfos = append(colInfos, colInfo) + idxInfos := listIndicesWithColumn(colName.L, tblInfo.Indices) + indexInfos = append(indexInfos, idxInfos...) + } + job.Args = []interface{}{newColNames, newIfExists} + return tblInfo, colInfos, len(colInfos), indexInfos, nil +} + +func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) { + // Set this column's offset to the last and reset all following columns' offsets. + adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) + // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". + // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. + // And we need consider the column without not-null flag. + if colInfo.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(colInfo.Flag) { + // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, + // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. + // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. + // But currently will be ok, because we can't cancel the drop column job when the job is running, + // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. + // More info about this problem, see PR#9115. + originDefVal, err := generateOriginDefaultValue(colInfo) + if err != nil { + return err + } + return colInfo.SetOriginDefaultValue(originDefVal) + } + return nil +} + +>>>>>>> 6342fa6a5... 
ddl: fix corrupted default value for bit type column (#18036) func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { tblInfo, colInfo, err := checkDropColumn(t, job) if err != nil { diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 3c9a97d004a0b..6f8d7f9056ea7 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -1012,6 +1012,18 @@ func (s *testIntegrationSuite5) TestBitDefaultValue(c *C) { tk.MustQuery("select c from t_bit").Check(testkit.Rows("\x19\xb9")) tk.MustExec("update t_bit set c = b'11100000000111'") tk.MustQuery("select c from t_bit").Check(testkit.Rows("\x38\x07")) + tk.MustExec("drop table t_bit") + + tk.MustExec("create table t_bit (a int)") + tk.MustExec("insert into t_bit value (1)") + tk.MustExec("alter table t_bit add column b bit(1) default b'0';") + tk.MustExec("alter table t_bit modify column b bit(1) default b'1';") + tk.MustQuery("select b from t_bit").Check(testkit.Rows("\x00")) + tk.MustExec("drop table t_bit") + + tk.MustExec("create table t_bit (a bit);") + tk.MustExec("insert into t_bit values (null);") + tk.MustQuery("select count(*) from t_bit where a is null;").Check(testkit.Rows("1")) tk.MustExec(`create table testalltypes1 ( field_1 bit default 1, diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 2c0cdd2411f5b..dd03f206edef9 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2426,8 +2426,30 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } - col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + originDefVal, err := generateOriginDefaultValue(col.ToInfo()) if err != nil { +<<<<<<< HEAD +======= + return nil, errors.Trace(err) + } + + err = col.SetOriginDefaultValue(originDefVal) + return col, err +} + +// AddColumn will add a new column to the table. +func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { + specNewColumn := spec.NewColumns[0] + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + if err = checkAddColumnTooManyColumns(len(t.Cols()) + 1); err != nil { + return errors.Trace(err) + } + col, err := checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) + if err != nil { +>>>>>>> 6342fa6a5... ddl: fix corrupted default value for bit type column (#18036) return errors.Trace(err) } @@ -2970,12 +2992,13 @@ func (d *ddl) getModifiableColumnJob(ctx sessionctx.Context, ident ast.Ident, or // a new version TiDB builds the DDL job that doesn't be set the column's offset and state, // and the old version TiDB is the DDL owner, it doesn't get offset and state from the store. Then it will encounter errors. // So here we set offset and state to support the rolling upgrade. 
- Offset: col.Offset, - State: col.State, - OriginDefaultValue: col.OriginDefaultValue, - FieldType: *specNewColumn.Tp, - Name: newColName, - Version: col.Version, + Offset: col.Offset, + State: col.State, + OriginDefaultValue: col.OriginDefaultValue, + OriginDefaultValueBit: col.OriginDefaultValueBit, + FieldType: *specNewColumn.Tp, + Name: newColName, + Version: col.Version, }) var chs, coll string diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index 8fb84f203adbc..c0450055f3724 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -279,7 +279,7 @@ func SetPBColumnsDefaultValue(ctx sessionctx.Context, pbColumns []*tipb.ColumnIn if c.IsGenerated() && !c.GeneratedStored { pbColumns[i].DefaultVal = []byte{codec.NilFlag} } - if c.OriginDefaultValue == nil { + if c.GetOriginDefaultValue() == nil { continue } diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index a974b857bb0e0..e69a2b21436ee 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -141,6 +141,7 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfo *model.ColumnInfo) return } count := req.GetRow(0).GetInt64(0) +<<<<<<< HEAD value := types.NewDatum(colInfo.OriginDefaultValue) value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType) if err != nil { @@ -154,6 +155,12 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfo *model.ColumnInfo) // If this stats exists, we insert histogram meta first, the distinct_count will always be one. sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, tot_col_size) values (%d, %d, 0, %d, 1, %d)", startTS, physicalID, colInfo.ID, int64(len(value.GetBytes()))*count)) value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) +======= + sqls := make([]string, 0, len(colInfos)) + for _, colInfo := range colInfos { + value := types.NewDatum(colInfo.GetOriginDefaultValue()) + value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType) +>>>>>>> 6342fa6a5... ddl: fix corrupted default value for bit type column (#18036) if err != nil { return } diff --git a/table/column.go b/table/column.go index 890293e23660a..dd90400885877 100644 --- a/table/column.go +++ b/table/column.go @@ -377,13 +377,7 @@ func CheckNotNull(cols []*Column, row []types.Datum) error { // GetColOriginDefaultValue gets default value of the column from original default value. func GetColOriginDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) { - // If the column type is BIT, both `OriginDefaultValue` and `DefaultValue` of ColumnInfo are corrupted, because - // after JSON marshaling and unmarshaling against the field with type `interface{}`, the content with actual type `[]byte` is changed. - // We need `DefaultValueBit` to restore OriginDefaultValue before reading it. - if col.Tp == mysql.TypeBit && col.DefaultValueBit != nil && col.OriginDefaultValue != nil { - col.OriginDefaultValue = col.DefaultValueBit - } - return getColDefaultValue(ctx, col, col.OriginDefaultValue) + return getColDefaultValue(ctx, col, col.GetOriginDefaultValue()) } // GetColDefaultValue gets default value of the column. 
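The table/column.go hunk above removes the ad-hoc restore from DefaultValueBit and reads the original default through the new GetOriginDefaultValue() accessor instead. The root cause, as the deleted comment notes, is ordinary encoding/json behaviour: a []byte held in an interface{} field is marshaled as a base64 string and comes back from unmarshaling as a string, so a BIT default loses its raw byte form after an info-schema round trip. A minimal standalone Go sketch of that corruption (illustrative only, not part of the patch; colInfo is a hypothetical stand-in for how model.ColumnInfo keeps its original default in an interface{} field):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // colInfo mimics a column descriptor that stores its original
    // default value as interface{}.
    type colInfo struct {
        OriginDefaultValue interface{} `json:"origin_default"`
    }

    func main() {
        // A BIT default such as b'11111100111' (0x07E7) starts out as raw bytes.
        before := colInfo{OriginDefaultValue: []byte{0x07, 0xE7}}

        // encoding/json marshals []byte as a base64 string ...
        data, err := json.Marshal(before)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // {"origin_default":"B+c="}

        // ... and unmarshaling back into interface{} yields a string,
        // not []byte, so the original bit pattern is no longer usable as bytes.
        var after colInfo
        if err := json.Unmarshal(data, &after); err != nil {
            panic(err)
        }
        fmt.Printf("%T %q\n", after.OriginDefaultValue, after.OriginDefaultValue) // string "B+c="
    }

That is why the patch threads BIT defaults through OriginDefaultValueBit and the SetOriginDefaultValue/GetOriginDefaultValue helpers, and why the other call sites in this series (plan_to_pb.go above, tables.go and admin.go below) switch to the accessor.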
diff --git a/table/tables/tables.go b/table/tables/tables.go index 7b1563fa3a879..2fce193d438ff 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -1006,7 +1006,7 @@ func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols // The defaultVals is used to avoid calculating the default value multiple times. func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals []types.Datum) ( colVal types.Datum, err error) { - if col.OriginDefaultValue == nil && mysql.HasNotNullFlag(col.Flag) { + if col.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(col.Flag) { return colVal, errors.New("Miss column") } if col.State != model.StatePublic { diff --git a/util/admin/admin.go b/util/admin/admin.go index cdee332b84c09..a52c0994abe7d 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -354,7 +354,7 @@ func CheckRecordAndIndex(sessCtx sessionctx.Context, txn kv.Transaction, t table for i, val := range vals1 { col := cols[i] if val.IsNull() { - if mysql.HasNotNullFlag(col.Flag) && col.ToInfo().OriginDefaultValue == nil { + if mysql.HasNotNullFlag(col.Flag) && col.ToInfo().GetOriginDefaultValue() == nil { return false, errors.Errorf("Column %v define as not null, but can't find the value where handle is %v", col.Name, h1) } // NULL value is regarded as its default value. From a6e856e79dca7ff1f221046e2df1ab3cbec0c81f Mon Sep 17 00:00:00 2001 From: tangenta Date: Thu, 22 Oct 2020 12:03:03 +0800 Subject: [PATCH 2/4] resolve conflicts --- ddl/column.go | 293 --------------------------------------- ddl/ddl_api.go | 24 +--- go.mod | 2 +- go.sum | 2 + statistics/handle/ddl.go | 9 +- 5 files changed, 5 insertions(+), 325 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 3e6ca0f4b0671..f7f1675b4ec1e 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -229,299 +229,6 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } -<<<<<<< HEAD -======= -func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, []bool, error) { - schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) - if err != nil { - return nil, nil, nil, nil, nil, nil, errors.Trace(err) - } - columns := []*model.ColumnInfo{} - positions := []*ast.ColumnPosition{} - offsets := []int{} - ifNotExists := []bool{} - err = job.DecodeArgs(&columns, &positions, &offsets, &ifNotExists) - if err != nil { - job.State = model.JobStateCancelled - return nil, nil, nil, nil, nil, nil, errors.Trace(err) - } - - columnInfos := make([]*model.ColumnInfo, 0, len(columns)) - newColumns := make([]*model.ColumnInfo, 0, len(columns)) - newPositions := make([]*ast.ColumnPosition, 0, len(columns)) - newOffsets := make([]int, 0, len(columns)) - newIfNotExists := make([]bool, 0, len(columns)) - for i, col := range columns { - columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) - if columnInfo != nil { - if columnInfo.State == model.StatePublic { - // We already have a column with the same column name. - if ifNotExists[i] { - // TODO: Should return a warning. 
- logutil.BgLogger().Warn("[ddl] check add columns, duplicate column", zap.Stringer("col", col.Name)) - continue - } - job.State = model.JobStateCancelled - return nil, nil, nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) - } - columnInfos = append(columnInfos, columnInfo) - } - newColumns = append(newColumns, columns[i]) - newPositions = append(newPositions, positions[i]) - newOffsets = append(newOffsets, offsets[i]) - newIfNotExists = append(newIfNotExists, ifNotExists[i]) - } - return tblInfo, columnInfos, newColumns, newPositions, newOffsets, newIfNotExists, nil -} - -func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { - for i := range columnInfos { - columnInfos[i].State = state - } -} - -func setIndicesState(indexInfos []*model.IndexInfo, state model.SchemaState) { - for _, indexInfo := range indexInfos { - indexInfo.State = state - } -} - -func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { - // Handle the rolling back job. - if job.IsRollingback() { - ver, err = onDropColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - return ver, nil - } - - failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) { - if val.(bool) { - failpoint.Return(ver, errors.New("occur an error before decode args")) - } - }) - - tblInfo, columnInfos, columns, positions, offsets, ifNotExists, err := checkAddColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if len(columnInfos) == 0 { - if len(columns) == 0 { - job.State = model.JobStateCancelled - return ver, nil - } - for i := range columns { - columnInfo, pos, offset, err := createColumnInfo(tblInfo, columns[i], positions[i]) - if err != nil { - job.State = model.JobStateCancelled - return ver, errors.Trace(err) - } - logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset)) - positions[i] = pos - offsets[i] = offset - if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { - job.State = model.JobStateCancelled - return ver, errors.Trace(err) - } - columnInfos = append(columnInfos, columnInfo) - } - // Set arg to job. - job.Args = []interface{}{columnInfos, positions, offsets, ifNotExists} - } - - originalState := columnInfos[0].State - switch columnInfos[0].State { - case model.StateNone: - // none -> delete only - job.SchemaState = model.StateDeleteOnly - setColumnsState(columnInfos, model.StateDeleteOnly) - ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != columnInfos[0].State) - case model.StateDeleteOnly: - // delete only -> write only - job.SchemaState = model.StateWriteOnly - setColumnsState(columnInfos, model.StateWriteOnly) - ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) - case model.StateWriteOnly: - // write only -> reorganization - job.SchemaState = model.StateWriteReorganization - setColumnsState(columnInfos, model.StateWriteReorganization) - ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) - case model.StateWriteReorganization: - // reorganization -> public - // Adjust table column offsets. - oldCols := tblInfo.Columns[:len(tblInfo.Columns)-len(offsets)] - newCols := tblInfo.Columns[len(tblInfo.Columns)-len(offsets):] - tblInfo.Columns = oldCols - for i := range offsets { - // For multiple columns with after position, should adjust offsets. - // e.g. 
create table t(a int); - // alter table t add column b int after a, add column c int after a; - // alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c; - // alter table t add column a1 int after a, add column b1 int first; - if positions[i].Tp == ast.ColumnPositionAfter { - for j := 0; j < i; j++ { - if (positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i]) || positions[j].Tp == ast.ColumnPositionFirst { - offsets[i]++ - } - } - } - tblInfo.Columns = append(tblInfo.Columns, newCols[i]) - adjustColumnInfoInAddColumn(tblInfo, offsets[i]) - } - setColumnsState(columnInfos, model.StatePublic) - ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - // Finish this job. - job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) - asyncNotifyEvent(d, &ddlutil.Event{Tp: model.ActionAddColumns, TableInfo: tblInfo, ColumnInfos: columnInfos}) - default: - err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfos[0].State) - } - - return ver, errors.Trace(err) -} - -func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, colInfos, delCount, idxInfos, err := checkDropColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if len(colInfos) == 0 { - job.State = model.JobStateCancelled - return ver, nil - } - - originalState := colInfos[0].State - switch colInfos[0].State { - case model.StatePublic: - // public -> write only - job.SchemaState = model.StateWriteOnly - setColumnsState(colInfos, model.StateWriteOnly) - setIndicesState(idxInfos, model.StateWriteOnly) - for _, colInfo := range colInfos { - err = checkDropColumnForStatePublic(tblInfo, colInfo) - if err != nil { - return ver, errors.Trace(err) - } - } - ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfos[0].State) - case model.StateWriteOnly: - // write only -> delete only - job.SchemaState = model.StateDeleteOnly - setColumnsState(colInfos, model.StateDeleteOnly) - setIndicesState(idxInfos, model.StateDeleteOnly) - ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) - case model.StateDeleteOnly: - // delete only -> reorganization - job.SchemaState = model.StateDeleteReorganization - setColumnsState(colInfos, model.StateDeleteReorganization) - setIndicesState(idxInfos, model.StateDeleteReorganization) - ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) - case model.StateDeleteReorganization: - // reorganization -> absent - // All reorganization jobs are done, drop this column. - if len(idxInfos) > 0 { - newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) - for _, idx := range tblInfo.Indices { - if !indexInfoContains(idx.ID, idxInfos) { - newIndices = append(newIndices, idx) - } - } - tblInfo.Indices = newIndices - } - - indexIDs := indexInfosToIDList(idxInfos) - tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-delCount] - setColumnsState(colInfos, model.StateNone) - ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - - // Finish this job. 
- if job.IsRollingback() { - job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) - } else { - job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) - job.Args = append(job.Args, indexIDs, getPartitionIDs(tblInfo)) - } - default: - err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State) - } - return ver, errors.Trace(err) -} - -func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, int, []*model.IndexInfo, error) { - schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) - if err != nil { - return nil, nil, 0, nil, errors.Trace(err) - } - - var colNames []model.CIStr - var ifExists []bool - err = job.DecodeArgs(&colNames, &ifExists) - if err != nil { - job.State = model.JobStateCancelled - return nil, nil, 0, nil, errors.Trace(err) - } - - newColNames := make([]model.CIStr, 0, len(colNames)) - colInfos := make([]*model.ColumnInfo, 0, len(colNames)) - newIfExists := make([]bool, 0, len(colNames)) - indexInfos := make([]*model.IndexInfo, 0) - for i, colName := range colNames { - colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) - if colInfo == nil || colInfo.Hidden { - if ifExists[i] { - // TODO: Should return a warning. - logutil.BgLogger().Warn(fmt.Sprintf("column %s doesn't exist", colName)) - continue - } - job.State = model.JobStateCancelled - return nil, nil, 0, nil, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) - } - if err = isDroppableColumn(tblInfo, colName); err != nil { - job.State = model.JobStateCancelled - return nil, nil, 0, nil, errors.Trace(err) - } - newColNames = append(newColNames, colName) - newIfExists = append(newIfExists, ifExists[i]) - colInfos = append(colInfos, colInfo) - idxInfos := listIndicesWithColumn(colName.L, tblInfo.Indices) - indexInfos = append(indexInfos, idxInfos...) - } - job.Args = []interface{}{newColNames, newIfExists} - return tblInfo, colInfos, len(colInfos), indexInfos, nil -} - -func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) { - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) - // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". - // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. - // And we need consider the column without not-null flag. - if colInfo.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(colInfo.Flag) { - // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, - // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. - // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. - // But currently will be ok, because we can't cancel the drop column job when the job is running, - // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. - // More info about this problem, see PR#9115. - originDefVal, err := generateOriginDefaultValue(colInfo) - if err != nil { - return err - } - return colInfo.SetOriginDefaultValue(originDefVal) - } - return nil -} - ->>>>>>> 6342fa6a5... 
ddl: fix corrupted default value for bit type column (#18036) func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { tblInfo, colInfo, err := checkDropColumn(t, job) if err != nil { diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index dd03f206edef9..1fab58def5eb1 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2426,30 +2426,8 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } - originDefVal, err := generateOriginDefaultValue(col.ToInfo()) + col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) if err != nil { -<<<<<<< HEAD -======= - return nil, errors.Trace(err) - } - - err = col.SetOriginDefaultValue(originDefVal) - return col, err -} - -// AddColumn will add a new column to the table. -func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { - specNewColumn := spec.NewColumns[0] - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - if err = checkAddColumnTooManyColumns(len(t.Cols()) + 1); err != nil { - return errors.Trace(err) - } - col, err := checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) - if err != nil { ->>>>>>> 6342fa6a5... ddl: fix corrupted default value for bit type column (#18036) return errors.Trace(err) } diff --git a/go.mod b/go.mod index 854c4aab2eb1f..77ee14df605a0 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200907074027-32a3a0accf7d github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 - github.com/pingcap/parser v0.0.0-20200921063432-e220cfcfd026 + github.com/pingcap/parser v0.0.0-20201021051712-3f9da5f8e980 github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.6-0.20200828085514-03575b185007+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 diff --git a/go.sum b/go.sum index 54496ad875728..dfb84dd21cbfa 100644 --- a/go.sum +++ b/go.sum @@ -475,6 +475,8 @@ github.com/pingcap/parser v0.0.0-20200803072748-fdf66528323d/go.mod h1:vQdbJqobJ github.com/pingcap/parser v0.0.0-20200901062802-475ea5e2e0a7/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200921063432-e220cfcfd026 h1:i+r4P7hb4KpW74nPn+P/hqtsW3fu4U9A4JGAYKWMvtw= github.com/pingcap/parser v0.0.0-20200921063432-e220cfcfd026/go.mod h1:dMMvhqeowLnAsDWspyalgxXoRUnP09cZ7wAnpt2e/S8= +github.com/pingcap/parser v0.0.0-20201021051712-3f9da5f8e980 h1:Jr5nIOUo9LrlzePHqagepETBaKh4sT9V3dLSuP4BKcg= +github.com/pingcap/parser v0.0.0-20201021051712-3f9da5f8e980/go.mod h1:dMMvhqeowLnAsDWspyalgxXoRUnP09cZ7wAnpt2e/S8= github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.5-0.20200817114353-e465cafe8a91 h1:zCOWP+kIzM6ZsXdu2QoM/W6+3vFZj04MYboMP2Obc0E= diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index e69a2b21436ee..127608edb7bb8 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -141,8 +141,7 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfo *model.ColumnInfo) return } count := req.GetRow(0).GetInt64(0) -<<<<<<< HEAD - value := types.NewDatum(colInfo.OriginDefaultValue) + value := types.NewDatum(colInfo.GetOriginDefaultValue()) value, err = 
value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType) if err != nil { return @@ -155,12 +154,6 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfo *model.ColumnInfo) // If this stats exists, we insert histogram meta first, the distinct_count will always be one. sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, tot_col_size) values (%d, %d, 0, %d, 1, %d)", startTS, physicalID, colInfo.ID, int64(len(value.GetBytes()))*count)) value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) -======= - sqls := make([]string, 0, len(colInfos)) - for _, colInfo := range colInfos { - value := types.NewDatum(colInfo.GetOriginDefaultValue()) - value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType) ->>>>>>> 6342fa6a5... ddl: fix corrupted default value for bit type column (#18036) if err != nil { return } From cb27c1fa2719c01a030e049941fae6cbd1fd6720 Mon Sep 17 00:00:00 2001 From: tangenta Date: Thu, 22 Oct 2020 16:40:41 +0800 Subject: [PATCH 3/4] update parser dependency --- ddl/column.go | 8 ++++++-- ddl/ddl_api.go | 3 ++- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index f7f1675b4ec1e..568812d9bfc13 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -246,14 +246,18 @@ func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. // And we need consider the column without not-null flag. - if colInfo.OriginDefaultValue == nil && mysql.HasNotNullFlag(colInfo.Flag) { + if colInfo.GetOriginDefaultValue() == nil && mysql.HasNotNullFlag(colInfo.Flag) { // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. // But currently will be ok, because we can't cancel the drop column job when the job is running, // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. // More info about this problem, see PR#9115. 
- colInfo.OriginDefaultValue, err = generateOriginDefaultValue(colInfo) + oldDVal, err := generateOriginDefaultValue(colInfo) + if err != nil { + return ver, errors.Trace(err) + } + err = colInfo.SetOriginDefaultValue(oldDVal) if err != nil { return ver, errors.Trace(err) } diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 1fab58def5eb1..d176279539e9f 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2426,10 +2426,11 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } - col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + originDefVal, err := generateOriginDefaultValue(col.ToInfo()) if err != nil { return errors.Trace(err) } + err = col.SetOriginDefaultValue(originDefVal) job := &model.Job{ SchemaID: schema.ID, diff --git a/go.mod b/go.mod index e4aa7aff2e9c2..b4fcc84b217e2 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200907074027-32a3a0accf7d github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 - github.com/pingcap/parser v0.0.0-20201021051712-3f9da5f8e980 + github.com/pingcap/parser v0.0.0-20201022083903-fbe80b0c40bb github.com/pingcap/sysutil v0.0.0-20200715082929-4c47bcac246a github.com/pingcap/tidb-tools v4.0.6-0.20200828085514-03575b185007+incompatible github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 diff --git a/go.sum b/go.sum index 5437ace2d009c..b5eb6d10adf32 100644 --- a/go.sum +++ b/go.sum @@ -457,8 +457,8 @@ github.com/pingcap/parser v0.0.0-20200803072748-fdf66528323d/go.mod h1:vQdbJqobJ github.com/pingcap/parser v0.0.0-20200901062802-475ea5e2e0a7/go.mod h1:vQdbJqobJAgFyiRNNtXahpMoGWwPEuWciVEK5A20NS0= github.com/pingcap/parser v0.0.0-20200921063432-e220cfcfd026 h1:i+r4P7hb4KpW74nPn+P/hqtsW3fu4U9A4JGAYKWMvtw= github.com/pingcap/parser v0.0.0-20200921063432-e220cfcfd026/go.mod h1:dMMvhqeowLnAsDWspyalgxXoRUnP09cZ7wAnpt2e/S8= -github.com/pingcap/parser v0.0.0-20201021051712-3f9da5f8e980 h1:Jr5nIOUo9LrlzePHqagepETBaKh4sT9V3dLSuP4BKcg= -github.com/pingcap/parser v0.0.0-20201021051712-3f9da5f8e980/go.mod h1:dMMvhqeowLnAsDWspyalgxXoRUnP09cZ7wAnpt2e/S8= +github.com/pingcap/parser v0.0.0-20201022083903-fbe80b0c40bb h1:yT+Y2yiuLOXdVlERMA7vPQ8Shwk920S9LpuS/cFj6uo= +github.com/pingcap/parser v0.0.0-20201022083903-fbe80b0c40bb/go.mod h1:dMMvhqeowLnAsDWspyalgxXoRUnP09cZ7wAnpt2e/S8= github.com/pingcap/pd/v4 v4.0.0-rc.1.0.20200422143320-428acd53eba2/go.mod h1:s+utZtXDznOiL24VK0qGmtoHjjXNsscJx3m1n8cC56s= github.com/pingcap/pd/v4 v4.0.0-rc.2.0.20200520083007-2c251bd8f181/go.mod h1:q4HTx/bA8aKBa4S7L+SQKHvjRPXCRV0tA0yRw0qkZSA= github.com/pingcap/pd/v4 v4.0.5-0.20200817114353-e465cafe8a91/go.mod h1:m9OEkKoPMQWjrbJ9pqjjeCqzqxraZrPEuWa1OI6Wcek= From a76c447a08ec30a9287226cd7bf995f731595a6f Mon Sep 17 00:00:00 2001 From: tangenta Date: Thu, 22 Oct 2020 16:58:29 +0800 Subject: [PATCH 4/4] fix linter --- ddl/ddl_api.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index d176279539e9f..d60241d186029 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2431,6 +2431,9 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } err = col.SetOriginDefaultValue(originDefVal) + if err != nil { + return errors.Trace(err) + } job := &model.Job{ SchemaID: schema.ID,