Skip to content

Commit

Permalink
fix download command corner cases for increment backup for tables w…
Browse files Browse the repository at this point in the history
…ith projections, fix #830

more informative error when trying to `restore` a local backup that does not exist
  • Loading branch information
Slach committed Feb 12, 2024
1 parent 4aff436 commit daaef48
Show file tree
Hide file tree
Showing 4 changed files with 54 additions and 26 deletions.
5 changes: 5 additions & 0 deletions ChangeLog.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
# v2.4.30
BUG FIXES
- fix `download` command corner cases for incremental backups of tables with projections, fix [830](https://github.com/Altinity/clickhouse-backup/issues/830)
- more informative error when trying to `restore` a local backup that does not exist

# v2.4.29
IMPROVEMENTS
- add `AZBLOB_DEBUG` environment and `debug` config parameter in `azblob` section
Expand Down
37 changes: 22 additions & 15 deletions pkg/backup/download.go
Original file line number Diff line number Diff line change
Expand Up @@ -983,34 +983,41 @@ func (b *Backuper) ReadBackupMetadataRemote(ctx context.Context, backupName stri

func (b *Backuper) makePartHardlinks(exists, new string) error {
log := apexLog.WithField("logger", "makePartHardlinks")
ex, err := os.Open(exists)
_, err := os.Stat(exists)
if err != nil {
return err
}
defer func() {
if err = ex.Close(); err != nil {
log.Warnf("Can't close %s", exists)
}
}()
files, err := ex.Readdirnames(-1)
if err != nil {
return err
}
if err := os.MkdirAll(new, 0750); err != nil {
if err = os.MkdirAll(new, 0750); err != nil {
log.Warnf("MkDirAll(%s) error: %v", new, err)
return err
}
for _, f := range files {
existsF := path.Join(exists, f)
newF := path.Join(new, f)
if err := os.Link(existsF, newF); err != nil {
if walkErr := filepath.Walk(exists, func(fPath string, fInfo os.FileInfo, err error) error {
if err != nil {
return err
}
fPath = strings.TrimPrefix(fPath, exists)
existsF := path.Join(exists, fPath)
newF := path.Join(new, fPath)
if fInfo.IsDir() {
if err = os.MkdirAll(newF, fInfo.Mode()); err != nil {
log.Warnf("MkdirAll(%s) error: %v", fPath, err)
return err
}
return nil
}

if err = os.Link(existsF, newF); err != nil {
existsFInfo, existsStatErr := os.Stat(existsF)
newFInfo, newStatErr := os.Stat(newF)
if existsStatErr != nil || newStatErr != nil || !os.SameFile(existsFInfo, newFInfo) {
log.Warnf("Link %s -> %s error: %v", newF, existsF, err)
return err
}
}
return nil
}); walkErr != nil {
log.Warnf("Link recursively %s -> %s return error: %v", new, exists, walkErr)
return walkErr
}
return nil
}
Expand Down
16 changes: 11 additions & 5 deletions pkg/backup/restore.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,12 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par
log.Warnf("%v", err)
return ErrUnknownClickhouseDataPath
}
if b.cfg.General.RestoreSchemaOnCluster != "" {
if b.cfg.General.RestoreSchemaOnCluster, err = b.ch.ApplyMacros(ctx, b.cfg.General.RestoreSchemaOnCluster); err != nil {
log.Warnf("%v", err)
return err
}
}
backupMetafileLocalPaths := []string{path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json")}
var backupMetadataBody []byte
b.EmbeddedBackupDataPath, err = b.ch.GetEmbeddedBackupPath(disks)
Expand All @@ -89,9 +95,6 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par
break
}
}
if b.cfg.General.RestoreSchemaOnCluster != "" {
b.cfg.General.RestoreSchemaOnCluster, err = b.ch.ApplyMacros(ctx, b.cfg.General.RestoreSchemaOnCluster)
}
if err == nil {
backupMetadata := metadata.BackupMetadata{}
if err := json.Unmarshal(backupMetadataBody, &backupMetadata); err != nil {
Expand Down Expand Up @@ -122,8 +125,11 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par
return nil
}
}
} else if !os.IsNotExist(err) { // Legacy backups don't contain metadata.json
return err
} else if os.IsNotExist(err) { // Legacy backups don't have metadata.json, but we need handle not exists local backup
backupPath := path.Join(b.DefaultDataPath, "backup", backupName)
if fInfo, fErr := os.Stat(backupPath); fErr != nil || !fInfo.IsDir() {
return fmt.Errorf("'%s' stat return %v, %v", backupPath, fInfo, fErr)
}
}
needRestart := false
if (rbacOnly || restoreRBAC) && !b.isEmbedded {
Expand Down
22 changes: 16 additions & 6 deletions test/integration/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1199,21 +1199,31 @@ func TestProjections(t *testing.T) {
defer ch.chbackend.Close()

r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml"))
err = ch.chbackend.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() ORDER BY dt")
err = ch.chbackend.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt")
r.NoError(err)

err = ch.chbackend.Query("INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(10)")
r.NoError(err)
ch.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)")
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "test_backup_projection_full"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_full"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full"))

ch.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number WEEK, number FROM numbers(5)")
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--diff-from-remote", "test_backup_projection_full", "test_backup_projection_increment"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_increment"))

r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_backup_projection"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "test_backup_projection"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection"))
var counts uint64
r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection"))
r.Equal(uint64(10), counts)
err = ch.chbackend.Query("DROP TABLE default.table_with_projection NO DELAY")
r.NoError(err)

r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full"))

r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full"))
}

func TestCheckSystemPartsColumns(t *testing.T) {
Expand Down

0 comments on commit daaef48

Please sign in to comment.