diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c2696863..74a5f9b2 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -108,6 +108,7 @@ jobs: - '24.8' - '25.3' - '25.8' + - '25.11' steps: - name: Checkout project uses: actions/checkout@v4 @@ -219,7 +220,7 @@ jobs: - '24.8' - '25.3' - '25.8' - - '25.10' + - '25.11' steps: - name: Checkout project uses: actions/checkout@v4 diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 1e63b0a9..688da6a6 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -254,10 +254,7 @@ func (b *Backuper) getEmbeddedBackupLocation(ctx context.Context, backupName str return fmt.Sprintf("Disk('%s','%s')", b.cfg.ClickHouse.EmbeddedBackupDisk, backupName), nil } if b.cfg.General.RemoteStorage == "s3" { - s3Endpoint, err := b.ch.ApplyMacros(ctx, b.buildEmbeddedLocationS3()) - if err != nil { - return "", err - } + s3Endpoint := b.buildEmbeddedLocationS3(ctx) if b.cfg.S3.AccessKey != "" { return fmt.Sprintf("S3('%s/%s/','%s','%s')", s3Endpoint, backupName, b.cfg.S3.AccessKey, b.cfg.S3.SecretKey), nil } @@ -267,10 +264,7 @@ func (b *Backuper) getEmbeddedBackupLocation(ctx context.Context, backupName str return "", errors.WithStack(errors.New("provide s3->access_key and s3->secret_key in config to allow embedded backup without `clickhouse->embedded_backup_disk`")) } if b.cfg.General.RemoteStorage == "gcs" { - gcsEndpoint, err := b.ch.ApplyMacros(ctx, b.buildEmbeddedLocationGCS()) - if err != nil { - return "", err - } + gcsEndpoint := b.buildEmbeddedLocationGCS(ctx) if b.cfg.GCS.EmbeddedAccessKey != "" { return fmt.Sprintf("S3('%s/%s/','%s','%s')", gcsEndpoint, backupName, b.cfg.GCS.EmbeddedAccessKey, b.cfg.GCS.EmbeddedSecretKey), nil } @@ -280,45 +274,50 @@ func (b *Backuper) getEmbeddedBackupLocation(ctx context.Context, backupName str return "", fmt.Errorf("provide gcs->embedded_access_key and gcs->embedded_secret_key in config to allow embedded backup without `clickhouse->embedded_backup_disk`") } if b.cfg.General.RemoteStorage == "azblob" { - azblobEndpoint, err := b.ch.ApplyMacros(ctx, b.buildEmbeddedLocationAZBLOB()) + azblobEndpoint := b.buildEmbeddedLocationAZBLOB() + azblobPath, err := b.ch.ApplyMacros(ctx, b.cfg.AzureBlob.ObjectDiskPath) if err != nil { return "", err } if b.cfg.AzureBlob.Container != "" { - return fmt.Sprintf("AzureBlobStorage('%s','%s','%s/%s/')", azblobEndpoint, b.cfg.AzureBlob.Container, b.cfg.AzureBlob.ObjectDiskPath, backupName), nil + return fmt.Sprintf("AzureBlobStorage('%s','%s','%s/%s/')", azblobEndpoint, b.cfg.AzureBlob.Container, azblobPath, backupName), nil } return "", fmt.Errorf("provide azblob->container and azblob->account_name, azblob->account_key in config to allow embedded backup without `clickhouse->embedded_backup_disk`") } return "", fmt.Errorf("empty clickhouse->embedded_backup_disk and invalid general->remote_storage: %s", b.cfg.General.RemoteStorage) } - -func (b *Backuper) buildEmbeddedLocationS3() string { +func (b *Backuper) buildEmbeddedLocationS3(ctx context.Context) string { s3backupURL := url.URL{} s3backupURL.Scheme = "https" + s3Path, err := b.ch.ApplyMacros(ctx, b.cfg.S3.ObjectDiskPath) + if err != nil { + log.Error().Stack().Err(err).Send() + return "" + } if strings.HasPrefix(b.cfg.S3.Endpoint, "http") { newUrl, _ := s3backupURL.Parse(b.cfg.S3.Endpoint) s3backupURL = *newUrl - s3backupURL.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) + s3backupURL.Path = path.Join(b.cfg.S3.Bucket, s3Path) } else { 
s3backupURL.Host = b.cfg.S3.Endpoint - s3backupURL.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) + s3backupURL.Path = path.Join(b.cfg.S3.Bucket, s3Path) } if b.cfg.S3.DisableSSL { s3backupURL.Scheme = "http" } if s3backupURL.Host == "" && b.cfg.S3.Region != "" && b.cfg.S3.ForcePathStyle { s3backupURL.Host = "s3." + b.cfg.S3.Region + ".amazonaws.com" - s3backupURL.Path = path.Join(b.cfg.S3.Bucket, b.cfg.S3.ObjectDiskPath) + s3backupURL.Path = path.Join(b.cfg.S3.Bucket, s3Path) } if s3backupURL.Host == "" && b.cfg.S3.Bucket != "" && !b.cfg.S3.ForcePathStyle { s3backupURL.Host = b.cfg.S3.Bucket + "." + "s3." + b.cfg.S3.Region + ".amazonaws.com" - s3backupURL.Path = b.cfg.S3.ObjectDiskPath + s3backupURL.Path = s3Path } return s3backupURL.String() } -func (b *Backuper) buildEmbeddedLocationGCS() string { +func (b *Backuper) buildEmbeddedLocationGCS(ctx context.Context) string { gcsBackupURL := url.URL{} gcsBackupURL.Scheme = "https" if b.cfg.GCS.ForceHttp { @@ -328,14 +327,24 @@ func (b *Backuper) buildEmbeddedLocationGCS() string { if !strings.HasPrefix(b.cfg.GCS.Endpoint, "http") { gcsBackupURL.Host = b.cfg.GCS.Endpoint } else { - newUrl, _ := gcsBackupURL.Parse(b.cfg.GCS.Endpoint) + newUrl, err := gcsBackupURL.Parse(b.cfg.GCS.Endpoint) + if err != nil { + log.Error().Stack().Err(err).Send() + return "" + } gcsBackupURL = *newUrl } } if gcsBackupURL.Host == "" { gcsBackupURL.Host = "storage.googleapis.com" } - gcsBackupURL.Path = path.Join(b.cfg.GCS.Bucket, b.cfg.GCS.ObjectDiskPath) + gcsPath, err := b.ch.ApplyMacros(ctx, b.cfg.GCS.ObjectDiskPath) + if err != nil { + log.Error().Stack().Err(err).Send() + return "" + } + + gcsBackupURL.Path = path.Join(b.cfg.GCS.Bucket, gcsPath) return gcsBackupURL.String() } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 01e608a9..35810341 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -1898,9 +1898,32 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ba } // https://github.com/Altinity/clickhouse-backup/issues/937 if len(b.cfg.General.RestoreTableMapping) > 0 { - if targetTable, isMapped := b.cfg.General.RestoreTableMapping[table.Table]; isMapped { - dstTableName = targetTable - tablesForRestore[i].Table = targetTable + // Check fully qualified name first (db.table), then table name only + fullName := table.Database + "." + table.Table
+ if targetValue, isMapped := b.cfg.General.RestoreTableMapping[fullName]; isMapped { + // Target may contain database (e.g., target_db.new_table) + if strings.Contains(targetValue, ".") { + parts := strings.SplitN(targetValue, ".", 2) + dstDatabase = parts[0] + dstTableName = parts[1] + tablesForRestore[i].Database = parts[0] + tablesForRestore[i].Table = parts[1] + } else { + dstTableName = targetValue + tablesForRestore[i].Table = targetValue + } + } else if targetTable, isMapped := b.cfg.General.RestoreTableMapping[table.Table]; isMapped { + // Handle target with database prefix + if strings.Contains(targetTable, ".") { + parts := strings.SplitN(targetTable, ".", 2) + dstDatabase = parts[0] + dstTableName = parts[1] + tablesForRestore[i].Database = parts[0] + tablesForRestore[i].Table = parts[1] + } else { + dstTableName = targetTable + tablesForRestore[i].Table = targetTable + } } } logger := log.With().Str("table", fmt.Sprintf("%s.%s", dstDatabase, dstTableName)).Logger() @@ -2255,8 +2278,24 @@ func (b *Backuper) checkMissingTables(tablesForRestore ListOfTables, chTables [] } } if len(b.cfg.General.RestoreTableMapping) > 0 { - if targetTable, isMapped := b.cfg.General.RestoreTableMapping[table.Table]; isMapped { - dstTable = targetTable + // Check fully qualified name first (db.table), then table name only + fullName := table.Database + "." + table.Table + if targetValue, isMapped := b.cfg.General.RestoreTableMapping[fullName]; isMapped { + if strings.Contains(targetValue, ".") { + parts := strings.SplitN(targetValue, ".", 2) + dstDatabase = parts[0] + dstTable = parts[1] + } else { + dstTable = targetValue + } + } else if targetTable, isMapped := b.cfg.General.RestoreTableMapping[table.Table]; isMapped { + if strings.Contains(targetTable, ".") { + parts := strings.SplitN(targetTable, ".", 2) + dstDatabase = parts[0] + dstTable = parts[1] + } else { + dstTable = targetTable + } } } found := false @@ -2267,7 +2306,7 @@ func (b *Backuper) checkMissingTables(tablesForRestore ListOfTables, chTables [] } } if !found { - missingTables = append(missingTables, fmt.Sprintf("'%s.%s'", dstDatabase, table.Table)) + missingTables = append(missingTables, fmt.Sprintf("'%s.%s'", dstDatabase, dstTable)) } } return missingTables @@ -2290,25 +2329,80 @@ func (b *Backuper) changeTablePatternFromRestoreMapping(tablePattern, objType st case "database": mapping = b.cfg.General.RestoreDatabaseMapping case "table": - mapping = b.cfg.General.RestoreDatabaseMapping + mapping = b.cfg.General.RestoreTableMapping default: - return "" + return tablePattern } + isDatabase := objType == "database" for sourceObj, targetObj := range mapping { if tablePattern != "" { - sourceObjRE := regexp.MustCompile(fmt.Sprintf("(^%s.*)|(,%s.*)", sourceObj, sourceObj)) + var sourceObjRE *regexp.Regexp + if isDatabase { + sourceObjRE = regexp.MustCompile(fmt.Sprintf("(^%s\\.[^,]*)|(,%s\\.[^,]*)", sourceObj, sourceObj)) + } else { + // Check if sourceObj is a fully qualified name (db.table) + if strings.Contains(sourceObj, ".") { + // Fully qualified mapping: source_db.table -> target_db.new_table + escapedSource := regexp.QuoteMeta(sourceObj) + sourceObjRE = regexp.MustCompile(fmt.Sprintf("(^%s)|(,%s)", escapedSource, escapedSource)) + } else { + sourceObjRE = regexp.MustCompile(fmt.Sprintf("(^([^\\.]+)\\.%s)|(,([^\\.]+)\\.%s)", sourceObj, sourceObj)) + } + } + if sourceObjRE.MatchString(tablePattern) { matches := sourceObjRE.FindAllStringSubmatch(tablePattern, -1) - substitution := targetObj + ".*"
- if strings.HasPrefix(matches[0][1], ",") { + var substitution string + if isDatabase { + substitution = targetObj + ".*" + } else { + // Check if sourceObj is fully qualified + if strings.Contains(sourceObj, ".") { + // Use targetObj as-is (may contain database) + substitution = targetObj + } else { + // matches[0][2] has database name when first alternative matches (^...) + // matches[0][4] has database name when second alternative matches (,...) + dbName := matches[0][2] + if dbName == "" && len(matches[0]) > 4 { + dbName = matches[0][4] + } + // Check if targetObj contains database + if strings.Contains(targetObj, ".") { + substitution = targetObj + } else { + substitution = dbName + "." + targetObj + } + } + } + if strings.HasPrefix(matches[0][0], ",") { substitution = "," + substitution } + tablePattern = sourceObjRE.ReplaceAllString(tablePattern, substitution) } else { - tablePattern += "," + targetObj + ".*" + if isDatabase { + tablePattern += "," + targetObj + ".*" + } else { + // Check if targetObj contains database + if strings.Contains(targetObj, ".") { + tablePattern += "," + targetObj + } else { + tablePattern += ",*." + targetObj + } + } } } else { - tablePattern += targetObj + ".*" + if isDatabase { + tablePattern += targetObj + ".*" + } else { + // Check if targetObj contains database + if strings.Contains(targetObj, ".") { + tablePattern += targetObj + } else { + tablePattern += "*." + targetObj + } + } } } return tablePattern diff --git a/pkg/backup/restore_test.go b/pkg/backup/restore_test.go index 00400b5f..adedc93d 100644 --- a/pkg/backup/restore_test.go +++ b/pkg/backup/restore_test.go @@ -2,6 +2,7 @@ package backup import ( "fmt" + "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/stretchr/testify/assert" "testing" ) @@ -83,3 +84,140 @@ func TestDetectRBACObject(t *testing.T) { } } } + +func TestChangeTablePatternFromRestoreMapping(t *testing.T) { + testCases := []struct { + name string + tablePattern string + objType string + restoreDatabaseMapping map[string]string + restoreTableMapping map[string]string + expected string + }{ + // Database mapping tests + { + name: "database mapping with matching pattern", + tablePattern: "db1.*", + objType: "database", + restoreDatabaseMapping: map[string]string{"db1": "db2"}, + expected: "db2.*", + }, + { + name: "database mapping without matching pattern appends", + tablePattern: "other.*", + objType: "database", + restoreDatabaseMapping: map[string]string{"db1": "db2"}, + expected: "other.*,db2.*", + }, + { + name: "database mapping with empty pattern", + tablePattern: "", + objType: "database", + restoreDatabaseMapping: map[string]string{"db1": "db2"}, + expected: "db2.*", + }, + { + name: "database mapping with comma-separated patterns", + tablePattern: "other.*,db1.table1", + objType: "database", + restoreDatabaseMapping: map[string]string{"db1": "db2"}, + expected: "other.*,db2.*", + }, + // Table mapping tests + { + name: "table mapping with matching pattern", + tablePattern: "db1.t1", + objType: "table", + restoreTableMapping: map[string]string{"t1": "t2"}, + expected: "db1.t2", + }, + { + name: "table mapping without matching pattern appends", + tablePattern: "db1.other", + objType: "table", + restoreTableMapping: map[string]string{"t1": "t2"}, + expected: "db1.other,*.t2", + }, + { + name: "table mapping with empty pattern", + tablePattern: "", + objType: "table", + restoreTableMapping: map[string]string{"t1": "t2"}, + expected: "*.t2", + }, + { + name: "table mapping with comma-separated patterns", +
tablePattern: "db1.other,db2.t1", + objType: "table", + restoreTableMapping: map[string]string{"t1": "t2"}, + expected: "db1.other,db2.t2", + }, + { + name: "table mapping preserves database name", + tablePattern: "mydb.t1", + objType: "table", + restoreTableMapping: map[string]string{"t1": "renamed_t1"}, + expected: "mydb.renamed_t1", + }, + // Default/unknown objType tests + { + name: "unknown objType returns pattern unchanged", + tablePattern: "db1.*", + objType: "unknown", + restoreDatabaseMapping: map[string]string{"db1": "db2"}, + restoreTableMapping: map[string]string{"t1": "t2"}, + expected: "db1.*", + }, + { + name: "empty objType returns pattern unchanged", + tablePattern: "db1.*", + objType: "", + restoreDatabaseMapping: map[string]string{"db1": "db2"}, + expected: "db1.*", + }, + // Multiple mappings tests + { + name: "multiple database mappings", + tablePattern: "db1.*,db3.*", + objType: "database", + restoreDatabaseMapping: map[string]string{"db1": "db2", "db3": "db4"}, + expected: "db2.*,db4.*", + }, + { + name: "multiple table mappings", + tablePattern: "db1.t1,db2.t3", + objType: "table", + restoreTableMapping: map[string]string{"t1": "t2", "t3": "t4"}, + expected: "db1.t2,db2.t4", + }, + // Empty mapping tests + { + name: "empty database mapping returns pattern unchanged", + tablePattern: "db1.*", + objType: "database", + restoreDatabaseMapping: map[string]string{}, + expected: "db1.*", + }, + { + name: "empty table mapping returns pattern unchanged", + tablePattern: "db1.t1", + objType: "table", + restoreTableMapping: map[string]string{}, + expected: "db1.t1", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfg := &config.Config{ + General: config.GeneralConfig{ + RestoreDatabaseMapping: tc.restoreDatabaseMapping, + RestoreTableMapping: tc.restoreTableMapping, + }, + } + b := &Backuper{cfg: cfg} + result := b.changeTablePatternFromRestoreMapping(tc.tablePattern, tc.objType) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index 9d41baaa..ec590897 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -406,10 +406,36 @@ func changeTableQueryToAdjustDatabaseMapping(originTables *ListOfTables, dbMapRu return nil } +// lookupTableMapping checks mapping first by fully qualified name (db.table), then by table name only. +// Returns target value, target database (if specified), target table, and whether mapping was found. +func lookupTableMapping(tableMapRule map[string]string, database, table string) (targetValue string, targetDatabase string, targetTable string, isMapped bool) { + // First try fully qualified name + fullName := database + "." 
+ table + if targetValue, isMapped = tableMapRule[fullName]; isMapped { + // Check if target also contains database + if strings.Contains(targetValue, ".") { + parts := strings.SplitN(targetValue, ".", 2) + return targetValue, parts[0], parts[1], true + } + return targetValue, "", targetValue, true + } + // Fall back to table name only + if targetValue, isMapped = tableMapRule[table]; isMapped { + // Check if target contains database + if strings.Contains(targetValue, ".") { + parts := strings.SplitN(targetValue, ".", 2) + return targetValue, parts[0], parts[1], true + } + return targetValue, "", targetValue, true + } + return "", "", "", false +} + func changeTableQueryToAdjustTableMapping(originTables *ListOfTables, tableMapRule map[string]string) error { for i := 0; i < len(*originTables); i++ { originTable := (*originTables)[i] - if targetTable, isMapped := tableMapRule[originTable.Table]; isMapped { + _, targetDatabase, targetTable, isMapped := lookupTableMapping(tableMapRule, originTable.Database, originTable.Table) + if isMapped { // substitute table in the table create query var substitution string @@ -428,15 +454,23 @@ func changeTableQueryToAdjustTableMapping(originTables *ListOfTables, tableMapRu } return clauseTargetTable } - createTargetTable := targetTable + createTargetTableName := targetTable // https://github.com/Altinity/clickhouse-backup/issues/820#issuecomment-2773501803 - if !usualIdentifier.MatchString(createTargetTable) && !strings.Contains(matches[0][6], "`") { - createTargetTable = "`" + createTargetTable + "`" + if !usualIdentifier.MatchString(createTargetTableName) && !strings.Contains(matches[0][6], "`") { + createTargetTableName = "`" + createTargetTableName + "`" + } + // Handle database in target mapping (e.g., source_db.table:target_db.new_table) + createTargetDatabase := "${4}" + if targetDatabase != "" { + createTargetDatabase = targetDatabase + if !usualIdentifier.MatchString(createTargetDatabase) && !strings.Contains(matches[0][3], "`") { + createTargetDatabase = "`" + createTargetDatabase + "`" + } } toClauseTargetTable := setMatchedTable(matches[0][16], matches[0][15]) fromClauseTargetTable := setMatchedTable(matches[0][24], matches[0][23]) // matching CREATE|ATTACH ... TO .. SELECT ... FROM ... 
command - substitution = fmt.Sprintf("${1} ${2} ${3}${4}${5}.${6}%v${8}${9}${10}${11}${12}${13}${14}${15}%v${17}${18}${19}${20}${21}${22}${23}%v${25}", createTargetTable, toClauseTargetTable, fromClauseTargetTable) + substitution = fmt.Sprintf("${1} ${2} ${3}%v${5}.${6}%v${8}${9}${10}${11}${12}${13}${14}${15}%v${17}${18}${19}${20}${21}${22}${23}%v${25}", createTargetDatabase, createTargetTableName, toClauseTargetTable, fromClauseTargetTable) } else { if originTable.Query == "" { continue @@ -470,6 +504,10 @@ func changeTableQueryToAdjustTableMapping(originTables *ListOfTables, tableMapRu } } originTable.Table = targetTable + // Update database if target mapping includes database + if targetDatabase != "" { + originTable.Database = targetDatabase + } (*originTables)[i] = originTable } } diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 70b5f331..54abbe88 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -1305,6 +1305,9 @@ func (ch *ClickHouse) GetInProgressMutations(ctx context.Context, database strin } func (ch *ClickHouse) ApplyMacros(ctx context.Context, s string) (string, error) { + if !strings.Contains(s, "{") { + return s, nil + } var macrosExists uint64 err := ch.SelectSingleRow(ctx, ¯osExists, "SELECT count() AS is_macros_exists FROM system.tables WHERE database='system' AND name='macros' SETTINGS empty_result_for_aggregation_by_empty_set=0") if err != nil || macrosExists == 0 { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index bff20f3c..cec1d8e4 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2133,7 +2133,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { env.queryWithNoError(r, "CREATE TABLE IF NOT EXISTS test_skip_tables.test_merge_tree (id UInt64, s String) ENGINE=MergeTree() ORDER BY id") env.queryWithNoError(r, "CREATE TABLE IF NOT EXISTS test_skip_tables.test_memory (id UInt64) ENGINE=Memory") env.queryWithNoError(r, "CREATE MATERIALIZED VIEW IF NOT EXISTS test_skip_tables.test_mv (id UInt64) ENGINE=MergeTree() ORDER BY id AS SELECT id FROM test_skip_tables.test_merge_tree") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { query := "CREATE LIVE VIEW IF NOT EXISTS test_skip_tables.test_live_view AS SELECT count() FROM test_skip_tables.test_merge_tree" allowExperimentalAnalyzer, err := env.ch.TurnAnalyzerOffIfNecessary(version, query, "") r.NoError(err) @@ -2153,7 +2153,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_memory.json") env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_mv.json") env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_live_view.json") } if 
compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2165,7 +2165,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_memory.json")) r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_mv.json")) env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2183,7 +2183,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.Error(env.DockerExec("minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) env.DockerExecNoError(r, "minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") env.DockerExecNoError(r, "minio", "bash", "-ce", "ls -la /minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { env.DockerExecNoError(r, "minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2196,7 +2196,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.Error(env.DockerExec("minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) r.Error(env.DockerExec("minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) env.DockerExecNoError(r, "minio", "bash", "-ce", "ls -la /minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { r.Error(env.DockerExec("minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2209,7 +2209,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { env.DockerExecNoError(r, "minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json") env.DockerExecNoError(r, "minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") env.DockerExecNoError(r, "minio", "bash", "-ce", "ls -la /minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if 
compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { env.DockerExecNoError(r, "minio", "ls", "-la", "/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2226,7 +2226,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json") env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2239,7 +2239,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2252,7 +2252,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json") env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -2276,9 +2276,11 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { expectedTables = 6 } //*.inner.target.* for WINDOW VIEW created only after 22.6 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.6") >= 0 { + //LIVE VIEW removed in 25.11+ + if 
compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.6") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { expectedTables = 7 } + found := false for _, item := range result { if item.Name == "test_memory" { @@ -2323,7 +2325,8 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { expectedTables = 7 } //*.inner.target.* for WINDOW VIEW created only after 22.6 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.6") >= 0 { + //LIVE VIEW removed in 25.11+ + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.6") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "25.11") < 0 { expectedTables = 8 } r.Equal(expectedTables, len(result), "unexpected tables after full restore in test_skip_tables %#v", result) @@ -3464,6 +3467,161 @@ func TestRestoreMapping(t *testing.T) { env.checkCount(r, 1, 1, "SELECT count() FROM `database-2`.v2") fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, false, true, true, "config-database-mapping.yml") + + // Corner case 1: Table-only mapping without database mapping + log.Debug().Msg("Corner case 1: Table-only mapping without database mapping") + testBackupName2 := "test_table_only_mapping" + databaseList2 := []string{"database-3"} + fullCleanup(t, r, env, []string{testBackupName2}, []string{"local"}, databaseList2, false, false, false, "config-database-mapping.yml") + + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `database-3`") + env.queryWithNoError(r, "CREATE TABLE `database-3`.src_table (dt DateTime, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "INSERT INTO `database-3`.src_table SELECT '2022-01-01 00:00:00', number FROM numbers(5)") + + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName2) + + log.Debug().Msg("Restore with table-only mapping (src_table -> dst_table)") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-table-mapping", "src_table:dst_table", "--tables", "database-3.src_table", testBackupName2) + + env.checkCount(r, 1, 1, "SELECT count() FROM system.tables WHERE database='database-3' AND name='dst_table'") + + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-table-mapping", "src_table:dst_table", "--tables", "database-3.src_table", testBackupName2) + env.checkCount(r, 1, 5, "SELECT count() FROM `database-3`.dst_table") + + fullCleanup(t, r, env, []string{testBackupName2}, []string{"local"}, databaseList2, false, true, true, "config-database-mapping.yml") + + // Corner case 2: Multiple databases with comma-separated patterns + log.Debug().Msg("Corner case 2: Multiple databases with comma-separated table patterns") + testBackupName3 := "test_multi_db_mapping" + databaseList3 := []string{"db_a", "db_b", "db_c", "db_d"} + fullCleanup(t, r, env, []string{testBackupName3}, []string{"local"}, databaseList3, false, false, false, "config-database-mapping.yml") + + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `db_a`") + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `db_b`") + env.queryWithNoError(r, "CREATE TABLE `db_a`.t1 (id UInt64) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, "CREATE TABLE `db_b`.t1 (id UInt64) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, 
"INSERT INTO `db_a`.t1 SELECT number FROM numbers(3)") + env.queryWithNoError(r, "INSERT INTO `db_b`.t1 SELECT number FROM numbers(7)") + + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName3) + + log.Debug().Msg("Drop source databases before restore to simulate migration") + r.NoError(env.dropDatabase("db_a", false)) + r.NoError(env.dropDatabase("db_b", false)) + + log.Debug().Msg("Restore with multiple database mappings (db_a->db_c, db_b->db_d)") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--rm", "--restore-database-mapping", "db_a:db_c,db_b:db_d", "--tables", "db_a.*,db_b.*", testBackupName3) + + env.checkCount(r, 1, 3, "SELECT count() FROM `db_c`.t1") + env.checkCount(r, 1, 7, "SELECT count() FROM `db_d`.t1") + env.checkCount(r, 1, 0, "SELECT count() FROM system.databases WHERE name IN ('db_a','db_b') SETTINGS empty_result_for_aggregation_by_empty_set=0") + + fullCleanup(t, r, env, []string{testBackupName3}, []string{"local"}, databaseList3, false, true, true, "config-database-mapping.yml") + + // Corner case 3: Combined database and table mapping with comma-separated patterns + log.Debug().Msg("Corner case 3: Combined database and table mapping with comma-separated patterns") + testBackupName4 := "test_combined_mapping" + databaseList4 := []string{"src_db1", "src_db2", "dst_db1", "dst_db2"} + fullCleanup(t, r, env, []string{testBackupName4}, []string{"local"}, databaseList4, false, false, false, "config-database-mapping.yml") + + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `src_db1`") + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `src_db2`") + env.queryWithNoError(r, "CREATE TABLE `src_db1`.old_name (id UInt64) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, "CREATE TABLE `src_db2`.old_name (id UInt64) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, "INSERT INTO `src_db1`.old_name SELECT number FROM numbers(4)") + env.queryWithNoError(r, "INSERT INTO `src_db2`.old_name SELECT number + 100 FROM numbers(6)") + + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName4) + + log.Debug().Msg("Restore with database mapping and table mapping on comma-separated patterns") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--rm", "--restore-database-mapping", "src_db1:dst_db1,src_db2:dst_db2", "--restore-table-mapping", "old_name:new_name", "--tables", "src_db1.old_name,src_db2.old_name", testBackupName4) + + env.checkCount(r, 1, 4, "SELECT count() FROM `dst_db1`.new_name") + env.checkCount(r, 1, 6, "SELECT count() FROM `dst_db2`.new_name") + // Verify data integrity - dst_db2 should have values starting from 100 + env.checkCount(r, 1, 6, "SELECT count() FROM `dst_db2`.new_name WHERE id >= 100") + + fullCleanup(t, r, env, []string{testBackupName4}, []string{"local"}, databaseList4, false, true, true, "config-database-mapping.yml") + + // Corner case 4: Table mapping with same table name in different databases + log.Debug().Msg("Corner case 4: Table mapping for same table name across databases") + testBackupName5 := "test_same_table_diff_db" + databaseList5 := []string{"alpha", "beta"} + fullCleanup(t, r, env, []string{testBackupName5}, []string{"local"}, databaseList5, 
false, false, false, "config-database-mapping.yml") + + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `alpha`") + env.queryWithNoError(r, "CREATE TABLE `alpha`.common (id UInt64) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, "CREATE TABLE `alpha`.unique_a (id UInt64) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, "INSERT INTO `alpha`.common SELECT number FROM numbers(2)") + env.queryWithNoError(r, "INSERT INTO `alpha`.unique_a SELECT number FROM numbers(3)") + + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName5) + + log.Debug().Msg("Drop source database before restore to simulate migration") + r.NoError(env.dropDatabase("alpha", false)) + + log.Debug().Msg("Restore to different database with selective table mapping") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--rm", "--restore-database-mapping", "alpha:beta", "--restore-table-mapping", "common:shared,unique_a:unique_b", "--tables", "alpha.*", testBackupName5) + + env.checkCount(r, 1, 2, "SELECT count() FROM `beta`.shared") + env.checkCount(r, 1, 3, "SELECT count() FROM `beta`.unique_b") + env.checkCount(r, 1, 0, "SELECT count() FROM system.databases WHERE name='alpha' SETTINGS empty_result_for_aggregation_by_empty_set=0") + + fullCleanup(t, r, env, []string{testBackupName5}, []string{"local"}, databaseList5, false, true, true, "config-database-mapping.yml") + + // Corner case 5: Fully qualified table mapping (db.table:db.table_v2, src_db.table:dst_db.table_v2) - verify DROP uses target table name + // https://github.com/Altinity/clickhouse-backup/issues/1302 + log.Debug().Msg("Corner case 5: Fully qualified table mapping with --schema restore") + testBackupName6 := "test_fq_table_mapping" + databaseList6 := []string{"db-5", "source_db", "target_db"} + fullCleanup(t, r, env, []string{testBackupName6}, []string{"local"}, databaseList6, false, false, false, "config-database-mapping.yml") + + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `source_db`") + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `target_db`") + env.queryWithNoError(r, "CREATE TABLE `source_db`.original_table (dt DateTime, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "INSERT INTO `source_db`.original_table SELECT '2022-01-01 00:00:00', number FROM numbers(5)") + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS `db-5`") + env.queryWithNoError(r, "CREATE TABLE `db-5`.table (dt DateTime, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "INSERT INTO `db-5`.table SELECT '2022-01-01 00:00:00', number FROM numbers(5)") + // Create target table to verify DROP operates on correct table + env.queryWithNoError(r, "CREATE TABLE `target_db`.renamed_table_v2 (dt DateTime, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "INSERT INTO `target_db`.renamed_table_v2 SELECT '2023-01-01 00:00:00', number FROM numbers(3)") + env.queryWithNoError(r, "CREATE TABLE `db-5`.table_v2 (dt DateTime, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "INSERT INTO `db-5`.table_v2 SELECT '2023-01-01 00:00:00', number FROM numbers(3)") + + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName6)
+ + restoreFqMappingCases := []struct { + srcDb string + srcTable string + dstDb string + dstTable string + }{ + {srcDb: "db-5", srcTable: "table", dstDb: "db-5", dstTable: "table_v2"}, + {srcDb: "source_db", srcTable: "original_table", dstDb: "target_db", dstTable: "renamed_table_v2"}, + } + + for _, tc := range restoreFqMappingCases { + tableMapping := fmt.Sprintf("%s.%s:%s.%s", tc.srcDb, tc.srcTable, tc.dstDb, tc.dstTable) + log.Debug().Msgf("Restore with fully qualified table mapping %s", tableMapping) + out, err := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-table-mapping", tableMapping, "--tables", tc.srcDb+"."+tc.srcTable, testBackupName6) + log.Debug().Msg(out) + r.NoError(err) + + // Verify DROP used target table name, not source table name + r.Contains(out, fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", tc.dstDb, tc.dstTable), "DROP should use target table name from mapping") + r.NotContains(out, fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", tc.srcDb, tc.srcTable), "DROP should NOT use source table name") + + // Verify table was created in target location + env.checkCount(r, 1, 1, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s' AND name='%s'", tc.dstDb, tc.dstTable)) + + // Restore data and verify + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-table-mapping", tableMapping, "--tables", tc.srcDb+"."+tc.srcTable, testBackupName6) + env.checkCount(r, 1, 5, fmt.Sprintf("SELECT count() FROM `%s`.`%s`", tc.dstDb, tc.dstTable)) + } + + fullCleanup(t, r, env, []string{testBackupName6}, []string{"local"}, databaseList6, false, true, true, "config-database-mapping.yml") + env.Cleanup(t, r) } diff --git a/test/testflows/clickhouse_backup/tests/views.py b/test/testflows/clickhouse_backup/tests/views.py index 888f1194..ec310d5a 100644 --- a/test/testflows/clickhouse_backup/tests/views.py +++ b/test/testflows/clickhouse_backup/tests/views.py @@ -1,4 +1,5 @@ import os + from clickhouse_backup.requirements.requirements import * from clickhouse_backup.tests.common import * from clickhouse_backup.tests.steps import * @@ -83,12 +84,14 @@ def live_view(self): """Test that live view is handled properly by clickhouse-backup. """ base_table_name = self.context.views_base_name - + version_parts = os.environ.get('CLICKHOUSE_VERSION', '25.8').split('.')[:2] + # compare (major, minor) numerically; plain string comparison misorders versions like '25.10' vs '25.8' + if not all(p.isdigit() for p in version_parts) or tuple(map(int, version_parts)) >= (25, 8): + skip("LIVE VIEW was removed in ClickHouse 25.9+") views_outline(view_name="lview", view_contents_query=f"SELECT * FROM {base_table_name}_lview", view_create_query=f"CREATE LIVE VIEW {base_table_name}_lview AS " f"SELECT Version, Path, Time FROM default.{base_table_name}") - @TestScenario @Requirements( RQ_SRS_013_ClickHouse_BackupUtility_Views_WindowView("1.0")
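
The mapping-resolution rules this patch introduces (fully qualified "db.table" keys take precedence over bare "table" keys, and a dotted target value also remaps the destination database) can be exercised in isolation. Below is a minimal, self-contained sketch, not part of the patch and with all names local to this example, that mirrors the lookup order of lookupTableMapping in pkg/backup/table_pattern.go:

package main

import (
	"fmt"
	"strings"
)

// resolve mimics lookupTableMapping: try the fully qualified "db.table" key first,
// then fall back to the bare table name; a target containing "." overrides the database.
func resolve(mapping map[string]string, database, table string) (string, string, bool) {
	for _, key := range []string{database + "." + table, table} {
		if target, found := mapping[key]; found {
			if strings.Contains(target, ".") {
				parts := strings.SplitN(target, ".", 2)
				return parts[0], parts[1], true // dotted target remaps the database too
			}
			return database, target, true // bare target keeps the source database
		}
	}
	return database, table, false // no mapping: keep source names
}

func main() {
	mapping := map[string]string{
		"source_db.original_table": "target_db.renamed_table_v2", // fully qualified -> fully qualified
		"t1":                       "t2",                         // bare rename, database preserved
	}
	fmt.Println(resolve(mapping, "source_db", "original_table")) // target_db renamed_table_v2 true
	fmt.Println(resolve(mapping, "mydb", "t1"))                  // mydb t2 true
	fmt.Println(resolve(mapping, "mydb", "other"))               // mydb other false
}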