From debfc320af2811b3fa878b4d70b8fc8ef8062762 Mon Sep 17 00:00:00 2001 From: Wenqi Mou Date: Sun, 26 Jan 2025 02:43:44 -0500 Subject: [PATCH] br: add tests to cover sequence id, auto increment id and auto random id, revert meta kv capture logic (#59109) close pingcap/tidb#59108 --- br/pkg/stream/BUILD.bazel | 3 +- br/pkg/stream/rewrite_meta_rawkv.go | 18 +- br/pkg/stream/rewrite_meta_rawkv_test.go | 2 +- br/pkg/stream/table_mapping.go | 181 +++++++++++++----- br/pkg/stream/table_mapping_test.go | 152 ++++++++++++++- br/tests/br_pitr/check/check_key_types.sh | 73 +++++++ .../br_pitr/incremental_data/key_types.sql | 38 ++++ br/tests/br_pitr/prepare_data/key_types.sql | 33 ++++ br/tests/br_pitr/run.sh | 8 +- 9 files changed, 441 insertions(+), 67 deletions(-) create mode 100644 br/tests/br_pitr/check/check_key_types.sh create mode 100644 br/tests/br_pitr/incremental_data/key_types.sql create mode 100644 br/tests/br_pitr/prepare_data/key_types.sql diff --git a/br/pkg/stream/BUILD.bazel b/br/pkg/stream/BUILD.bazel index f78f44527c559..e3629ca718deb 100644 --- a/br/pkg/stream/BUILD.bazel +++ b/br/pkg/stream/BUILD.bazel @@ -69,11 +69,12 @@ go_test( ], embed = [":stream"], flaky = True, - shard_count = 48, + shard_count = 49, deps = [ "//br/pkg/storage", "//br/pkg/streamhelper", "//pkg/ddl", + "//pkg/kv", "//pkg/meta", "//pkg/meta/model", "//pkg/parser/ast", diff --git a/br/pkg/stream/rewrite_meta_rawkv.go b/br/pkg/stream/rewrite_meta_rawkv.go index 94e05221b2424..cebafbd247fb9 100644 --- a/br/pkg/stream/rewrite_meta_rawkv.go +++ b/br/pkg/stream/rewrite_meta_rawkv.go @@ -129,8 +129,7 @@ func (sr *SchemasReplace) rewriteKeyForDB(key []byte, cf string) ([]byte, error) dbMap, exist := sr.DbMap[dbID] if !exist { - // db filtered out - return nil, nil + return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find db id:%v in maps", dbID) } rawMetaKey.UpdateField(meta.DBkey(dbMap.DbID)) @@ -148,8 +147,7 @@ func (sr *SchemasReplace) rewriteDBInfo(value []byte) ([]byte, error) { dbMap, exist := sr.DbMap[dbInfo.ID] if !exist { - // db filtered out - return nil, nil + return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find db id:%v in maps", dbInfo.ID) } dbInfo.ID = dbMap.DbID @@ -208,14 +206,12 @@ func (sr *SchemasReplace) rewriteKeyForTable( dbReplace, exist := sr.DbMap[dbID] if !exist { - // db filtered out - return nil, nil + return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find db id:%v in maps", dbID) } tableReplace, exist := dbReplace.TableMap[tableID] if !exist { - // table filtered out - return nil, nil + return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find table id:%v in maps", tableID) } rawMetaKey.UpdateKey(meta.DBkey(dbReplace.DbID)) @@ -241,14 +237,12 @@ func (sr *SchemasReplace) rewriteTableInfo(value []byte, dbID int64) ([]byte, er // construct or find the id map. dbReplace, exist = sr.DbMap[dbID] if !exist { - // db filtered out - return nil, nil + return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find db id:%v in maps", dbID) } tableReplace, exist = dbReplace.TableMap[tableInfo.ID] if !exist { - // table filtered out - return nil, nil + return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find table id:%v in maps", tableInfo.ID) } // update table ID and partition ID. 
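The rewrite_meta_rawkv.go hunks above all make the same change: a db or table ID missing from the replace maps used to be treated as "filtered out" and silently skipped by returning (nil, nil), and is now surfaced as an ErrInvalidArgument error. A minimal sketch of the resulting lookup-or-error pattern, assuming the same errors/berrors imports as the file above; the standalone helper name lookupTableReplace is illustrative and not part of the patch:

// lookupTableReplace condenses the lookup logic shared by rewriteKeyForTable and
// rewriteTableInfo above: an ID absent from the replace maps is an error, never a silent skip.
func lookupTableReplace(sr *SchemasReplace, dbID, tableID int64) (*TableReplace, error) {
	dbReplace, exist := sr.DbMap[dbID]
	if !exist {
		return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find db id:%v in maps", dbID)
	}
	tableReplace, exist := dbReplace.TableMap[tableID]
	if !exist {
		return nil, errors.Annotatef(berrors.ErrInvalidArgument, "failed to find table id:%v in maps", tableID)
	}
	return tableReplace, nil
}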
diff --git a/br/pkg/stream/rewrite_meta_rawkv_test.go b/br/pkg/stream/rewrite_meta_rawkv_test.go index c926f6a574bb6..6c9b4e846105f 100644 --- a/br/pkg/stream/rewrite_meta_rawkv_test.go +++ b/br/pkg/stream/rewrite_meta_rawkv_test.go @@ -401,7 +401,7 @@ func TestRewriteTableInfoForExchangePartition(t *testing.T) { dbMap[dbID2] = NewDBReplace(db2.Name.O, dbID2+100) dbMap[dbID2].TableMap[tableID2] = NewTableReplace(t2.Name.O, tableID2+100) - tc := NewTableMappingManager(dbMap, mockGenGenGlobalID) + tc := NewTableMappingManager(dbMap, mockGenGlobalID) //exchange partition, t1 partition0 with the t2 t1Copy := t1.Clone() diff --git a/br/pkg/stream/table_mapping.go b/br/pkg/stream/table_mapping.go index ff44f1fdc7b35..29773e556c980 100644 --- a/br/pkg/stream/table_mapping.go +++ b/br/pkg/stream/table_mapping.go @@ -17,11 +17,9 @@ package stream import ( "context" "encoding/json" - "fmt" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" - "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/model" @@ -60,6 +58,9 @@ func NewTableMappingManager( } // ParseMetaKvAndUpdateIdMapping collects table information +// the keys and values selected to be parsed here follow the implementation in rewrite_meta_rawkv. Maybe +// parsing a subset of these keys/values would suffice, but to make it safe we decide to parse exactly the same set as +// in rewrite_meta_rawkv. func (tc *TableMappingManager) ParseMetaKvAndUpdateIdMapping(e *kv.Entry, cf string) error { if !IsMetaDBKey(e.Key) { return nil @@ -70,92 +71,174 @@ func (tc *TableMappingManager) ParseMetaKvAndUpdateIdMapping(e *kv.Entry, cf str return errors.Trace(err) } - value, err := extractValue(e, cf) - if err != nil { - return errors.Trace(err) - } - // sanity check - if value == nil { - log.Warn("entry suggests having short value but is nil") - return nil - } - if meta.IsDBkey(rawKey.Field) { - return tc.parseDBValueAndUpdateIdMapping(value) + // parse db key + err := tc.parseDBKeyAndUpdateIdMapping(rawKey.Field) + if err != nil { + return errors.Trace(err) + } + + // parse value and update if exists + value, err := extractValue(e, cf) + if err != nil { + return errors.Trace(err) + } + if value != nil { + return tc.parseDBValueAndUpdateIdMapping(value) + } } else if !meta.IsDBkey(rawKey.Key) { return nil } if meta.IsTableKey(rawKey.Field) { - dbID, err := ParseDBIDFromTableKey(e.Key) + dbID, err := meta.ParseDBKey(rawKey.Key) + if err != nil { + return errors.Trace(err) + } + + // parse table key and update + err = tc.parseTableIdAndUpdateIdMapping(rawKey.Key, rawKey.Field, meta.ParseTableKey) + if err != nil { + return errors.Trace(err) + } + + // parse value and update if exists + value, err := extractValue(e, cf) + if err != nil { + return errors.Trace(err) + } + if value != nil { + return tc.parseTableValueAndUpdateIdMapping(dbID, value) + } + } else if meta.IsAutoIncrementIDKey(rawKey.Field) { + // parse auto increment key and update + err = tc.parseTableIdAndUpdateIdMapping(rawKey.Key, rawKey.Field, meta.ParseAutoIncrementIDKey) + if err != nil { + return errors.Trace(err) + } + } else if meta.IsAutoTableIDKey(rawKey.Field) { + // parse auto table key and update + err = tc.parseTableIdAndUpdateIdMapping(rawKey.Key, rawKey.Field, meta.ParseAutoTableIDKey) + if err != nil { + return errors.Trace(err) + } + } else if meta.IsSequenceKey(rawKey.Field) { + // parse sequence key and update + err = tc.parseTableIdAndUpdateIdMapping(rawKey.Key, rawKey.Field, 
meta.ParseSequenceKey) + if err != nil { + return errors.Trace(err) + } + } else if meta.IsAutoRandomTableIDKey(rawKey.Field) { + // parse auto random key and update + err = tc.parseTableIdAndUpdateIdMapping(rawKey.Key, rawKey.Field, meta.ParseAutoRandomTableIDKey) if err != nil { return errors.Trace(err) } - return tc.parseTableValueAndUpdateIdMapping(dbID, value) } + return nil } +func (tc *TableMappingManager) parseDBKeyAndUpdateIdMapping(field []byte) error { + dbID, err := meta.ParseDBKey(field) + if err != nil { + return errors.Trace(err) + } + + _, err = tc.getOrCreateDBReplace(dbID) + return errors.Trace(err) +} + func (tc *TableMappingManager) parseDBValueAndUpdateIdMapping(value []byte) error { dbInfo := new(model.DBInfo) if err := json.Unmarshal(value, dbInfo); err != nil { return errors.Trace(err) } - if dr, exist := tc.DbReplaceMap[dbInfo.ID]; !exist { - newID, err := tc.genGlobalIdFn(context.Background()) - if err != nil { - return errors.Trace(err) - } - tc.DbReplaceMap[dbInfo.ID] = NewDBReplace(dbInfo.Name.O, newID) - tc.globalIdMap[dbInfo.ID] = newID - } else { - dr.Name = dbInfo.Name.O + dbReplace, err := tc.getOrCreateDBReplace(dbInfo.ID) + if err != nil { + return errors.Trace(err) } + dbReplace.Name = dbInfo.Name.O return nil } -func (tc *TableMappingManager) parseTableValueAndUpdateIdMapping(dbID int64, value []byte) error { - var ( - tableInfo model.TableInfo - err error - exist bool - dbReplace *DBReplace - tableReplace *TableReplace - ) - - if err := json.Unmarshal(value, &tableInfo); err != nil { - return errors.Trace(err) - } - - // construct or find the id map. - dbReplace, exist = tc.DbReplaceMap[dbID] +// getOrCreateDBReplace gets an existing DBReplace or creates a new one if not found +func (tc *TableMappingManager) getOrCreateDBReplace(dbID int64) (*DBReplace, error) { + dbReplace, exist := tc.DbReplaceMap[dbID] if !exist { newID, err := tc.genGlobalIdFn(context.Background()) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } tc.globalIdMap[dbID] = newID dbReplace = NewDBReplace("", newID) tc.DbReplaceMap[dbID] = dbReplace } + return dbReplace, nil +} - tableReplace, exist = dbReplace.TableMap[tableInfo.ID] +// getOrCreateTableReplace gets an existing TableReplace or creates a new one if not found +func (tc *TableMappingManager) getOrCreateTableReplace(dbReplace *DBReplace, tableID int64) (*TableReplace, error) { + tableReplace, exist := dbReplace.TableMap[tableID] if !exist { - newID, exist := tc.globalIdMap[tableInfo.ID] + newID, exist := tc.globalIdMap[tableID] if !exist { + var err error newID, err = tc.genGlobalIdFn(context.Background()) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } - tc.globalIdMap[tableInfo.ID] = newID + tc.globalIdMap[tableID] = newID } + tableReplace = NewTableReplace("", newID) + dbReplace.TableMap[tableID] = tableReplace + } + return tableReplace, nil +} + +func (tc *TableMappingManager) parseTableIdAndUpdateIdMapping( + key []byte, + field []byte, + parseField func([]byte) (tableID int64, err error)) error { + dbID, err := meta.ParseDBKey(key) + if err != nil { + return errors.Trace(err) + } + + tableID, err := parseField(field) + if err != nil { + return errors.Trace(err) + } + + dbReplace, err := tc.getOrCreateDBReplace(dbID) + if err != nil { + return errors.Trace(err) + } + + _, err = tc.getOrCreateTableReplace(dbReplace, tableID) + if err != nil { + return errors.Trace(err) + } + return nil +} - tableReplace = NewTableReplace(tableInfo.Name.O, newID) - 
dbReplace.TableMap[tableInfo.ID] = tableReplace - } else { - tableReplace.Name = tableInfo.Name.O +func (tc *TableMappingManager) parseTableValueAndUpdateIdMapping(dbID int64, value []byte) error { + var tableInfo model.TableInfo + if err := json.Unmarshal(value, &tableInfo); err != nil { + return errors.Trace(err) + } + + dbReplace, err := tc.getOrCreateDBReplace(dbID) + if err != nil { + return errors.Trace(err) + } + + tableReplace, err := tc.getOrCreateTableReplace(dbReplace, tableInfo.ID) + if err != nil { + return errors.Trace(err) } + tableReplace.Name = tableInfo.Name.O // update table ID and partition ID. tableInfo.ID = tableReplace.TableID @@ -252,6 +335,6 @@ func extractValue(e *kv.Entry, cf string) ([]byte, error) { } return nil, nil default: - panic(fmt.Sprintf("not support cf:%s", cf)) + return nil, errors.Errorf("unsupported column family: %s", cf) } } diff --git a/br/pkg/stream/table_mapping_test.go b/br/pkg/stream/table_mapping_test.go index 3f816c5399665..358c3c09ce872 100644 --- a/br/pkg/stream/table_mapping_test.go +++ b/br/pkg/stream/table_mapping_test.go @@ -16,19 +16,25 @@ package stream import ( "context" + "encoding/json" "testing" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/ast" "github.com/stretchr/testify/require" ) var increaseID int64 = 100 -func mockGenGenGlobalID(_ctx context.Context) (int64, error) { +func mockGenGlobalID(_ctx context.Context) (int64, error) { increaseID++ return increaseID, nil } func TestToProto(t *testing.T) { + increaseID = 100 var ( dbName, tblName string = "db1", "t1" oldDBID UpstreamID = 100 @@ -49,7 +55,7 @@ func TestToProto(t *testing.T) { drs[oldDBID] = dr // create schemas replace and test ToProto(). 
- tc := NewTableMappingManager(drs, mockGenGenGlobalID) + tc := NewTableMappingManager(drs, mockGenGlobalID) dbMap := tc.ToProto() require.Equal(t, len(dbMap), 1) @@ -80,3 +86,145 @@ func TestToProto(t *testing.T) { drs2 := FromDBMapProto(dbMap) require.Equal(t, drs2, drs) } + +func TestParseMetaKvAndUpdateIdMapping(t *testing.T) { + increaseID = 100 + var ( + dbID int64 = 40 + dbName = "test_db" + tableID int64 = 100 + tableName = "test_table" + pt1ID int64 = 101 + pt2ID int64 = 102 + pt1Name = "pt1" + pt2Name = "pt2" + ts uint64 = 400036290571534337 + ) + + // Create a TableMappingManager with empty maps + tc := NewTableMappingManager(nil, mockGenGlobalID) + + // Test DB key + dbKey := meta.DBkey(dbID) + dbInfo := &model.DBInfo{ + ID: dbID, + Name: ast.NewCIStr(dbName), + } + dbValue, err := json.Marshal(dbInfo) + require.NoError(t, err) + + // Encode DB key in a transaction + txnDBKey := encodeTxnMetaKey([]byte("DBs"), dbKey, ts) + entry := &kv.Entry{ + Key: txnDBKey, + Value: dbValue, + } + + // Test parsing DB key and value + err = tc.ParseMetaKvAndUpdateIdMapping(entry, DefaultCF) + require.NoError(t, err) + require.Contains(t, tc.DbReplaceMap, dbID) + require.Equal(t, dbName, tc.DbReplaceMap[dbID].Name) + require.Equal(t, increaseID, tc.DbReplaceMap[dbID].DbID) + + // Test table key + pi := model.PartitionInfo{ + Enable: true, + Definitions: make([]model.PartitionDefinition, 0), + } + pi.Definitions = append(pi.Definitions, + model.PartitionDefinition{ + ID: pt1ID, + Name: ast.NewCIStr(pt1Name), + }, + model.PartitionDefinition{ + ID: pt2ID, + Name: ast.NewCIStr(pt2Name), + }, + ) + + tableInfo := &model.TableInfo{ + ID: tableID, + Name: ast.NewCIStr(tableName), + Partition: &pi, + } + tableValue, err := json.Marshal(tableInfo) + require.NoError(t, err) + + // Encode table key in a transaction + txnTableKey := encodeTxnMetaKey(meta.DBkey(dbID), meta.TableKey(tableID), ts) + tableEntry := &kv.Entry{ + Key: txnTableKey, + Value: tableValue, + } + + // Test parsing table key and value + err = tc.ParseMetaKvAndUpdateIdMapping(tableEntry, DefaultCF) + require.NoError(t, err) + require.Contains(t, tc.DbReplaceMap[dbID].TableMap, tableID) + require.Equal(t, tableName, tc.DbReplaceMap[dbID].TableMap[tableID].Name) + + // Verify partition IDs are mapped + require.Contains(t, tc.DbReplaceMap[dbID].TableMap[tableID].PartitionMap, pt1ID) + require.Contains(t, tc.DbReplaceMap[dbID].TableMap[tableID].PartitionMap, pt2ID) + + // Test non-meta key + nonMetaEntry := &kv.Entry{ + Key: []byte("not_a_meta_key"), + Value: []byte("some_value"), + } + err = tc.ParseMetaKvAndUpdateIdMapping(nonMetaEntry, DefaultCF) + require.NoError(t, err) + + // Test auto increment key with different IDs + autoIncrDBID := int64(50) + autoIncrTableID := int64(200) + autoIncrKey := encodeTxnMetaKey(meta.DBkey(autoIncrDBID), meta.AutoIncrementIDKey(autoIncrTableID), ts) + autoIncrEntry := &kv.Entry{ + Key: autoIncrKey, + Value: []byte("1"), + } + err = tc.ParseMetaKvAndUpdateIdMapping(autoIncrEntry, DefaultCF) + require.NoError(t, err) + require.Contains(t, tc.DbReplaceMap, autoIncrDBID) + require.Contains(t, tc.DbReplaceMap[autoIncrDBID].TableMap, autoIncrTableID) + + // Test auto table ID key with different IDs + autoTableDBID := int64(60) + autoTableTableID := int64(300) + autoTableKey := encodeTxnMetaKey(meta.DBkey(autoTableDBID), meta.AutoTableIDKey(autoTableTableID), ts) + autoTableEntry := &kv.Entry{ + Key: autoTableKey, + Value: []byte("1"), + } + err = tc.ParseMetaKvAndUpdateIdMapping(autoTableEntry, DefaultCF) + 
require.NoError(t, err) + require.Contains(t, tc.DbReplaceMap, autoTableDBID) + require.Contains(t, tc.DbReplaceMap[autoTableDBID].TableMap, autoTableTableID) + + // Test sequence key with different IDs + seqDBID := int64(70) + seqTableID := int64(400) + seqKey := encodeTxnMetaKey(meta.DBkey(seqDBID), meta.SequenceKey(seqTableID), ts) + seqEntry := &kv.Entry{ + Key: seqKey, + Value: []byte("1"), + } + err = tc.ParseMetaKvAndUpdateIdMapping(seqEntry, DefaultCF) + require.NoError(t, err) + require.Contains(t, tc.DbReplaceMap, seqDBID) + require.Contains(t, tc.DbReplaceMap[seqDBID].TableMap, seqTableID) + + // Test auto random table ID key with different IDs + autoRandomDBID := int64(80) + autoRandomTableID := int64(500) + autoRandomKey := encodeTxnMetaKey(meta.DBkey(autoRandomDBID), meta.AutoRandomTableIDKey(autoRandomTableID), ts) + autoRandomEntry := &kv.Entry{ + Key: autoRandomKey, + Value: []byte("1"), + } + err = tc.ParseMetaKvAndUpdateIdMapping(autoRandomEntry, DefaultCF) + require.NoError(t, err) + require.Contains(t, tc.DbReplaceMap, autoRandomDBID) + require.Contains(t, tc.DbReplaceMap[autoRandomDBID].TableMap, autoRandomTableID) +} diff --git a/br/tests/br_pitr/check/check_key_types.sh b/br/tests/br_pitr/check/check_key_types.sh new file mode 100644 index 0000000000000..745bee9ff307e --- /dev/null +++ b/br/tests/br_pitr/check/check_key_types.sh @@ -0,0 +1,73 @@ +#!/bin/sh + +echo "=== Verifying Data Integrity ===" + +# Verify original table key data is intact +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.table_key_test;" +check_contains "cnt: 4" +run_sql "SELECT id, name, value FROM key_types_test.table_key_test ORDER BY id;" +check_contains "id: 1" +check_contains "name: test1" +check_contains "value: 100" +check_contains "id: 2" +check_contains "name: test2" +check_contains "value: 200" +check_contains "id: 3" +check_contains "name: test3" +check_contains "value: 300" + +# Verify new table key data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.table_key_test2;" +check_contains "cnt: 1" +run_sql "SELECT id, name, value FROM key_types_test.table_key_test2 ORDER BY id;" +check_contains "id: 1" +check_contains "name: test1" +check_contains "value: 100" + +# Verify original auto increment data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.auto_inc_test;" +check_contains "cnt: 4" +run_sql "SELECT id FROM key_types_test.auto_inc_test ORDER BY id;" +check_contains "id: 1" +check_contains "id: 2" +check_contains "id: 3" +check_contains "id: 4" + +# Verify new auto increment data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.auto_inc_test2;" +check_contains "cnt: 2" +run_sql "SELECT id FROM key_types_test.auto_inc_test2 ORDER BY id;" +check_contains "id: 1" +check_contains "id: 2" + +# Verify original sequence data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.sequence_test;" +check_contains "cnt: 4" +run_sql "SELECT id FROM key_types_test.sequence_test ORDER BY id;" +check_contains "id: 1" +check_contains "id: 3" +check_contains "id: 5" +check_contains "id: 7" + +# Verify new sequence data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.sequence_test2;" +check_contains "cnt: 2" +run_sql "SELECT id FROM key_types_test.sequence_test2 ORDER BY id;" +check_contains "id: 1" +check_contains "id: 3" + +# Verify original auto random data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.auto_random_test;" +check_contains "cnt: 4" +run_sql "SELECT name FROM key_types_test.auto_random_test ORDER BY id;" +check_contains "name: rand1" +check_contains "name: 
rand2" +check_contains "name: rand3" +check_contains "name: random4" + +# Verify new auto random data +run_sql "SELECT COUNT(*) as cnt FROM key_types_test.auto_random_test2;" +check_contains "cnt: 2" +run_sql "SELECT name FROM key_types_test.auto_random_test2 ORDER BY id;" +check_contains "name: rand1" +check_contains "name: rand2" diff --git a/br/tests/br_pitr/incremental_data/key_types.sql b/br/tests/br_pitr/incremental_data/key_types.sql new file mode 100644 index 0000000000000..8e16bbbf1a948 --- /dev/null +++ b/br/tests/br_pitr/incremental_data/key_types.sql @@ -0,0 +1,38 @@ +-- incremental changes to test during log backup + +-- test existing tables +INSERT INTO key_types_test.table_key_test VALUES (3, 'test3', 300); +INSERT INTO key_types_test.auto_inc_test (name) VALUES ('auto4'); +INSERT INTO key_types_test.sequence_test (name) VALUES ('seq4'); +INSERT INTO key_types_test.auto_random_test (name) VALUES ('random4'); + +-- Create new tables during log backup to test table creation with special keys +-- 1. New table with regular key +CREATE TABLE key_types_test.table_key_test2 ( + id INT PRIMARY KEY NONCLUSTERED, + name VARCHAR(255), + value INT +); +INSERT INTO key_types_test.table_key_test2 VALUES (1, 'test1', 100); + +-- 2. New table with auto increment +CREATE TABLE key_types_test.auto_inc_test2 ( + id INT PRIMARY KEY AUTO_INCREMENT, + name VARCHAR(255) +); +INSERT INTO key_types_test.auto_inc_test2 (name) VALUES ('auto1'), ('auto2'); + +-- 3. New sequence and table using it +CREATE SEQUENCE key_types_test.seq2 START WITH 1 INCREMENT BY 2 NOCACHE; +CREATE TABLE key_types_test.sequence_test2 ( + id INT PRIMARY KEY DEFAULT NEXT VALUE FOR key_types_test.seq2, + name VARCHAR(255) +); +INSERT INTO key_types_test.sequence_test2 (name) VALUES ('seq1'), ('seq2'); + +-- 4. New table with auto random +CREATE TABLE key_types_test.auto_random_test2 ( + id BIGINT PRIMARY KEY AUTO_RANDOM(5), + name VARCHAR(255) +); +INSERT INTO key_types_test.auto_random_test2 (name) VALUES ('rand1'), ('rand2'); diff --git a/br/tests/br_pitr/prepare_data/key_types.sql b/br/tests/br_pitr/prepare_data/key_types.sql new file mode 100644 index 0000000000000..f4b65867be5e5 --- /dev/null +++ b/br/tests/br_pitr/prepare_data/key_types.sql @@ -0,0 +1,33 @@ +-- Test cases for different key types during PITR restore + +-- 1. regular table key +CREATE DATABASE IF NOT EXISTS key_types_test; +CREATE TABLE key_types_test.table_key_test ( + id INT PRIMARY KEY NONCLUSTERED, + name VARCHAR(255), + value INT +); +INSERT INTO key_types_test.table_key_test VALUES (1, 'test1', 100), (2, 'test2', 200); + +-- 2. auto Increment ID Key Test +CREATE TABLE key_types_test.auto_inc_test ( + id INT PRIMARY KEY AUTO_INCREMENT, + name VARCHAR(255) +); + +INSERT INTO key_types_test.auto_inc_test (name) VALUES ('auto1'), ('auto2'), ('auto3'); + +-- 3. sequence Key Test +CREATE SEQUENCE key_types_test.seq1 START WITH 1 INCREMENT BY 2 NOCACHE; +CREATE TABLE key_types_test.sequence_test ( + id INT PRIMARY KEY DEFAULT NEXT VALUE FOR key_types_test.seq1, + name VARCHAR(255) +); +INSERT INTO key_types_test.sequence_test (name) VALUES ('seq1'), ('seq2'), ('seq3'); + +-- 4. 
auto Random Table ID Key Test +CREATE TABLE key_types_test.auto_random_test ( + id BIGINT PRIMARY KEY AUTO_RANDOM(5), + name VARCHAR(255) +); +INSERT INTO key_types_test.auto_random_test (name) VALUES ('rand1'), ('rand2'), ('rand3'); \ No newline at end of file diff --git a/br/tests/br_pitr/run.sh b/br/tests/br_pitr/run.sh index 0816537b7b087..6ee6a70c0885c 100644 --- a/br/tests/br_pitr/run.sh +++ b/br/tests/br_pitr/run.sh @@ -37,7 +37,7 @@ restart_services_allowing_huge_index echo "prepare the data" run_sql_file $CUR/prepare_data/delete_range.sql run_sql_file $CUR/prepare_data/ingest_repair.sql -# ... +run_sql_file $CUR/prepare_data/key_types.sql # check something after prepare the data prepare_delete_range_count=$(run_sql "select count(*) DELETE_RANGE_CNT from (select * from mysql.gc_delete_range union all select * from mysql.gc_delete_range_done) del_range;" | tail -n 1 | awk '{print $2}') @@ -65,7 +65,7 @@ run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$PREFIX/inc" --lastbackup echo "load the incremental data" run_sql_file $CUR/incremental_data/delete_range.sql run_sql_file $CUR/incremental_data/ingest_repair.sql -# ... +run_sql_file $CUR/incremental_data/key_types.sql # run incremental snapshot backup, but this incremental backup will fail to restore. due to limitation of ddl. echo "run incremental backup with special ddl jobs, modify column e.g." @@ -95,6 +95,8 @@ check_result() { check_contains "DELETE_RANGE_CNT: $expect_delete_range" ## check feature compatibility between PITR and accelerate indexing bash $CUR/check/check_ingest_repair.sh + # check key types are restored correctly + bash $CUR/check/check_key_types.sh } # start a new cluster @@ -191,3 +193,5 @@ if [ $restore_fail -ne 1 ]; then echo 'pitr success on file lost' exit 1 fi + +echo "br pitr test passed"
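Taken together, the TableMappingManager changes and the new PITR test assets describe a two-step flow: first scan the log-backup meta KVs to build an upstream-to-downstream ID mapping (covering db, table, auto-increment, auto-table-ID, sequence and auto-random keys), then rewrite the KVs with that mapping. A hedged usage sketch of the first step, mirroring the unit test above; the wrapper function buildIDMapping, its parameters, and the loop are illustrative and not taken from the patch:

package example

import (
	"context"

	"github.com/pingcap/tidb/br/pkg/stream"
	"github.com/pingcap/tidb/pkg/kv"
)

// buildIDMapping feeds decoded meta KV entries of one column family ("default" or "write")
// into a TableMappingManager so that DbReplaceMap is populated before any rewrite is attempted.
func buildIDMapping(entries []kv.Entry, cf string, genGlobalID func(context.Context) (int64, error)) error {
	// nil means "start with empty replace maps", as in the unit test above.
	tm := stream.NewTableMappingManager(nil, genGlobalID)
	for i := range entries {
		// Non-meta keys are ignored inside ParseMetaKvAndUpdateIdMapping, so the caller
		// does not need to pre-filter entries.
		if err := tm.ParseMetaKvAndUpdateIdMapping(&entries[i], cf); err != nil {
			return err
		}
	}
	// tm.DbReplaceMap now maps every upstream db/table ID seen in the stream to a
	// downstream ID produced by genGlobalID.
	_ = tm.DbReplaceMap
	return nil
}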