FIX (retention): Fix GFS retention when hourly backups prevent daily backups from being cleaned up

This commit is contained in:
Rostislav Dugin
2026-03-11 15:35:53 +03:00
parent c96d3db337
commit 812f11bc2f
3 changed files with 1239 additions and 555 deletions

View File

@@ -80,8 +80,7 @@ func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
return err
}
err = storage.DeleteFile(c.fieldEncryptor, backup.FileName)
if err != nil {
if err := storage.DeleteFile(c.fieldEncryptor, backup.FileName); err != nil {
// we do not return error here, because sometimes clean up performed
// before unavailable storage removal or change - therefore we should
// proceed even in case of error. It's possible that some S3 or
@@ -408,6 +407,10 @@ func buildGFSKeepSet(
) map[uuid.UUID]bool {
keep := make(map[uuid.UUID]bool)
if len(backups) == 0 {
return keep
}
hoursSeen := make(map[string]bool)
daysSeen := make(map[string]bool)
weeksSeen := make(map[string]bool)
@@ -416,6 +419,52 @@ func buildGFSKeepSet(
hoursKept, daysKept, weeksKept, monthsKept, yearsKept := 0, 0, 0, 0, 0
// Compute per-level time-window cutoffs so higher-frequency slots
// cannot absorb backups that belong to lower-frequency levels.
ref := backups[0].CreatedAt
rawHourlyCutoff := ref.Add(-time.Duration(hours) * time.Hour)
rawDailyCutoff := ref.Add(-time.Duration(days) * 24 * time.Hour)
rawWeeklyCutoff := ref.Add(-time.Duration(weeks) * 7 * 24 * time.Hour)
rawMonthlyCutoff := ref.AddDate(0, -months, 0)
rawYearlyCutoff := ref.AddDate(-years, 0, 0)
// Hierarchical capping: each level's window cannot extend further back
// than the nearest active lower-frequency level's window.
yearlyCutoff := rawYearlyCutoff
monthlyCutoff := rawMonthlyCutoff
if years > 0 {
monthlyCutoff = laterOf(monthlyCutoff, yearlyCutoff)
}
weeklyCutoff := rawWeeklyCutoff
if months > 0 {
weeklyCutoff = laterOf(weeklyCutoff, monthlyCutoff)
} else if years > 0 {
weeklyCutoff = laterOf(weeklyCutoff, yearlyCutoff)
}
dailyCutoff := rawDailyCutoff
if weeks > 0 {
dailyCutoff = laterOf(dailyCutoff, weeklyCutoff)
} else if months > 0 {
dailyCutoff = laterOf(dailyCutoff, monthlyCutoff)
} else if years > 0 {
dailyCutoff = laterOf(dailyCutoff, yearlyCutoff)
}
hourlyCutoff := rawHourlyCutoff
if days > 0 {
hourlyCutoff = laterOf(hourlyCutoff, dailyCutoff)
} else if weeks > 0 {
hourlyCutoff = laterOf(hourlyCutoff, weeklyCutoff)
} else if months > 0 {
hourlyCutoff = laterOf(hourlyCutoff, monthlyCutoff)
} else if years > 0 {
hourlyCutoff = laterOf(hourlyCutoff, yearlyCutoff)
}
for _, backup := range backups {
t := backup.CreatedAt
@@ -426,31 +475,31 @@ func buildGFSKeepSet(
monthKey := t.Format("2006-01")
yearKey := t.Format("2006")
if hours > 0 && hoursKept < hours && !hoursSeen[hourKey] {
if hours > 0 && hoursKept < hours && !hoursSeen[hourKey] && t.After(hourlyCutoff) {
keep[backup.ID] = true
hoursSeen[hourKey] = true
hoursKept++
}
if days > 0 && daysKept < days && !daysSeen[dayKey] {
if days > 0 && daysKept < days && !daysSeen[dayKey] && t.After(dailyCutoff) {
keep[backup.ID] = true
daysSeen[dayKey] = true
daysKept++
}
if weeks > 0 && weeksKept < weeks && !weeksSeen[weekKey] {
if weeks > 0 && weeksKept < weeks && !weeksSeen[weekKey] && t.After(weeklyCutoff) {
keep[backup.ID] = true
weeksSeen[weekKey] = true
weeksKept++
}
if months > 0 && monthsKept < months && !monthsSeen[monthKey] {
if months > 0 && monthsKept < months && !monthsSeen[monthKey] && t.After(monthlyCutoff) {
keep[backup.ID] = true
monthsSeen[monthKey] = true
monthsKept++
}
if years > 0 && yearsKept < years && !yearsSeen[yearKey] {
if years > 0 && yearsKept < years && !yearsSeen[yearKey] && t.After(yearlyCutoff) {
keep[backup.ID] = true
yearsSeen[yearKey] = true
yearsKept++
@@ -459,3 +508,11 @@ func buildGFSKeepSet(
return keep
}
func laterOf(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}

File diff suppressed because it is too large Load Diff

View File

@@ -697,160 +697,6 @@ func Test_CleanByCount_DoesNotDeleteInProgressBackups(t *testing.T) {
assert.True(t, inProgressFound, "In-progress backup should not be deleted by count policy")
}
// Test_CleanByGFS_KeepsCorrectBackupsPerSlot verifies the daily-only GFS policy:
// with RetentionGfsDays=3 and five backups on five distinct days, only the
// backups from the three newest days survive cleanByRetentionPolicy.
func Test_CleanByGFS_KeepsCorrectBackupsPerSlot(t *testing.T) {
// Full workspace/storage/notifier/database fixture: the cleaner resolves the
// backup config and storage through the repositories, so all must exist.
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
// Tear down in reverse dependency order; the short sleep lets async work
// settle before the notifier is removed (same pattern as sibling tests).
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// GFS config with only the daily level active: 3 daily slots, all other levels 0.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsDays: 3,
RetentionGfsWeeks: 0,
RetentionGfsMonths: 0,
RetentionGfsYears: 0,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Create 5 backups on 5 different days; only the 3 newest days should be kept.
// Index 0 is the oldest (4 days ago); each CreatedAt is truncated to a day
// boundary so every backup lands on a distinct daily key.
var backupIDs []uuid.UUID
for i := 0; i < 5; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-time.Duration(4-i) * 24 * time.Hour).Truncate(24 * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backupIDs = append(backupIDs, backup.ID)
}
// Run the retention pass and verify exactly the 3 newest daily backups remain.
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[backupIDs[0]], "Oldest daily backup should be deleted")
assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest daily backup should be deleted")
assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain")
assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain")
assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain")
}
// Test_CleanByGFS_WithWeeklyAndMonthlySlots_KeepsWiderSpread verifies that a
// combined daily+weekly+monthly GFS policy keeps a spread of backups across
// weeks rather than only the newest few, and that the two most recent backups
// always survive.
func Test_CleanByGFS_WithWeeklyAndMonthlySlots_KeepsWiderSpread(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
// Tear down in reverse dependency order (same pattern as sibling tests).
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// GFS config: 2 daily + 2 weekly + 1 monthly slot.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsDays: 2,
RetentionGfsWeeks: 2,
RetentionGfsMonths: 1,
RetentionGfsYears: 0,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Create one backup per week for 6 weeks, each at midnight (CreatedAt is
// truncated to a 24h boundary, so all share now's weekday, 7 days apart).
// GFS should keep: up to 2 daily + 2 weekly + 1 monthly = up to 5 unique.
var createdIDs []uuid.UUID
for i := 0; i < 6; i++ {
weekOffset := time.Duration(5-i) * 7 * 24 * time.Hour
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-weekOffset).Truncate(24 * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
createdIDs = append(createdIDs, backup.ID)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
// We should have at most 5 backups kept (2 daily + 2 weekly + 1 monthly, but with overlap possible)
// The exact count depends on how many unique periods are covered
assert.LessOrEqual(t, len(remainingBackups), 5)
assert.GreaterOrEqual(t, len(remainingBackups), 1)
// The two most recent backups must always be retained. The newest fills a
// daily slot; the second-newest is a week old and so falls outside the 2-day
// daily window, but it survives via a weekly slot.
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.True(t, remainingIDs[createdIDs[4]], "Second newest backup should be retained (daily)")
assert.True(t, remainingIDs[createdIDs[5]], "Newest backup should be retained (daily)")
}
// Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase verifies resilience
// when storage becomes unavailable. Even if storage.DeleteFile fails (e.g., storage is offline,
// credentials changed, or storage was deleted), the backup record should still be removed from
@@ -897,292 +743,6 @@ func Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase(t *
assert.Nil(t, deletedBackup)
}
// Test_CleanByGFS_WithHourlySlots_KeepsCorrectBackups verifies the hourly-only
// GFS policy: with RetentionGfsHours=3 and five backups in five distinct
// hours, only the backups from the three newest hours survive.
func Test_CleanByGFS_WithHourlySlots_KeepsCorrectBackups(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
testStorage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, testStorage, notifier)
// Tear down in reverse dependency order (same pattern as sibling tests).
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(testStorage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// GFS config with only the hourly level active: 3 hourly slots.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsHours: 3,
StorageID: &testStorage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Create 5 backups spaced 1 hour apart; only the 3 newest hours should be kept.
// Index 0 is the oldest (4 hours ago); CreatedAt is truncated to an hour
// boundary so every backup lands on a distinct hourly key.
var backupIDs []uuid.UUID
for i := 0; i < 5; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: testStorage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-time.Duration(4-i) * time.Hour).Truncate(time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backupIDs = append(backupIDs, backup.ID)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[backupIDs[0]], "Oldest hourly backup should be deleted")
assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest hourly backup should be deleted")
assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain")
assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain")
assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain")
}
// Test_BuildGFSKeepSet exercises buildGFSKeepSet directly against a fixed
// reference time, covering each GFS level in isolation, de-duplication within
// a single period, additive slot filling across levels, and calendar
// boundaries (ISO weeks, year rollover).
//
// Fix: the table struct previously declared a `deletedRange *[2]int` field
// that no case ever set and the runner loop never read; the runner already
// asserts every non-kept index is deleted, so the dead field is removed.
func Test_BuildGFSKeepSet(t *testing.T) {
	// Fixed reference time: a Wednesday mid-month to avoid boundary edge cases in the default tests.
	// Use time.Date for determinism across test runs.
	ref := time.Date(2025, 6, 18, 12, 0, 0, 0, time.UTC) // Wednesday, 2025-06-18
	day := 24 * time.Hour
	week := 7 * day
	newBackup := func(createdAt time.Time) *backups_core.Backup {
		return &backups_core.Backup{ID: uuid.New(), CreatedAt: createdAt}
	}
	// backupsEveryDay returns n backups, newest-first, each 1 day apart.
	backupsEveryDay := func(n int) []*backups_core.Backup {
		bs := make([]*backups_core.Backup, n)
		for i := 0; i < n; i++ {
			bs[i] = newBackup(ref.Add(-time.Duration(i) * day))
		}
		return bs
	}
	// backupsEveryWeek returns n backups, newest-first, each 7 days apart.
	backupsEveryWeek := func(n int) []*backups_core.Backup {
		bs := make([]*backups_core.Backup, n)
		for i := 0; i < n; i++ {
			bs[i] = newBackup(ref.Add(-time.Duration(i) * week))
		}
		return bs
	}
	hour := time.Hour
	// backupsEveryHour returns n backups, newest-first, each 1 hour apart.
	backupsEveryHour := func(n int) []*backups_core.Backup {
		bs := make([]*backups_core.Backup, n)
		for i := 0; i < n; i++ {
			bs[i] = newBackup(ref.Add(-time.Duration(i) * hour))
		}
		return bs
	}
	tests := []struct {
		name        string
		backups     []*backups_core.Backup
		hours       int
		days        int
		weeks       int
		months      int
		years       int
		keptIndices []int // which indices in backups should be kept
	}{
		{
			name:        "OnlyHourlySlots_KeepsNewest3Of5",
			backups:     backupsEveryHour(5),
			hours:       3,
			keptIndices: []int{0, 1, 2},
		},
		{
			name: "SameHourDedup_OnlyNewestKeptForHourlySlot",
			backups: []*backups_core.Backup{
				newBackup(ref.Truncate(hour).Add(45 * time.Minute)),
				newBackup(ref.Truncate(hour).Add(10 * time.Minute)),
			},
			hours:       1,
			keptIndices: []int{0},
		},
		{
			name:        "OnlyDailySlots_KeepsNewest3Of5",
			backups:     backupsEveryDay(5),
			days:        3,
			keptIndices: []int{0, 1, 2},
		},
		{
			name:        "OnlyDailySlots_FewerBackupsThanSlots_KeepsAll",
			backups:     backupsEveryDay(2),
			days:        5,
			keptIndices: []int{0, 1},
		},
		{
			name:        "OnlyWeeklySlots_KeepsNewest2Weeks",
			backups:     backupsEveryWeek(4),
			weeks:       2,
			keptIndices: []int{0, 1},
		},
		{
			name: "OnlyMonthlySlots_KeepsNewest2Months",
			backups: []*backups_core.Backup{
				newBackup(time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2025, 5, 1, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2025, 4, 1, 12, 0, 0, 0, time.UTC)),
			},
			months:      2,
			keptIndices: []int{0, 1},
		},
		{
			name: "OnlyYearlySlots_KeepsNewest2Years",
			backups: []*backups_core.Backup{
				newBackup(time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2023, 6, 1, 12, 0, 0, 0, time.UTC)),
			},
			years:       2,
			keptIndices: []int{0, 1},
		},
		{
			name: "SameDayDedup_OnlyNewestKeptForDailySlot",
			backups: []*backups_core.Backup{
				// Two backups on the same day; newest-first order
				newBackup(ref.Truncate(day).Add(10 * time.Hour)),
				newBackup(ref.Truncate(day).Add(2 * time.Hour)),
			},
			days:        1,
			keptIndices: []int{0},
		},
		{
			name: "SameWeekDedup_OnlyNewestKeptForWeeklySlot",
			backups: []*backups_core.Backup{
				// ref is Wednesday; add Thursday of same week
				newBackup(ref.Add(1 * day)), // Thursday same week
				newBackup(ref),              // Wednesday same week
			},
			weeks:       1,
			keptIndices: []int{0},
		},
		{
			name: "AdditiveSlots_NewestFillsDailyAndWeeklyAndMonthly",
			// Newest backup fills daily + weekly + monthly simultaneously
			backups: []*backups_core.Backup{
				newBackup(time.Date(2025, 6, 18, 12, 0, 0, 0, time.UTC)), // newest
				newBackup(time.Date(2025, 6, 11, 12, 0, 0, 0, time.UTC)), // 1 week ago
				newBackup(time.Date(2025, 5, 18, 12, 0, 0, 0, time.UTC)), // 1 month ago
				newBackup(time.Date(2025, 4, 18, 12, 0, 0, 0, time.UTC)), // 2 months ago
			},
			days:        1,
			weeks:       2,
			months:      2,
			keptIndices: []int{0, 1, 2},
		},
		{
			name: "YearBoundary_CorrectlySplitsAcrossYears",
			backups: []*backups_core.Backup{
				newBackup(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2024, 12, 31, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)),
				newBackup(time.Date(2023, 6, 1, 12, 0, 0, 0, time.UTC)),
			},
			years:       2,
			keptIndices: []int{0, 1}, // 2025 and 2024 kept; 2024-06 and 2023 deleted
		},
		{
			name: "ISOWeekBoundary_Jan1UsesCorrectISOWeek",
			// 2025-01-01 is ISO week 1 of 2025; 2024-12-28 is ISO week 52 of 2024
			backups: []*backups_core.Backup{
				newBackup(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)),   // ISO week 2025-W01
				newBackup(time.Date(2024, 12, 28, 12, 0, 0, 0, time.UTC)), // ISO week 2024-W52
			},
			weeks:       2,
			keptIndices: []int{0, 1}, // different ISO weeks → both kept
		},
		{
			name:        "EmptyBackups_ReturnsEmptyKeepSet",
			backups:     []*backups_core.Backup{},
			hours:       3,
			days:        3,
			weeks:       2,
			months:      1,
			years:       1,
			keptIndices: []int{},
		},
		{
			name:        "AllZeroSlots_KeepsNothing",
			backups:     backupsEveryDay(5),
			hours:       0,
			days:        0,
			weeks:       0,
			months:      0,
			years:       0,
			keptIndices: []int{},
		},
		{
			name:    "AllSlotsActive_FullCombination",
			backups: backupsEveryWeek(12),
			days:    2,
			weeks:   3,
			months:  2,
			years:   1,
			// Additive slots: index 0 fills daily+weekly+monthly+yearly at once.
			// Backups are a week apart, so only index 0 falls inside the 2-day
			// daily window; indices 1-2 fill the remaining weekly slots, and
			// index 3 (first backup in the previous month) fills the 2nd
			// monthly slot.
			keptIndices: []int{0, 1, 2, 3},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			keepSet := buildGFSKeepSet(tc.backups, tc.hours, tc.days, tc.weeks, tc.months, tc.years)
			keptIndexSet := make(map[int]bool, len(tc.keptIndices))
			for _, idx := range tc.keptIndices {
				keptIndexSet[idx] = true
			}
			// Every index listed in keptIndices must be in the keep set;
			// every other index must be absent (i.e. eligible for deletion).
			for i, backup := range tc.backups {
				if keptIndexSet[i] {
					assert.True(t, keepSet[backup.ID], "backup at index %d should be kept", i)
				} else {
					assert.False(t, keepSet[backup.ID], "backup at index %d should be deleted", i)
				}
			}
		})
	}
}
func Test_CleanByTimePeriod_SkipsRecentBackup_EvenIfOlderThanRetention(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
@@ -1354,114 +914,6 @@ func Test_CleanByCount_SkipsRecentBackup_EvenIfOverLimit(t *testing.T) {
assert.True(t, remainingIDs[newestBackup.ID], "Newest backup should be preserved")
}
// Test_CleanByGFS_SkipsRecentBackup_WhenNotInKeepSet verifies the grace-period
// safety net: a backup excluded from the GFS keep-set (duplicate day) must
// still survive cleanup while it is recent, whereas genuinely old excluded
// backups are deleted.
func Test_CleanByGFS_SkipsRecentBackup_WhenNotInKeepSet(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
// Tear down in reverse dependency order (same pattern as sibling tests).
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// Keep only 1 daily slot. We create 2 old backups plus two recent backups on today.
// Backups are ordered newest-first, so the 15-min-old backup fills the single daily slot.
// The 30-min-old backup is the same day → not in the GFS keep-set, but it is still recent
// (within grace period) and must be preserved.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsDays: 1,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Old backup, 3 days ago — outside the single daily slot, must be deleted.
oldBackup1 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-3 * 24 * time.Hour).Truncate(24 * time.Hour),
}
// Old backup, 2 days ago — also outside the keep-set, must be deleted.
oldBackup2 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-2 * 24 * time.Hour).Truncate(24 * time.Hour),
}
// Newest backup today — will fill the single GFS daily slot.
newestTodayBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-15 * time.Minute),
}
// Slightly older backup, also today — NOT in GFS keep-set (duplicate day),
// but within the 60-minute grace period so it must survive.
recentNotInKeepSet := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-30 * time.Minute),
}
for _, b := range []*backups_core.Backup{oldBackup1, oldBackup2, newestTodayBackup, recentNotInKeepSet} {
err = backupRepository.Save(b)
assert.NoError(t, err)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[oldBackup1.ID], "Old backup 1 should be deleted by GFS")
assert.False(t, remainingIDs[oldBackup2.ID], "Old backup 2 should be deleted by GFS")
assert.True(
t,
remainingIDs[newestTodayBackup.ID],
"Newest backup fills GFS daily slot and must remain",
)
assert.True(
t,
remainingIDs[recentNotInKeepSet.ID],
"Recent backup not in keep-set must be preserved by grace period",
)
}
func Test_CleanExceededBackups_SkipsRecentBackup_WhenOverTotalSizeLimit(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)