From 61e937bc2aff4687cf727ba5b78f8edf03e1027d Mon Sep 17 00:00:00 2001 From: Rostislav Dugin Date: Fri, 20 Feb 2026 14:27:00 +0300 Subject: [PATCH] FEATURE (backups): Add GFS retention policy --- README.md | 12 +- .../backups/backups/backuping/cleaner.go | 286 ++++- .../backups/backups/backuping/cleaner_test.go | 1127 +++++++++++++++-- .../backups/backuping/scheduler_test.go | 45 +- .../backups/config/controller_test.go | 146 ++- .../internal/features/backups/config/enums.go | 8 + .../internal/features/backups/config/model.go | 65 +- .../features/backups/config/model_test.go | 124 +- .../features/backups/config/service.go | 3 +- .../features/backups/config/storages_test.go | 28 +- .../features/backups/config/testing.go | 7 +- backend/internal/features/plan/model.go | 6 +- backend/internal/util/period/enums.go | 28 +- .../20260220000000_add_retention_policy.sql | 38 + frontend/src/entity/backups/index.ts | 1 + .../src/entity/backups/model/BackupConfig.ts | 12 +- .../backups/model/RetentionPolicyType.ts | 5 + .../features/backups/ui/BackupsComponent.tsx | 93 +- .../backups/ui/EditBackupConfigComponent.tsx | 204 ++- .../backups/ui/ShowBackupConfigComponent.tsx | 46 +- 20 files changed, 1935 insertions(+), 349 deletions(-) create mode 100644 backend/migrations/20260220000000_add_retention_policy.sql create mode 100644 frontend/src/entity/backups/model/RetentionPolicyType.ts diff --git a/README.md b/README.md index 3c6b07b..0b155a7 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,13 @@ - **Precise timing**: run backups at specific times (e.g., 4 AM during low traffic) - **Smart compression**: 4-8x space savings with balanced compression (~20% overhead) +### πŸ—‘οΈ **Retention policies** + +- **Time period**: Keep backups for a fixed duration (e.g., 7 days, 3 months, 1 year) +- **Count**: Keep a fixed number of the most recent backups (e.g., last 30) +- **GFS (Grandfather-Father-Son)**: Layered retention β€” keep hourly, daily, weekly, monthly and yearly backups 
independently for fine-grained long-term history (enterprise requirement)
+- **Size limits**: Set per-backup and total storage size caps to control storage usage
+
 ### πŸ—„οΈ **Multiple storage destinations** (view supported)

 - **Local storage**: Keep backups on your VPS/server
@@ -220,8 +227,9 @@ For more options (NodePort, TLS, HTTPRoute for Gateway API), see the [Helm chart
 3. **Configure schedule**: Choose from hourly, daily, weekly, monthly or cron intervals
 4. **Set database connection**: Enter your database credentials and connection details
 5. **Choose storage**: Select where to store your backups (local, S3, Google Drive, etc.)
-6. **Add notifications** (optional): Configure email, Telegram, Slack, or webhook notifications
-7. **Save and start**: Databasus will validate settings and begin the backup schedule
+6. **Configure retention policy**: Choose time period, count or GFS to control how long backups are kept
+7. **Add notifications** (optional): Configure email, Telegram, Slack, or webhook notifications
+8. 
**Save and start**: Databasus will validate settings and begin the backup schedule ### πŸ”‘ Resetting password (docs) diff --git a/backend/internal/features/backups/backups/backuping/cleaner.go b/backend/internal/features/backups/backups/backuping/cleaner.go index d3c82b7..167b781 100644 --- a/backend/internal/features/backups/backups/backuping/cleaner.go +++ b/backend/internal/features/backups/backups/backuping/cleaner.go @@ -18,7 +18,8 @@ import ( ) const ( - cleanerTickerInterval = 1 * time.Minute + cleanerTickerInterval = 1 * time.Minute + recentBackupGracePeriod = 60 * time.Minute ) type BackupCleaner struct { @@ -51,8 +52,8 @@ func (c *BackupCleaner) Run(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - if err := c.cleanOldBackups(); err != nil { - c.logger.Error("Failed to clean old backups", "error", err) + if err := c.cleanByRetentionPolicy(); err != nil { + c.logger.Error("Failed to clean backups by retention policy", "error", err) } if err := c.cleanExceededBackups(); err != nil { @@ -100,49 +101,30 @@ func (c *BackupCleaner) AddBackupRemoveListener(listener backups_core.BackupRemo c.backupRemoveListeners = append(c.backupRemoveListeners, listener) } -func (c *BackupCleaner) cleanOldBackups() error { +func (c *BackupCleaner) cleanByRetentionPolicy() error { enabledBackupConfigs, err := c.backupConfigService.GetBackupConfigsWithEnabledBackups() if err != nil { return err } for _, backupConfig := range enabledBackupConfigs { - backupStorePeriod := backupConfig.StorePeriod + var cleanErr error - if backupStorePeriod == period.PeriodForever { - continue + switch backupConfig.RetentionPolicyType { + case backups_config.RetentionPolicyTypeCount: + cleanErr = c.cleanByCount(backupConfig) + case backups_config.RetentionPolicyTypeGFS: + cleanErr = c.cleanByGFS(backupConfig) + default: + cleanErr = c.cleanByTimePeriod(backupConfig) } - storeDuration := backupStorePeriod.ToDuration() - dateBeforeBackupsShouldBeDeleted := 
time.Now().UTC().Add(-storeDuration) - - oldBackups, err := c.backupRepository.FindBackupsBeforeDate( - backupConfig.DatabaseID, - dateBeforeBackupsShouldBeDeleted, - ) - if err != nil { + if cleanErr != nil { c.logger.Error( - "Failed to find old backups for database", - "databaseId", - backupConfig.DatabaseID, - "error", - err, - ) - continue - } - - for _, backup := range oldBackups { - if err := c.DeleteBackup(backup); err != nil { - c.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", err) - continue - } - - c.logger.Info( - "Deleted old backup", - "backupId", - backup.ID, - "databaseId", - backupConfig.DatabaseID, + "Failed to clean backups by retention policy", + "databaseId", backupConfig.DatabaseID, + "policy", backupConfig.RetentionPolicyType, + "error", cleanErr, ) } } @@ -179,6 +161,158 @@ func (c *BackupCleaner) cleanExceededBackups() error { return nil } +func (c *BackupCleaner) cleanByTimePeriod(backupConfig *backups_config.BackupConfig) error { + if backupConfig.RetentionTimePeriod == "" { + return nil + } + + if backupConfig.RetentionTimePeriod == period.PeriodForever { + return nil + } + + storeDuration := backupConfig.RetentionTimePeriod.ToDuration() + dateBeforeBackupsShouldBeDeleted := time.Now().UTC().Add(-storeDuration) + + oldBackups, err := c.backupRepository.FindBackupsBeforeDate( + backupConfig.DatabaseID, + dateBeforeBackupsShouldBeDeleted, + ) + if err != nil { + return fmt.Errorf( + "failed to find old backups for database %s: %w", + backupConfig.DatabaseID, + err, + ) + } + + for _, backup := range oldBackups { + if isRecentBackup(backup) { + continue + } + + if err := c.DeleteBackup(backup); err != nil { + c.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", err) + continue + } + + c.logger.Info( + "Deleted old backup", + "backupId", backup.ID, + "databaseId", backupConfig.DatabaseID, + ) + } + + return nil +} + +func (c *BackupCleaner) cleanByCount(backupConfig 
*backups_config.BackupConfig) error { + if backupConfig.RetentionCount <= 0 { + return nil + } + + completedBackups, err := c.backupRepository.FindByDatabaseIdAndStatus( + backupConfig.DatabaseID, + backups_core.BackupStatusCompleted, + ) + if err != nil { + return fmt.Errorf( + "failed to find completed backups for database %s: %w", + backupConfig.DatabaseID, + err, + ) + } + + // completedBackups are ordered newest first; delete everything beyond position RetentionCount + if len(completedBackups) <= backupConfig.RetentionCount { + return nil + } + + toDelete := completedBackups[backupConfig.RetentionCount:] + for _, backup := range toDelete { + if isRecentBackup(backup) { + continue + } + + if err := c.DeleteBackup(backup); err != nil { + c.logger.Error( + "Failed to delete backup by count policy", + "backupId", + backup.ID, + "error", + err, + ) + continue + } + + c.logger.Info( + "Deleted backup by count policy", + "backupId", backup.ID, + "databaseId", backupConfig.DatabaseID, + "retentionCount", backupConfig.RetentionCount, + ) + } + + return nil +} + +func (c *BackupCleaner) cleanByGFS(backupConfig *backups_config.BackupConfig) error { + if backupConfig.RetentionGfsHours <= 0 && backupConfig.RetentionGfsDays <= 0 && + backupConfig.RetentionGfsWeeks <= 0 && backupConfig.RetentionGfsMonths <= 0 && + backupConfig.RetentionGfsYears <= 0 { + return nil + } + + completedBackups, err := c.backupRepository.FindByDatabaseIdAndStatus( + backupConfig.DatabaseID, + backups_core.BackupStatusCompleted, + ) + if err != nil { + return fmt.Errorf( + "failed to find completed backups for database %s: %w", + backupConfig.DatabaseID, + err, + ) + } + + keepSet := buildGFSKeepSet( + completedBackups, + backupConfig.RetentionGfsHours, + backupConfig.RetentionGfsDays, + backupConfig.RetentionGfsWeeks, + backupConfig.RetentionGfsMonths, + backupConfig.RetentionGfsYears, + ) + + for _, backup := range completedBackups { + if keepSet[backup.ID] { + continue + } + + if 
isRecentBackup(backup) { + continue + } + + if err := c.DeleteBackup(backup); err != nil { + c.logger.Error( + "Failed to delete backup by GFS policy", + "backupId", + backup.ID, + "error", + err, + ) + continue + } + + c.logger.Info( + "Deleted backup by GFS policy", + "backupId", backup.ID, + "databaseId", backupConfig.DatabaseID, + ) + } + + return nil +} + func (c *BackupCleaner) cleanExceededBackupsForDatabase( databaseID uuid.UUID, limitperDbMB int64, @@ -215,6 +349,21 @@ func (c *BackupCleaner) cleanExceededBackupsForDatabase( } backup := oldestBackups[0] + if isRecentBackup(backup) { + c.logger.Warn( + "Oldest backup is too recent to delete, stopping size cleanup", + "databaseId", + databaseID, + "backupId", + backup.ID, + "totalSizeMB", + backupsTotalSizeMB, + "limitMB", + limitperDbMB, + ) + break + } + if err := c.DeleteBackup(backup); err != nil { c.logger.Error( "Failed to delete exceeded backup", @@ -245,3 +394,68 @@ func (c *BackupCleaner) cleanExceededBackupsForDatabase( return nil } + +func isRecentBackup(backup *backups_core.Backup) bool { + return time.Since(backup.CreatedAt) < recentBackupGracePeriod +} + +// buildGFSKeepSet determines which backups to retain under the GFS rotation scheme. +// Backups must be sorted newest-first. A backup can fill multiple slots simultaneously +// (e.g. the newest backup of a year also fills the monthly, weekly, daily, and hourly slot). 
+func buildGFSKeepSet( + backups []*backups_core.Backup, + hours, days, weeks, months, years int, +) map[uuid.UUID]bool { + keep := make(map[uuid.UUID]bool) + + hoursSeen := make(map[string]bool) + daysSeen := make(map[string]bool) + weeksSeen := make(map[string]bool) + monthsSeen := make(map[string]bool) + yearsSeen := make(map[string]bool) + + hoursKept, daysKept, weeksKept, monthsKept, yearsKept := 0, 0, 0, 0, 0 + + for _, backup := range backups { + t := backup.CreatedAt + + hourKey := t.Format("2006-01-02-15") + dayKey := t.Format("2006-01-02") + weekYear, week := t.ISOWeek() + weekKey := fmt.Sprintf("%d-%02d", weekYear, week) + monthKey := t.Format("2006-01") + yearKey := t.Format("2006") + + if hours > 0 && hoursKept < hours && !hoursSeen[hourKey] { + keep[backup.ID] = true + hoursSeen[hourKey] = true + hoursKept++ + } + + if days > 0 && daysKept < days && !daysSeen[dayKey] { + keep[backup.ID] = true + daysSeen[dayKey] = true + daysKept++ + } + + if weeks > 0 && weeksKept < weeks && !weeksSeen[weekKey] { + keep[backup.ID] = true + weeksSeen[weekKey] = true + weeksKept++ + } + + if months > 0 && monthsKept < months && !monthsSeen[monthKey] { + keep[backup.ID] = true + monthsSeen[monthKey] = true + monthsKept++ + } + + if years > 0 && yearsKept < years && !yearsSeen[yearKey] { + keep[backup.ID] = true + yearsSeen[yearKey] = true + yearsKept++ + } + } + + return keep +} diff --git a/backend/internal/features/backups/backups/backuping/cleaner_test.go b/backend/internal/features/backups/backups/backuping/cleaner_test.go index 0b45f36..dc5d44f 100644 --- a/backend/internal/features/backups/backups/backuping/cleaner_test.go +++ b/backend/internal/features/backups/backups/backuping/cleaner_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" ) -func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) { +func Test_CleanOldBackups_DeletesBackupsOlderThanRetentionTimePeriod(t *testing.T) { router := CreateTestRouter() owner := 
users_testing.CreateTestUser(users_enums.UserRoleMember) workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) @@ -41,21 +41,20 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create backup interval interval := createTestInterval() backupConfig := &backups_config.BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, - StorageID: &storage.ID, - BackupIntervalID: interval.ID, - BackupInterval: interval, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, } _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) assert.NoError(t, err) - // Create backups with different ages now := time.Now().UTC() oldBackup1 := &backups_core.Backup{ ID: uuid.New(), @@ -63,7 +62,7 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) { StorageID: storage.ID, Status: backups_core.BackupStatusCompleted, BackupSizeMb: 10, - CreatedAt: now.Add(-10 * 24 * time.Hour), // 10 days old + CreatedAt: now.Add(-10 * 24 * time.Hour), } oldBackup2 := &backups_core.Backup{ ID: uuid.New(), @@ -71,7 +70,7 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) { StorageID: storage.ID, Status: backups_core.BackupStatusCompleted, BackupSizeMb: 10, - CreatedAt: now.Add(-8 * 24 * time.Hour), // 8 days old + CreatedAt: now.Add(-8 * 24 * time.Hour), } recentBackup := &backups_core.Backup{ ID: uuid.New(), @@ -79,7 +78,7 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) { StorageID: storage.ID, Status: backups_core.BackupStatusCompleted, BackupSizeMb: 10, - CreatedAt: now.Add(-3 * 24 * time.Hour), // 3 days old + CreatedAt: now.Add(-3 * 24 * 
time.Hour), } err = backupRepository.Save(oldBackup1) @@ -89,19 +88,17 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) { err = backupRepository.Save(recentBackup) assert.NoError(t, err) - // Run cleanup cleaner := GetBackupCleaner() - err = cleaner.cleanOldBackups() + err = cleaner.cleanByRetentionPolicy() assert.NoError(t, err) - // Verify old backups deleted, recent backup remains remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) assert.NoError(t, err) assert.Equal(t, 1, len(remainingBackups)) assert.Equal(t, recentBackup.ID, remainingBackups[0].ID) } -func Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod(t *testing.T) { +func Test_CleanOldBackups_SkipsDatabaseWithForeverRetentionPeriod(t *testing.T) { router := CreateTestRouter() owner := users_testing.CreateTestUser(users_enums.UserRoleMember) workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) @@ -122,38 +119,35 @@ func Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create backup interval interval := createTestInterval() backupConfig := &backups_config.BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodForever, - StorageID: &storage.ID, - BackupIntervalID: interval.ID, - BackupInterval: interval, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodForever, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, } _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) assert.NoError(t, err) - // Create very old backup oldBackup := &backups_core.Backup{ ID: uuid.New(), DatabaseID: database.ID, StorageID: storage.ID, Status: backups_core.BackupStatusCompleted, BackupSizeMb: 10, - CreatedAt: time.Now().UTC().Add(-365 * 24 * 
time.Hour), // 1 year old + CreatedAt: time.Now().UTC().Add(-365 * 24 * time.Hour), } err = backupRepository.Save(oldBackup) assert.NoError(t, err) - // Run cleanup cleaner := GetBackupCleaner() - err = cleaner.cleanOldBackups() + err = cleaner.cleanByRetentionPolicy() assert.NoError(t, err) - // Verify backup still exists remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) assert.NoError(t, err) assert.Equal(t, 1, len(remainingBackups)) @@ -181,22 +175,21 @@ func Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create backup interval interval := createTestInterval() backupConfig := &backups_config.BackupConfig{ DatabaseID: database.ID, IsBackupsEnabled: true, - StorePeriod: period.PeriodForever, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodForever, StorageID: &storage.ID, - MaxBackupsTotalSizeMB: 100, // 100 MB limit + MaxBackupsTotalSizeMB: 100, BackupIntervalID: interval.ID, BackupInterval: interval, } _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) assert.NoError(t, err) - // Create 3 backups totaling 50MB (under limit) for i := 0; i < 3; i++ { backup := &backups_core.Backup{ ID: uuid.New(), @@ -210,12 +203,10 @@ func Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted(t *testing.T) { assert.NoError(t, err) } - // Run cleanup cleaner := GetBackupCleaner() err = cleaner.cleanExceededBackups() assert.NoError(t, err) - // Verify all backups remain remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) assert.NoError(t, err) assert.Equal(t, 3, len(remainingBackups)) @@ -242,22 +233,21 @@ func Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups(t *testing.T) workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create backup interval interval := createTestInterval() backupConfig := &backups_config.BackupConfig{ 
DatabaseID: database.ID, IsBackupsEnabled: true, - StorePeriod: period.PeriodForever, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodForever, StorageID: &storage.ID, - MaxBackupsTotalSizeMB: 30, // 30 MB limit + MaxBackupsTotalSizeMB: 30, BackupIntervalID: interval.ID, BackupInterval: interval, } _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) assert.NoError(t, err) - // Create 5 backups of 10MB each (total 50MB, over 30MB limit) now := time.Now().UTC() var backupIDs []uuid.UUID for i := 0; i < 5; i++ { @@ -267,33 +257,30 @@ func Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups(t *testing.T) StorageID: storage.ID, Status: backups_core.BackupStatusCompleted, BackupSizeMb: 10, - CreatedAt: now.Add(-time.Duration(4-i) * time.Hour), // Oldest first + CreatedAt: now.Add(-time.Duration(4-i) * time.Hour), } err = backupRepository.Save(backup) assert.NoError(t, err) backupIDs = append(backupIDs, backup.ID) } - // Run cleanup cleaner := GetBackupCleaner() err = cleaner.cleanExceededBackups() assert.NoError(t, err) - // Verify 2 oldest backups deleted, 3 newest remain remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) assert.NoError(t, err) assert.Equal(t, 3, len(remainingBackups)) - // Check that the newest 3 backups remain remainingIDs := make(map[uuid.UUID]bool) for _, backup := range remainingBackups { remainingIDs[backup.ID] = true } - assert.False(t, remainingIDs[backupIDs[0]]) // Oldest deleted - assert.False(t, remainingIDs[backupIDs[1]]) // 2nd oldest deleted - assert.True(t, remainingIDs[backupIDs[2]]) // 3rd remains - assert.True(t, remainingIDs[backupIDs[3]]) // 4th remains - assert.True(t, remainingIDs[backupIDs[4]]) // Newest remains + assert.False(t, remainingIDs[backupIDs[0]]) + assert.False(t, remainingIDs[backupIDs[1]]) + assert.True(t, remainingIDs[backupIDs[2]]) + assert.True(t, remainingIDs[backupIDs[3]]) + assert.True(t, 
remainingIDs[backupIDs[4]]) } func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) { @@ -317,15 +304,15 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create backup interval interval := createTestInterval() backupConfig := &backups_config.BackupConfig{ DatabaseID: database.ID, IsBackupsEnabled: true, - StorePeriod: period.PeriodForever, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodForever, StorageID: &storage.ID, - MaxBackupsTotalSizeMB: 50, // 50 MB limit + MaxBackupsTotalSizeMB: 50, BackupIntervalID: interval.ID, BackupInterval: interval, } @@ -334,7 +321,6 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) { now := time.Now().UTC() - // Create 3 completed backups of 30MB each completedBackups := make([]*backups_core.Backup, 3) for i := 0; i < 3; i++ { backup := &backups_core.Backup{ @@ -350,7 +336,6 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) { completedBackups[i] = backup } - // Create 1 in-progress backup (should be excluded from size calculation and deletion) inProgressBackup := &backups_core.Backup{ ID: uuid.New(), DatabaseID: database.ID, @@ -362,19 +347,14 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) { err = backupRepository.Save(inProgressBackup) assert.NoError(t, err) - // Run cleanup cleaner := GetBackupCleaner() err = cleaner.cleanExceededBackups() assert.NoError(t, err) - // Verify: only completed backups deleted, in-progress remains remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) assert.NoError(t, err) - - // Should have in-progress + 1 completed (total 40MB completed + 10MB in-progress) assert.GreaterOrEqual(t, len(remainingBackups), 2) - // Verify in-progress backup still exists var inProgressFound bool for _, backup := range remainingBackups { if backup.ID == inProgressBackup.ID { @@ 
-406,22 +386,21 @@ func Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create backup interval interval := createTestInterval() backupConfig := &backups_config.BackupConfig{ DatabaseID: database.ID, IsBackupsEnabled: true, - StorePeriod: period.PeriodForever, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodForever, StorageID: &storage.ID, - MaxBackupsTotalSizeMB: 0, // No size limit + MaxBackupsTotalSizeMB: 0, BackupIntervalID: interval.ID, BackupInterval: interval, } _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) assert.NoError(t, err) - // Create large backups for i := 0; i < 10; i++ { backup := &backups_core.Backup{ ID: uuid.New(), @@ -435,12 +414,10 @@ func Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase(t *testing.T) { assert.NoError(t, err) } - // Run cleanup cleaner := GetBackupCleaner() err = cleaner.cleanExceededBackups() assert.NoError(t, err) - // Verify all backups remain remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) assert.NoError(t, err) assert.Equal(t, 10, len(remainingBackups)) @@ -467,7 +444,6 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) }() - // Create completed backups completedBackup1 := &backups_core.Backup{ ID: uuid.New(), DatabaseID: database.ID, @@ -484,7 +460,6 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) { BackupSizeMb: 20.3, CreatedAt: time.Now().UTC(), } - // Create failed backup (should be included) failedBackup := &backups_core.Backup{ ID: uuid.New(), DatabaseID: database.ID, @@ -493,7 +468,6 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) { BackupSizeMb: 5.2, CreatedAt: time.Now().UTC(), } - // Create in-progress backup (should be excluded) inProgressBackup := &backups_core.Backup{ ID: uuid.New(), 
DatabaseID: database.ID, @@ -512,25 +486,369 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) { err = backupRepository.Save(inProgressBackup) assert.NoError(t, err) - // Calculate total size totalSize, err := backupRepository.GetTotalSizeByDatabase(database.ID) assert.NoError(t, err) - - // Should be 10.5 + 20.3 + 5.2 = 36.0 (excluding in-progress 100) assert.InDelta(t, 36.0, totalSize, 0.1) } -// Mock listener for testing -type mockBackupRemoveListener struct { - onBeforeBackupRemove func(*backups_core.Backup) error -} +func Test_CleanByCount_KeepsNewestNBackups_DeletesOlder(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) -func (m *mockBackupRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error { - if m.onBeforeBackupRemove != nil { - return m.onBeforeBackupRemove(backup) + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeCount, + RetentionCount: 3, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := 
time.Now().UTC() + var backupIDs []uuid.UUID + for i := 0; i < 5; i++ { + backup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add( + -time.Duration(4-i) * time.Hour, + ), // oldest first in loop, newest = i=4 + } + err = backupRepository.Save(backup) + assert.NoError(t, err) + backupIDs = append(backupIDs, backup.ID) } - return nil + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Equal(t, 3, len(remainingBackups)) + + remainingIDs := make(map[uuid.UUID]bool) + for _, backup := range remainingBackups { + remainingIDs[backup.ID] = true + } + assert.False(t, remainingIDs[backupIDs[0]], "Oldest backup should be deleted") + assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest backup should be deleted") + assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain") + assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain") + assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain") +} + +func Test_CleanByCount_WhenUnderLimit_NoBackupsDeleted(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + 
workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeCount, + RetentionCount: 10, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + for i := 0; i < 5; i++ { + backup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: time.Now().UTC().Add(-time.Duration(i) * time.Hour), + } + err = backupRepository.Save(backup) + assert.NoError(t, err) + } + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Equal(t, 5, len(remainingBackups)) +} + +func Test_CleanByCount_DoesNotDeleteInProgressBackups(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + backupConfig := &backups_config.BackupConfig{ + 
DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeCount, + RetentionCount: 2, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + for i := 0; i < 3; i++ { + backup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-time.Duration(3-i) * time.Hour), + } + err = backupRepository.Save(backup) + assert.NoError(t, err) + } + + inProgressBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusInProgress, + BackupSizeMb: 5, + CreatedAt: now, + } + err = backupRepository.Save(inProgressBackup) + assert.NoError(t, err) + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + + var inProgressFound bool + for _, backup := range remainingBackups { + if backup.ID == inProgressBackup.ID { + inProgressFound = true + } + } + assert.True(t, inProgressFound, "In-progress backup should not be deleted by count policy") +} + +func Test_CleanByGFS_KeepsCorrectBackupsPerSlot(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + 
backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeGFS, + RetentionGfsDays: 3, + RetentionGfsWeeks: 0, + RetentionGfsMonths: 0, + RetentionGfsYears: 0, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + // Create 5 backups on 5 different days; only the 3 newest days should be kept + var backupIDs []uuid.UUID + for i := 0; i < 5; i++ { + backup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-time.Duration(4-i) * 24 * time.Hour).Truncate(24 * time.Hour), + } + err = backupRepository.Save(backup) + assert.NoError(t, err) + backupIDs = append(backupIDs, backup.ID) + } + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Equal(t, 3, len(remainingBackups)) + + remainingIDs := make(map[uuid.UUID]bool) + for _, backup := range remainingBackups { + remainingIDs[backup.ID] = true + } + assert.False(t, remainingIDs[backupIDs[0]], "Oldest daily backup should be deleted") + assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest daily backup should be deleted") + assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain") + assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain") + 
assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain") +} +
+func Test_CleanByGFS_WithWeeklyAndMonthlySlots_KeepsWiderSpread(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeGFS, + RetentionGfsDays: 2, + RetentionGfsWeeks: 2, + RetentionGfsMonths: 1, + RetentionGfsYears: 0, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + // Create one backup per week for 6 weeks (each on the same weekday as now, truncated to midnight UTC) + // GFS should keep: 2 daily (most recent 2 unique days) + 2 weekly + 1 monthly = up to 5 unique + var createdIDs []uuid.UUID + for i := 0; i < 6; i++ { + weekOffset := time.Duration(5-i) * 7 * 24 * time.Hour + backup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-weekOffset).Truncate(24 * time.Hour), + } + err = 
backupRepository.Save(backup) + assert.NoError(t, err) + createdIDs = append(createdIDs, backup.ID) + } + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + + // We should have at most 5 backups kept (2 daily + 2 weekly + 1 monthly, but with overlap possible) + // The exact count depends on how many unique periods are covered + assert.LessOrEqual(t, len(remainingBackups), 5) + assert.GreaterOrEqual(t, len(remainingBackups), 1) + + // The two most recent backups should always be retained (daily slots) + remainingIDs := make(map[uuid.UUID]bool) + for _, backup := range remainingBackups { + remainingIDs[backup.ID] = true + } + assert.True(t, remainingIDs[createdIDs[4]], "Second newest backup should be retained (daily)") + assert.True(t, remainingIDs[createdIDs[5]], "Newest backup should be retained (daily)") } // Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase verifies resilience @@ -579,6 +897,661 @@ func Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase(t * assert.Nil(t, deletedBackup) } +func Test_CleanByGFS_WithHourlySlots_KeepsCorrectBackups(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + testStorage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, testStorage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(testStorage.ID) + 
workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeGFS, + RetentionGfsHours: 3, + StorageID: &testStorage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + // Create 5 backups spaced 1 hour apart; only the 3 newest hours should be kept + var backupIDs []uuid.UUID + for i := 0; i < 5; i++ { + backup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: testStorage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-time.Duration(4-i) * time.Hour).Truncate(time.Hour), + } + err = backupRepository.Save(backup) + assert.NoError(t, err) + backupIDs = append(backupIDs, backup.ID) + } + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Equal(t, 3, len(remainingBackups)) + + remainingIDs := make(map[uuid.UUID]bool) + for _, backup := range remainingBackups { + remainingIDs[backup.ID] = true + } + assert.False(t, remainingIDs[backupIDs[0]], "Oldest hourly backup should be deleted") + assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest hourly backup should be deleted") + assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain") + assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain") + assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain") +} + +func Test_BuildGFSKeepSet(t *testing.T) { + // Fixed reference time: a Wednesday mid-month to avoid boundary edge cases in the default tests. + // Use time.Date for determinism across test runs. 
+ ref := time.Date(2025, 6, 18, 12, 0, 0, 0, time.UTC) // Wednesday, 2025-06-18 + + day := 24 * time.Hour + week := 7 * day + + newBackup := func(createdAt time.Time) *backups_core.Backup { + return &backups_core.Backup{ID: uuid.New(), CreatedAt: createdAt} + } + + // backupsEveryDay returns n backups, newest-first, each 1 day apart. + backupsEveryDay := func(n int) []*backups_core.Backup { + bs := make([]*backups_core.Backup, n) + for i := 0; i < n; i++ { + bs[i] = newBackup(ref.Add(-time.Duration(i) * day)) + } + return bs + } + + // backupsEveryWeek returns n backups, newest-first, each 7 days apart. + backupsEveryWeek := func(n int) []*backups_core.Backup { + bs := make([]*backups_core.Backup, n) + for i := 0; i < n; i++ { + bs[i] = newBackup(ref.Add(-time.Duration(i) * week)) + } + return bs + } + + hour := time.Hour + + // backupsEveryHour returns n backups, newest-first, each 1 hour apart. + backupsEveryHour := func(n int) []*backups_core.Backup { + bs := make([]*backups_core.Backup, n) + for i := 0; i < n; i++ { + bs[i] = newBackup(ref.Add(-time.Duration(i) * hour)) + } + return bs + } + + tests := []struct { + name string + backups []*backups_core.Backup + hours int + days int + weeks int + months int + years int + keptIndices []int // which indices in backups should be kept + deletedRange *[2]int // optional: all indices in [from, to) must be deleted + }{ + { + name: "OnlyHourlySlots_KeepsNewest3Of5", + backups: backupsEveryHour(5), + hours: 3, + keptIndices: []int{0, 1, 2}, + }, + { + name: "SameHourDedup_OnlyNewestKeptForHourlySlot", + backups: []*backups_core.Backup{ + newBackup(ref.Truncate(hour).Add(45 * time.Minute)), + newBackup(ref.Truncate(hour).Add(10 * time.Minute)), + }, + hours: 1, + keptIndices: []int{0}, + }, + { + name: "OnlyDailySlots_KeepsNewest3Of5", + backups: backupsEveryDay(5), + days: 3, + keptIndices: []int{0, 1, 2}, + }, + { + name: "OnlyDailySlots_FewerBackupsThanSlots_KeepsAll", + backups: backupsEveryDay(2), + days: 5, + 
keptIndices: []int{0, 1}, + }, + { + name: "OnlyWeeklySlots_KeepsNewest2Weeks", + backups: backupsEveryWeek(4), + weeks: 2, + keptIndices: []int{0, 1}, + }, + { + name: "OnlyMonthlySlots_KeepsNewest2Months", + backups: []*backups_core.Backup{ + newBackup(time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2025, 5, 1, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2025, 4, 1, 12, 0, 0, 0, time.UTC)), + }, + months: 2, + keptIndices: []int{0, 1}, + }, + { + name: "OnlyYearlySlots_KeepsNewest2Years", + backups: []*backups_core.Backup{ + newBackup(time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2023, 6, 1, 12, 0, 0, 0, time.UTC)), + }, + years: 2, + keptIndices: []int{0, 1}, + }, + { + name: "SameDayDedup_OnlyNewestKeptForDailySlot", + backups: []*backups_core.Backup{ + // Two backups on the same day; newest-first order + newBackup(ref.Truncate(day).Add(10 * time.Hour)), + newBackup(ref.Truncate(day).Add(2 * time.Hour)), + }, + days: 1, + keptIndices: []int{0}, + }, + { + name: "SameWeekDedup_OnlyNewestKeptForWeeklySlot", + backups: []*backups_core.Backup{ + // ref is Wednesday; add Thursday of same week + newBackup(ref.Add(1 * day)), // Thursday same week + newBackup(ref), // Wednesday same week + }, + weeks: 1, + keptIndices: []int{0}, + }, + { + name: "AdditiveSlots_NewestFillsDailyAndWeeklyAndMonthly", + // Newest backup fills daily + weekly + monthly simultaneously + backups: []*backups_core.Backup{ + newBackup(time.Date(2025, 6, 18, 12, 0, 0, 0, time.UTC)), // newest + newBackup(time.Date(2025, 6, 11, 12, 0, 0, 0, time.UTC)), // 1 week ago + newBackup(time.Date(2025, 5, 18, 12, 0, 0, 0, time.UTC)), // 1 month ago + newBackup(time.Date(2025, 4, 18, 12, 0, 0, 0, time.UTC)), // 2 months ago + }, + days: 1, + weeks: 2, + months: 2, + keptIndices: []int{0, 1, 2}, + }, + { + name: "YearBoundary_CorrectlySplitsAcrossYears", + backups: []*backups_core.Backup{ + 
newBackup(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2024, 12, 31, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)), + newBackup(time.Date(2023, 6, 1, 12, 0, 0, 0, time.UTC)), + }, + years: 2, + keptIndices: []int{0, 1}, // 2025 and 2024 kept; 2024-06 and 2023 deleted + }, + { + name: "ISOWeekBoundary_Jan1UsesCorrectISOWeek", + // 2025-01-01 is ISO week 1 of 2025; 2024-12-28 is ISO week 52 of 2024 + backups: []*backups_core.Backup{ + newBackup(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)), // ISO week 2025-W01 + newBackup(time.Date(2024, 12, 28, 12, 0, 0, 0, time.UTC)), // ISO week 2024-W52 + }, + weeks: 2, + keptIndices: []int{0, 1}, // different ISO weeks β†’ both kept + }, + { + name: "EmptyBackups_ReturnsEmptyKeepSet", + backups: []*backups_core.Backup{}, + hours: 3, + days: 3, + weeks: 2, + months: 1, + years: 1, + keptIndices: []int{}, + }, + { + name: "AllZeroSlots_KeepsNothing", + backups: backupsEveryDay(5), + hours: 0, + days: 0, + weeks: 0, + months: 0, + years: 0, + keptIndices: []int{}, + }, + { + name: "AllSlotsActive_FullCombination", + backups: backupsEveryWeek(12), + days: 2, + weeks: 3, + months: 2, + years: 1, + // 2 daily (indices 0,1) + 3rd weekly slot (index 2) + 2nd monthly slot (index 3). + // Additive slots: newest fills daily+weekly+monthly+yearly; each subsequent week fills another weekly, + // and the backup ~3 weeks older (the newest one in the previous month) fills the 2nd monthly slot. 
+ keptIndices: []int{0, 1, 2, 3}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + keepSet := buildGFSKeepSet(tc.backups, tc.hours, tc.days, tc.weeks, tc.months, tc.years) + + keptIndexSet := make(map[int]bool, len(tc.keptIndices)) + for _, idx := range tc.keptIndices { + keptIndexSet[idx] = true + } + + for i, backup := range tc.backups { + if keptIndexSet[i] { + assert.True(t, keepSet[backup.ID], "backup at index %d should be kept", i) + } else { + assert.False(t, keepSet[backup.ID], "backup at index %d should be deleted", i) + } + } + }) + } +} + +func Test_CleanByTimePeriod_SkipsRecentBackup_EvenIfOlderThanRetention(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + // Retention period is 1 day β€” any backup older than 1 day should be deleted. + // But the recent backup was created only 30 minutes ago and must be preserved. 
+ backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodDay, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + oldBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-2 * 24 * time.Hour), + } + recentBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-30 * time.Minute), + } + + err = backupRepository.Save(oldBackup) + assert.NoError(t, err) + err = backupRepository.Save(recentBackup) + assert.NoError(t, err) + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Equal(t, 1, len(remainingBackups)) + assert.Equal(t, recentBackup.ID, remainingBackups[0].ID) +} + +func Test_CleanByCount_SkipsRecentBackup_EvenIfOverLimit(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * 
time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + // Retention count is 2 β€” 4 backups exist so the 2 oldest should be deleted. + // NOTE(review): the 30-min-old backup is the 2nd newest of 4 here, so the count limit alone keeps it β€” confirm this actually exercises the grace period. + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeCount, + RetentionCount: 2, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + oldBackup1 := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-5 * time.Hour), + } + oldBackup2 := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-3 * time.Hour), + } + // NOTE(review): this backup is the 2nd newest (not 3rd), so it sits within the retention count and is kept regardless of recency. 
+ recentExcessBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-30 * time.Minute), + } + newestBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-10 * time.Minute), + } + + for _, b := range []*backups_core.Backup{oldBackup1, oldBackup2, recentExcessBackup, newestBackup} { + err = backupRepository.Save(b) + assert.NoError(t, err) + } + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + + remainingIDs := make(map[uuid.UUID]bool) + for _, backup := range remainingBackups { + remainingIDs[backup.ID] = true + } + + assert.False(t, remainingIDs[oldBackup1.ID], "Oldest non-recent backup should be deleted") + assert.False(t, remainingIDs[oldBackup2.ID], "2nd oldest non-recent backup should be deleted") + assert.True( + t, + remainingIDs[recentExcessBackup.ID], + "Recent backup must be preserved despite being over limit", + ) + assert.True(t, remainingIDs[newestBackup.ID], "Newest backup should be preserved") +} + +func Test_CleanByGFS_SkipsRecentBackup_WhenNotInKeepSet(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) 
+ time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) + }() + + interval := createTestInterval() + + // Keep only 1 daily slot. We create 2 old backups plus two recent backups on today. + // Backups are ordered newest-first, so the 15-min-old backup fills the single daily slot. + // The 30-min-old backup is the same day β†’ not in the GFS keep-set, but it is still recent + // (within grace period) and must be preserved. + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeGFS, + RetentionGfsDays: 1, + StorageID: &storage.ID, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + oldBackup1 := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-3 * 24 * time.Hour).Truncate(24 * time.Hour), + } + oldBackup2 := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-2 * 24 * time.Hour).Truncate(24 * time.Hour), + } + // Newest backup today β€” will fill the single GFS daily slot. + newestTodayBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-15 * time.Minute), + } + // Slightly older backup, also today β€” NOT in GFS keep-set (duplicate day), + // but within the 60-minute grace period so it must survive. 
+ recentNotInKeepSet := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 10, + CreatedAt: now.Add(-30 * time.Minute), + } + + for _, b := range []*backups_core.Backup{oldBackup1, oldBackup2, newestTodayBackup, recentNotInKeepSet} { + err = backupRepository.Save(b) + assert.NoError(t, err) + } + + cleaner := GetBackupCleaner() + err = cleaner.cleanByRetentionPolicy() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + + remainingIDs := make(map[uuid.UUID]bool) + for _, backup := range remainingBackups { + remainingIDs[backup.ID] = true + } + + assert.False(t, remainingIDs[oldBackup1.ID], "Old backup 1 should be deleted by GFS") + assert.False(t, remainingIDs[oldBackup2.ID], "Old backup 2 should be deleted by GFS") + assert.True( + t, + remainingIDs[newestTodayBackup.ID], + "Newest backup fills GFS daily slot and must remain", + ) + assert.True( + t, + remainingIDs[recentNotInKeepSet.ID], + "Recent backup not in keep-set must be preserved by grace period", + ) +} + +func Test_CleanExceededBackups_SkipsRecentBackup_WhenOverTotalSizeLimit(t *testing.T) { + router := CreateTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + storage := storages.CreateTestStorage(workspace.ID) + notifier := notifiers.CreateTestNotifier(workspace.ID) + database := databases.CreateTestDatabase(workspace.ID, storage, notifier) + + defer func() { + backups, _ := backupRepository.FindByDatabaseID(database.ID) + for _, backup := range backups { + backupRepository.DeleteByID(backup.ID) + } + + databases.RemoveTestDatabase(database) + time.Sleep(50 * time.Millisecond) + notifiers.RemoveTestNotifier(notifier) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, 
router) + }() + + interval := createTestInterval() + + // Total size limit is 10 MB. We have two backups of 8 MB each (16 MB total). + // The oldest backup was created 30 minutes ago β€” within the grace period. + // The cleaner must stop and leave both backups intact. + backupConfig := &backups_config.BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodForever, + StorageID: &storage.ID, + MaxBackupsTotalSizeMB: 10, + BackupIntervalID: interval.ID, + BackupInterval: interval, + } + _, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig) + assert.NoError(t, err) + + now := time.Now().UTC() + + olderRecentBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 8, + CreatedAt: now.Add(-30 * time.Minute), + } + newerRecentBackup := &backups_core.Backup{ + ID: uuid.New(), + DatabaseID: database.ID, + StorageID: storage.ID, + Status: backups_core.BackupStatusCompleted, + BackupSizeMb: 8, + CreatedAt: now.Add(-10 * time.Minute), + } + + err = backupRepository.Save(olderRecentBackup) + assert.NoError(t, err) + err = backupRepository.Save(newerRecentBackup) + assert.NoError(t, err) + + cleaner := GetBackupCleaner() + err = cleaner.cleanExceededBackups() + assert.NoError(t, err) + + remainingBackups, err := backupRepository.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Equal( + t, + 2, + len(remainingBackups), + "Both recent backups must be preserved even though total size exceeds limit", + ) +} + +// Mock listener for testing +type mockBackupRemoveListener struct { + onBeforeBackupRemove func(*backups_core.Backup) error +} + +func (m *mockBackupRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error { + if m.onBeforeBackupRemove != nil { + return m.onBeforeBackupRemove(backup) + } + + return 
nil +} + func createTestInterval() *intervals.Interval { timeOfDay := "04:00" interval := &intervals.Interval{ diff --git a/backend/internal/features/backups/backups/backuping/scheduler_test.go b/backend/internal/features/backups/backups/backuping/scheduler_test.go index bbe1fc9..e3a36dd 100644 --- a/backend/internal/features/backups/backups/backuping/scheduler_test.go +++ b/backend/internal/features/backups/backups/backuping/scheduler_test.go @@ -57,7 +57,8 @@ func Test_RunPendingBackups_WhenLastBackupWasYesterday_CreatesNewBackup(t *testi TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -126,7 +127,8 @@ func Test_RunPendingBackups_WhenLastBackupWasRecentlyCompleted_SkipsBackup(t *te TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -194,7 +196,8 @@ func Test_RunPendingBackups_WhenLastBackupFailedAndRetriesDisabled_SkipsBackup(t TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID backupConfig.IsRetryIfFailed = false @@ -266,7 +269,8 @@ func Test_RunPendingBackups_WhenLastBackupFailedAndRetriesEnabled_CreatesNewBack TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = 
backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID backupConfig.IsRetryIfFailed = true @@ -339,7 +343,8 @@ func Test_RunPendingBackups_WhenFailedBackupsExceedMaxRetries_SkipsBackup(t *tes TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID backupConfig.IsRetryIfFailed = true @@ -410,7 +415,8 @@ func Test_RunPendingBackups_WhenBackupsDisabled_SkipsBackup(t *testing.T) { TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = false - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -479,7 +485,8 @@ func Test_CheckDeadNodesAndFailBackups_WhenNodeDies_FailsBackupAndCleansUpRegist TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -582,7 +589,8 @@ func Test_OnBackupCompleted_WhenTaskIsNotBackup_SkipsProcessing(t *testing.T) { TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -759,7 +767,8 @@ func Test_FailBackupsInProgress_WhenSchedulerStarts_CancelsBackupsAndUpdatesStat 
TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -872,7 +881,8 @@ func Test_StartBackup_WhenBackupCompletes_DecrementsActiveTaskCount(t *testing.T TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -975,7 +985,8 @@ func Test_StartBackup_WhenBackupFails_DecrementsActiveTaskCount(t *testing.T) { TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -1069,7 +1080,8 @@ func Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup(t *testing.T) { TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID @@ -1140,7 +1152,8 @@ func Test_RunPendingBackups_WhenLastBackupFailedWithIsSkipRetry_SkipsBackupEvenW TimeOfDay: &timeOfDay, } backupConfig.IsBackupsEnabled = true - backupConfig.StorePeriod = period.PeriodWeek + backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig.RetentionTimePeriod = period.PeriodWeek backupConfig.Storage = storage backupConfig.StorageID = &storage.ID 
backupConfig.IsRetryIfFailed = true @@ -1242,7 +1255,8 @@ func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCa TimeOfDay: &timeOfDay, } backupConfig1.IsBackupsEnabled = true - backupConfig1.StorePeriod = period.PeriodWeek + backupConfig1.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig1.RetentionTimePeriod = period.PeriodWeek backupConfig1.Storage = storage backupConfig1.StorageID = &storage.ID @@ -1259,7 +1273,8 @@ func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCa TimeOfDay: &timeOfDay, } backupConfig2.IsBackupsEnabled = true - backupConfig2.StorePeriod = period.PeriodWeek + backupConfig2.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod + backupConfig2.RetentionTimePeriod = period.PeriodWeek backupConfig2.Storage = storage backupConfig2.StorageID = &storage.ID diff --git a/backend/internal/features/backups/config/controller_test.go b/backend/internal/features/backups/config/controller_test.go index 7190c81..1cd29a1 100644 --- a/backend/internal/features/backups/config/controller_test.go +++ b/backend/internal/features/backups/config/controller_test.go @@ -118,9 +118,10 @@ func Test_SaveBackupConfig_PermissionsEnforced(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -146,7 +147,7 @@ func Test_SaveBackupConfig_PermissionsEnforced(t *testing.T) { if tt.expectSuccess { assert.Equal(t, database.ID, response.DatabaseID) assert.True(t, response.IsBackupsEnabled) - assert.Equal(t, period.PeriodWeek, response.StorePeriod) + assert.Equal(t, period.PeriodWeek, response.RetentionTimePeriod) } else { assert.Contains(t, 
string(testResp.Body), "insufficient permissions") } @@ -170,9 +171,10 @@ func Test_SaveBackupConfig_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *test timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -337,7 +339,7 @@ func Test_GetBackupConfigByDbID_ReturnsDefaultConfigForNewDatabase(t *testing.T) assert.Equal(t, database.ID, response.DatabaseID) assert.False(t, response.IsBackupsEnabled) - assert.Equal(t, plan.MaxStoragePeriod, response.StorePeriod) + assert.Equal(t, plan.MaxStoragePeriod, response.RetentionTimePeriod) assert.Equal(t, plan.MaxBackupSizeMB, response.MaxBackupSizeMB) assert.Equal(t, plan.MaxBackupsTotalSizeMB, response.MaxBackupsTotalSizeMB) assert.True(t, response.IsRetryIfFailed) @@ -411,9 +413,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi // Test 1: Try to save backup config with exceeded backup size limit timeOfDay := "04:00" backupConfigExceededSize := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -440,9 +443,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi // Test 2: Try to save backup config with exceeded total size limit backupConfigExceededTotal := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: 
RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -469,9 +473,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi // Test 3: Try to save backup config with exceeded storage period limit backupConfigExceededPeriod := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodYear, // Exceeds limit of Month + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodYear, // Exceeds limit of Month BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -498,9 +503,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi // Test 4: Save backup config within all limits - should succeed backupConfigValid := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, // Within Month limit + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, // Within Month limit BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -529,7 +535,7 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi assert.Equal(t, database.ID, responseValid.DatabaseID) assert.Equal(t, int64(80), responseValid.MaxBackupSizeMB) assert.Equal(t, int64(800), responseValid.MaxBackupsTotalSizeMB) - assert.Equal(t, period.PeriodWeek, responseValid.StorePeriod) + assert.Equal(t, period.PeriodWeek, responseValid.RetentionTimePeriod) } func Test_IsStorageUsing_PermissionsEnforced(t *testing.T) { @@ -618,9 +624,10 @@ func Test_SaveBackupConfig_WithEncryptionNone_ConfigSaved(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - 
IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -662,9 +669,10 @@ func Test_SaveBackupConfig_WithEncryptionEncrypted_ConfigSaved(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -959,9 +967,10 @@ func Test_TransferDatabase_ToNewStorage_DatabaseTransferd(t *testing.T) { timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1045,9 +1054,10 @@ func Test_TransferDatabase_WithExistingStorage_DatabaseAndStorageTransferd(t *te timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1142,9 +1152,10 @@ func Test_TransferDatabase_StorageHasOtherDBs_CannotTransfer(t *testing.T) { timeOfDay := "04:00" backupConfigRequest1 := BackupConfig{ - DatabaseID: database1.ID, - IsBackupsEnabled: true, - StorePeriod: 
period.PeriodWeek, + DatabaseID: database1.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1168,9 +1179,10 @@ func Test_TransferDatabase_StorageHasOtherDBs_CannotTransfer(t *testing.T) { ) backupConfigRequest2 := BackupConfig{ - DatabaseID: database2.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database2.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1244,9 +1256,10 @@ func Test_TransferDatabase_WithNotifiers_NotifiersTransferred(t *testing.T) { timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1364,9 +1377,10 @@ func Test_TransferDatabase_NotifierHasOtherDBs_NotifierSkipped(t *testing.T) { timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database1.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database1.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1486,9 +1500,10 @@ func Test_TransferDatabase_WithMultipleNotifiers_OnlyExclusiveOnesTransferred(t timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database1.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database1.ID, + 
IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1585,9 +1600,10 @@ func Test_TransferDatabase_WithTargetNotifiers_NotifiersAssigned(t *testing.T) { timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1665,9 +1681,10 @@ func Test_TransferDatabase_TargetNotifierFromDifferentWorkspace_ReturnsBadReques timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1730,9 +1747,10 @@ func Test_TransferDatabase_TargetStorageFromDifferentWorkspace_ReturnsBadRequest timeOfDay := "04:00" backupConfigRequest := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1789,9 +1807,10 @@ func Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase(t *testing.T timeOfDay := "04:00" backupConfigWithRegularStorage := BackupConfig{ - DatabaseID: databaseA.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: databaseA.ID, + 
IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -1840,9 +1859,10 @@ func Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase(t *testing.T assert.True(t, savedSystemStorage.IsSystem) backupConfigWithSystemStorage := BackupConfig{ - DatabaseID: databaseA.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: databaseA.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, diff --git a/backend/internal/features/backups/config/enums.go b/backend/internal/features/backups/config/enums.go index 3baedc3..8dffa27 100644 --- a/backend/internal/features/backups/config/enums.go +++ b/backend/internal/features/backups/config/enums.go @@ -13,3 +13,11 @@ const ( BackupEncryptionNone BackupEncryption = "NONE" BackupEncryptionEncrypted BackupEncryption = "ENCRYPTED" ) + +type RetentionPolicyType string + +const ( + RetentionPolicyTypeTimePeriod RetentionPolicyType = "TIME_PERIOD" + RetentionPolicyTypeCount RetentionPolicyType = "COUNT" + RetentionPolicyTypeGFS RetentionPolicyType = "GFS" +) diff --git a/backend/internal/features/backups/config/model.go b/backend/internal/features/backups/config/model.go index b5567f6..57fbc0c 100644 --- a/backend/internal/features/backups/config/model.go +++ b/backend/internal/features/backups/config/model.go @@ -18,7 +18,15 @@ type BackupConfig struct { IsBackupsEnabled bool `json:"isBackupsEnabled" gorm:"column:is_backups_enabled;type:boolean;not null"` - StorePeriod period.Period `json:"storePeriod" gorm:"column:store_period;type:text;not null"` + RetentionPolicyType RetentionPolicyType `json:"retentionPolicyType" gorm:"column:retention_policy_type;type:text;not 
null;default:'TIME_PERIOD'"` + RetentionTimePeriod period.TimePeriod `json:"retentionTimePeriod" gorm:"column:retention_time_period;type:text;not null;default:''"` + + RetentionCount int `json:"retentionCount" gorm:"column:retention_count;type:int;not null;default:0"` + RetentionGfsHours int `json:"retentionGfsHours" gorm:"column:retention_gfs_hours;type:int;not null;default:0"` + RetentionGfsDays int `json:"retentionGfsDays" gorm:"column:retention_gfs_days;type:int;not null;default:0"` + RetentionGfsWeeks int `json:"retentionGfsWeeks" gorm:"column:retention_gfs_weeks;type:int;not null;default:0"` + RetentionGfsMonths int `json:"retentionGfsMonths" gorm:"column:retention_gfs_months;type:int;not null;default:0"` + RetentionGfsYears int `json:"retentionGfsYears" gorm:"column:retention_gfs_years;type:int;not null;default:0"` BackupIntervalID uuid.UUID `json:"backupIntervalId" gorm:"column:backup_interval_id;type:uuid;not null"` BackupInterval *intervals.Interval `json:"backupInterval,omitempty" gorm:"foreignKey:BackupIntervalID"` @@ -78,13 +86,12 @@ func (b *BackupConfig) AfterFind(tx *gorm.DB) error { } func (b *BackupConfig) Validate(plan *plans.DatabasePlan) error { - // Backup interval is required either as ID or as object if b.BackupIntervalID == uuid.Nil && b.BackupInterval == nil { return errors.New("backup interval is required") } - if b.StorePeriod == "" { - return errors.New("store period is required") + if err := b.validateRetentionPolicy(plan); err != nil { + return err } if b.IsRetryIfFailed && b.MaxFailedTriesCount <= 0 { @@ -110,22 +117,12 @@ func (b *BackupConfig) Validate(plan *plans.DatabasePlan) error { return errors.New("max backups total size must be non-negative") } - // Validate against plan limits - // Check storage period limit - if plan.MaxStoragePeriod != period.PeriodForever { - if b.StorePeriod.CompareTo(plan.MaxStoragePeriod) > 0 { - return errors.New("storage period exceeds plan limit") - } - } - - // Check max backup size limit (0 in 
plan means unlimited) if plan.MaxBackupSizeMB > 0 { if b.MaxBackupSizeMB == 0 || b.MaxBackupSizeMB > plan.MaxBackupSizeMB { return errors.New("max backup size exceeds plan limit") } } - // Check max total backups size limit (0 in plan means unlimited) if plan.MaxBackupsTotalSizeMB > 0 { if b.MaxBackupsTotalSizeMB == 0 || b.MaxBackupsTotalSizeMB > plan.MaxBackupsTotalSizeMB { @@ -140,7 +137,14 @@ func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig { return &BackupConfig{ DatabaseID: newDatabaseID, IsBackupsEnabled: b.IsBackupsEnabled, - StorePeriod: b.StorePeriod, + RetentionPolicyType: b.RetentionPolicyType, + RetentionTimePeriod: b.RetentionTimePeriod, + RetentionCount: b.RetentionCount, + RetentionGfsHours: b.RetentionGfsHours, + RetentionGfsDays: b.RetentionGfsDays, + RetentionGfsWeeks: b.RetentionGfsWeeks, + RetentionGfsMonths: b.RetentionGfsMonths, + RetentionGfsYears: b.RetentionGfsYears, BackupIntervalID: uuid.Nil, BackupInterval: b.BackupInterval.Copy(), StorageID: b.StorageID, @@ -152,3 +156,34 @@ func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig { MaxBackupsTotalSizeMB: b.MaxBackupsTotalSizeMB, } } + +func (b *BackupConfig) validateRetentionPolicy(plan *plans.DatabasePlan) error { + switch b.RetentionPolicyType { + case RetentionPolicyTypeTimePeriod, "": + if b.RetentionTimePeriod == "" { + return errors.New("retention time period is required") + } + + if plan.MaxStoragePeriod != period.PeriodForever { + if b.RetentionTimePeriod.CompareTo(plan.MaxStoragePeriod) > 0 { + return errors.New("storage period exceeds plan limit") + } + } + + case RetentionPolicyTypeCount: + if b.RetentionCount <= 0 { + return errors.New("retention count must be greater than 0") + } + + case RetentionPolicyTypeGFS: + if b.RetentionGfsHours <= 0 && b.RetentionGfsDays <= 0 && b.RetentionGfsWeeks <= 0 && + b.RetentionGfsMonths <= 0 && b.RetentionGfsYears <= 0 { + return errors.New("at least one GFS retention field must be greater than 0") + } + + 
default: + return errors.New("invalid retention policy type") + } + + return nil +} diff --git a/backend/internal/features/backups/config/model_test.go b/backend/internal/features/backups/config/model_test.go index 4ae0a54..90c6cff 100644 --- a/backend/internal/features/backups/config/model_test.go +++ b/backend/internal/features/backups/config/model_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/assert" ) -func Test_Validate_WhenStoragePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t *testing.T) { +func Test_Validate_WhenRetentionTimePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodWeek + config.RetentionTimePeriod = period.PeriodWeek plan := createUnlimitedPlan() plan.MaxStoragePeriod = period.PeriodMonth @@ -22,9 +22,9 @@ func Test_Validate_WhenStoragePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t assert.NoError(t, err) } -func Test_Validate_WhenStoragePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *testing.T) { +func Test_Validate_WhenRetentionTimePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodYear + config.RetentionTimePeriod = period.PeriodYear plan := createUnlimitedPlan() plan.MaxStoragePeriod = period.PeriodMonth @@ -33,9 +33,11 @@ func Test_Validate_WhenStoragePeriodIsYearAndPlanAllowsMonth_ValidationFails(t * assert.EqualError(t, err, "storage period exceeds plan limit") } -func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsForever_ValidationPasses(t *testing.T) { +func Test_Validate_WhenRetentionTimePeriodIsForeverAndPlanAllowsForever_ValidationPasses( + t *testing.T, +) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodForever + config.RetentionTimePeriod = period.PeriodForever plan := createUnlimitedPlan() plan.MaxStoragePeriod = period.PeriodForever @@ -44,9 +46,9 @@ func 
Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsForever_ValidationPass assert.NoError(t, err) } -func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsYear_ValidationFails(t *testing.T) { +func Test_Validate_WhenRetentionTimePeriodIsForeverAndPlanAllowsYear_ValidationFails(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodForever + config.RetentionTimePeriod = period.PeriodForever plan := createUnlimitedPlan() plan.MaxStoragePeriod = period.PeriodYear @@ -55,9 +57,9 @@ func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsYear_ValidationFails(t assert.EqualError(t, err, "storage period exceeds plan limit") } -func Test_Validate_WhenStoragePeriodEqualsExactPlanLimit_ValidationPasses(t *testing.T) { +func Test_Validate_WhenRetentionTimePeriodEqualsExactPlanLimit_ValidationPasses(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodMonth + config.RetentionTimePeriod = period.PeriodMonth plan := createUnlimitedPlan() plan.MaxStoragePeriod = period.PeriodMonth @@ -178,7 +180,7 @@ func Test_Validate_WhenTotalSizeEqualsExactPlanLimit_ValidationPasses(t *testing func Test_Validate_WhenAllLimitsAreUnlimitedInPlan_AnyConfigurationPasses(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodForever + config.RetentionTimePeriod = period.PeriodForever config.MaxBackupSizeMB = 0 config.MaxBackupsTotalSizeMB = 0 @@ -190,7 +192,7 @@ func Test_Validate_WhenAllLimitsAreUnlimitedInPlan_AnyConfigurationPasses(t *tes func Test_Validate_WhenMultipleLimitsExceeded_ValidationFailsWithFirstError(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = period.PeriodYear + config.RetentionTimePeriod = period.PeriodYear config.MaxBackupSizeMB = 500 config.MaxBackupsTotalSizeMB = 5000 @@ -249,14 +251,14 @@ func Test_Validate_WhenEncryptionIsInvalid_ValidationFailsRegardlessOfPlan(t *te assert.EqualError(t, err, "encryption must be NONE or ENCRYPTED") } -func 
Test_Validate_WhenStoragePeriodIsEmpty_ValidationFails(t *testing.T) { +func Test_Validate_WhenRetentionTimePeriodIsEmpty_ValidationFails(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = "" + config.RetentionTimePeriod = "" plan := createUnlimitedPlan() err := config.Validate(plan) - assert.EqualError(t, err, "store period is required") + assert.EqualError(t, err, "retention time period is required") } func Test_Validate_WhenMaxBackupSizeIsNegative_ValidationFails(t *testing.T) { @@ -282,8 +284,8 @@ func Test_Validate_WhenMaxTotalSizeIsNegative_ValidationFails(t *testing.T) { func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) { tests := []struct { name string - configPeriod period.Period - planPeriod period.Period + configPeriod period.TimePeriod + planPeriod period.TimePeriod configSize int64 planSize int64 configTotal int64 @@ -345,7 +347,7 @@ func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config := createValidBackupConfig() - config.StorePeriod = tt.configPeriod + config.RetentionTimePeriod = tt.configPeriod config.MaxBackupSizeMB = tt.configSize config.MaxBackupsTotalSizeMB = tt.configTotal @@ -364,12 +366,96 @@ func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) { } } +func Test_Validate_WhenPolicyTypeIsCount_RequiresPositiveCount(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = RetentionPolicyTypeCount + config.RetentionCount = 0 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "retention count must be greater than 0") +} + +func Test_Validate_WhenPolicyTypeIsCount_WithPositiveCount_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = RetentionPolicyTypeCount + config.RetentionCount = 10 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + 
assert.NoError(t, err) +} + +func Test_Validate_WhenPolicyTypeIsGFS_RequiresAtLeastOneField(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = RetentionPolicyTypeGFS + config.RetentionGfsDays = 0 + config.RetentionGfsWeeks = 0 + config.RetentionGfsMonths = 0 + config.RetentionGfsYears = 0 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "at least one GFS retention field must be greater than 0") +} + +func Test_Validate_WhenPolicyTypeIsGFS_WithOnlyHours_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = RetentionPolicyTypeGFS + config.RetentionGfsHours = 24 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenPolicyTypeIsGFS_WithOnlyDays_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = RetentionPolicyTypeGFS + config.RetentionGfsDays = 7 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenPolicyTypeIsGFS_WithAllFields_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = RetentionPolicyTypeGFS + config.RetentionGfsHours = 24 + config.RetentionGfsDays = 7 + config.RetentionGfsWeeks = 4 + config.RetentionGfsMonths = 12 + config.RetentionGfsYears = 3 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenPolicyTypeIsInvalid_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.RetentionPolicyType = "INVALID" + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "invalid retention policy type") +} + func createValidBackupConfig() *BackupConfig { intervalID := uuid.New() return &BackupConfig{ DatabaseID: uuid.New(), IsBackupsEnabled: true, - StorePeriod: period.PeriodMonth, + 
RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodMonth, BackupIntervalID: intervalID, BackupInterval: &intervals.Interval{ID: intervalID}, SendNotificationsOn: []BackupNotificationType{}, diff --git a/backend/internal/features/backups/config/service.go b/backend/internal/features/backups/config/service.go index 39351d5..090c10c 100644 --- a/backend/internal/features/backups/config/service.go +++ b/backend/internal/features/backups/config/service.go @@ -227,7 +227,8 @@ func (s *BackupConfigService) initializeDefaultConfig( _, err = s.backupConfigRepository.Save(&BackupConfig{ DatabaseID: databaseID, IsBackupsEnabled: false, - StorePeriod: plan.MaxStoragePeriod, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: plan.MaxStoragePeriod, MaxBackupSizeMB: plan.MaxBackupSizeMB, MaxBackupsTotalSizeMB: plan.MaxBackupsTotalSizeMB, BackupInterval: &intervals.Interval{ diff --git a/backend/internal/features/backups/config/storages_test.go b/backend/internal/features/backups/config/storages_test.go index 82153b9..e0ef6c8 100644 --- a/backend/internal/features/backups/config/storages_test.go +++ b/backend/internal/features/backups/config/storages_test.go @@ -35,9 +35,10 @@ func Test_AttachStorageFromSameWorkspace_SuccessfullyAttached(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -87,9 +88,10 @@ func Test_AttachStorageFromDifferentWorkspace_ReturnsForbidden(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + 
RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -131,9 +133,10 @@ func Test_DeleteStorageWithAttachedDatabases_CannotDelete(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, @@ -191,9 +194,10 @@ func Test_TransferStorageWithAttachedDatabase_CannotTransfer(t *testing.T) { timeOfDay := "04:00" request := BackupConfig{ - DatabaseID: database.ID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodWeek, + DatabaseID: database.ID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodWeek, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, diff --git a/backend/internal/features/backups/config/testing.go b/backend/internal/features/backups/config/testing.go index 883d87e..208b2ee 100644 --- a/backend/internal/features/backups/config/testing.go +++ b/backend/internal/features/backups/config/testing.go @@ -15,9 +15,10 @@ func EnableBackupsForTestDatabase( timeOfDay := "16:00" backupConfig := &BackupConfig{ - DatabaseID: databaseID, - IsBackupsEnabled: true, - StorePeriod: period.PeriodDay, + DatabaseID: databaseID, + IsBackupsEnabled: true, + RetentionPolicyType: RetentionPolicyTypeTimePeriod, + RetentionTimePeriod: period.PeriodDay, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, diff --git a/backend/internal/features/plan/model.go b/backend/internal/features/plan/model.go index 0d2f430..ac14da9 100644 --- 
a/backend/internal/features/plan/model.go +++ b/backend/internal/features/plan/model.go @@ -9,9 +9,9 @@ import ( type DatabasePlan struct { DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;type:uuid;primaryKey;not null"` - MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"` - MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"` - MaxStoragePeriod period.Period `json:"maxStoragePeriod" gorm:"column:max_storage_period;type:text;not null"` + MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"` + MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"` + MaxStoragePeriod period.TimePeriod `json:"maxStoragePeriod" gorm:"column:max_storage_period;type:text;not null"` } func (p *DatabasePlan) TableName() string { diff --git a/backend/internal/util/period/enums.go b/backend/internal/util/period/enums.go index b38c261..ac7f199 100644 --- a/backend/internal/util/period/enums.go +++ b/backend/internal/util/period/enums.go @@ -2,24 +2,24 @@ package period import "time" -type Period string +type TimePeriod string const ( - PeriodDay Period = "DAY" - PeriodWeek Period = "WEEK" - PeriodMonth Period = "MONTH" - Period3Month Period = "3_MONTH" - Period6Month Period = "6_MONTH" - PeriodYear Period = "YEAR" - Period2Years Period = "2_YEARS" - Period3Years Period = "3_YEARS" - Period4Years Period = "4_YEARS" - Period5Years Period = "5_YEARS" - PeriodForever Period = "FOREVER" + PeriodDay TimePeriod = "DAY" + PeriodWeek TimePeriod = "WEEK" + PeriodMonth TimePeriod = "MONTH" + Period3Month TimePeriod = "3_MONTH" + Period6Month TimePeriod = "6_MONTH" + PeriodYear TimePeriod = "YEAR" + Period2Years TimePeriod = "2_YEARS" + Period3Years TimePeriod = "3_YEARS" + Period4Years TimePeriod = "4_YEARS" + Period5Years TimePeriod = "5_YEARS" + PeriodForever TimePeriod = 
"FOREVER" ) // ToDuration converts Period to time.Duration -func (p Period) ToDuration() time.Duration { +func (p TimePeriod) ToDuration() time.Duration { switch p { case PeriodDay: return 24 * time.Hour @@ -55,7 +55,7 @@ func (p Period) ToDuration() time.Duration { // 1 if p > other // // FOREVER is treated as the longest period -func (p Period) CompareTo(other Period) int { +func (p TimePeriod) CompareTo(other TimePeriod) int { if p == other { return 0 } diff --git a/backend/migrations/20260220000000_add_retention_policy.sql b/backend/migrations/20260220000000_add_retention_policy.sql new file mode 100644 index 0000000..4514f85 --- /dev/null +++ b/backend/migrations/20260220000000_add_retention_policy.sql @@ -0,0 +1,38 @@ +-- +goose Up + +ALTER TABLE backup_configs + ADD COLUMN retention_policy_type TEXT NOT NULL DEFAULT 'TIME_PERIOD', + ADD COLUMN retention_time_period TEXT NOT NULL DEFAULT '', + ADD COLUMN retention_count INT NOT NULL DEFAULT 0, + ADD COLUMN retention_gfs_hours INT NOT NULL DEFAULT 0, + ADD COLUMN retention_gfs_days INT NOT NULL DEFAULT 0, + ADD COLUMN retention_gfs_weeks INT NOT NULL DEFAULT 0, + ADD COLUMN retention_gfs_months INT NOT NULL DEFAULT 0, + ADD COLUMN retention_gfs_years INT NOT NULL DEFAULT 0; + +UPDATE backup_configs +SET retention_time_period = store_period; + +ALTER TABLE backup_configs + DROP COLUMN store_period; + +-- +goose Down + +ALTER TABLE backup_configs + ADD COLUMN store_period TEXT NOT NULL DEFAULT 'WEEK'; + +UPDATE backup_configs +SET store_period = CASE + WHEN retention_time_period != '' THEN retention_time_period + ELSE 'WEEK' +END; + +ALTER TABLE backup_configs + DROP COLUMN retention_policy_type, + DROP COLUMN retention_time_period, + DROP COLUMN retention_count, + DROP COLUMN retention_gfs_hours, + DROP COLUMN retention_gfs_days, + DROP COLUMN retention_gfs_weeks, + DROP COLUMN retention_gfs_months, + DROP COLUMN retention_gfs_years; diff --git a/frontend/src/entity/backups/index.ts 
b/frontend/src/entity/backups/index.ts index ccf195d..96a1e28 100644 --- a/frontend/src/entity/backups/index.ts +++ b/frontend/src/entity/backups/index.ts @@ -5,5 +5,6 @@ export type { Backup } from './model/Backup'; export type { BackupConfig } from './model/BackupConfig'; export { BackupNotificationType } from './model/BackupNotificationType'; export { BackupEncryption } from './model/BackupEncryption'; +export { RetentionPolicyType } from './model/RetentionPolicyType'; export type { TransferDatabaseRequest } from './model/TransferDatabaseRequest'; export type { DatabasePlan } from '../plan'; diff --git a/frontend/src/entity/backups/model/BackupConfig.ts b/frontend/src/entity/backups/model/BackupConfig.ts index 8fcb1f4..84e5fd0 100644 --- a/frontend/src/entity/backups/model/BackupConfig.ts +++ b/frontend/src/entity/backups/model/BackupConfig.ts @@ -3,12 +3,22 @@ import type { Interval } from '../../intervals'; import type { Storage } from '../../storages'; import { BackupEncryption } from './BackupEncryption'; import type { BackupNotificationType } from './BackupNotificationType'; +import type { RetentionPolicyType } from './RetentionPolicyType'; export interface BackupConfig { databaseId: string; isBackupsEnabled: boolean; - storePeriod: Period; + + retentionPolicyType: RetentionPolicyType; + retentionTimePeriod: Period; + retentionCount: number; + retentionGfsHours: number; + retentionGfsDays: number; + retentionGfsWeeks: number; + retentionGfsMonths: number; + retentionGfsYears: number; + backupInterval?: Interval; storage?: Storage; sendNotificationsOn: BackupNotificationType[]; diff --git a/frontend/src/entity/backups/model/RetentionPolicyType.ts b/frontend/src/entity/backups/model/RetentionPolicyType.ts new file mode 100644 index 0000000..3ec950b --- /dev/null +++ b/frontend/src/entity/backups/model/RetentionPolicyType.ts @@ -0,0 +1,5 @@ +export enum RetentionPolicyType { + TimePeriod = 'TIME_PERIOD', + Count = 'COUNT', + GFS = 'GFS', +} diff --git 
a/frontend/src/features/backups/ui/BackupsComponent.tsx b/frontend/src/features/backups/ui/BackupsComponent.tsx index 318dcd9..cc6b8f0 100644 --- a/frontend/src/features/backups/ui/BackupsComponent.tsx +++ b/frontend/src/features/backups/ui/BackupsComponent.tsx @@ -56,8 +56,7 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef const [showingRestoresBackupId, setShowingRestoresBackupId] = useState(); - const isReloadInProgress = useRef(false); - const isLazyLoadInProgress = useRef(false); + const lastRequestTimeRef = useRef(0); const [downloadingBackupId, setDownloadingBackupId] = useState(); const [cancellingBackupId, setCancellingBackupId] = useState(); @@ -73,85 +72,54 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef }; const loadBackups = async (limit?: number) => { - if (isReloadInProgress.current || isLazyLoadInProgress.current) { - return; - } + const requestTime = Date.now(); + lastRequestTimeRef.current = requestTime; - isReloadInProgress.current = true; + const loadLimit = limit ?? 
currentLimit; try { - const loadLimit = limit || currentLimit; const response = await backupsApi.getBackups(database.id, loadLimit, 0); + if (lastRequestTimeRef.current !== requestTime) return; + setBackups(response.backups); setTotalBackups(response.total); setHasMore(response.backups.length < response.total); } catch (e) { - alert((e as Error).message); + if (lastRequestTimeRef.current === requestTime) { + alert((e as Error).message); + } } - - isReloadInProgress.current = false; - }; - - const reloadInProgressBackups = async () => { - if (isReloadInProgress.current || isLazyLoadInProgress.current) { - return; - } - - isReloadInProgress.current = true; - - try { - // Fetch only the recent backups that could be in progress - // We fetch a small number (20) to capture recent backups that might be in progress - const response = await backupsApi.getBackups(database.id, 20, 0); - - // Update only the backups that exist in both lists - setBackups((prevBackups) => { - const updatedBackups = [...prevBackups]; - - response.backups.forEach((newBackup) => { - const index = updatedBackups.findIndex((b) => b.id === newBackup.id); - if (index !== -1) { - updatedBackups[index] = newBackup; - } else if (index === -1 && updatedBackups.length < currentLimit) { - // New backup that doesn't exist yet (e.g., just created) - updatedBackups.unshift(newBackup); - } - }); - - return updatedBackups; - }); - - setTotalBackups(response.total); - } catch (e) { - alert((e as Error).message); - } - - isReloadInProgress.current = false; }; const loadMoreBackups = async () => { - if (isLoadingMore || !hasMore || isLazyLoadInProgress.current) { + if (isLoadingMore || !hasMore) { return; } - isLazyLoadInProgress.current = true; setIsLoadingMore(true); + const newLimit = currentLimit + BACKUPS_PAGE_SIZE; + setCurrentLimit(newLimit); + + const requestTime = Date.now(); + lastRequestTimeRef.current = requestTime; + try { - const newLimit = currentLimit + BACKUPS_PAGE_SIZE; const response = await 
backupsApi.getBackups(database.id, newLimit, 0); + if (lastRequestTimeRef.current !== requestTime) { setIsLoadingMore(false); return; } + setBackups(response.backups); - setCurrentLimit(newLimit); setTotalBackups(response.total); setHasMore(response.backups.length < response.total); } catch (e) { - alert((e as Error).message); + if (lastRequestTimeRef.current === requestTime) { + alert((e as Error).message); + } } setIsLoadingMore(false); - isLazyLoadInProgress.current = false; }; const makeBackup = async () => { @@ -196,7 +164,7 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef try { await backupsApi.cancelBackup(backupId); - await reloadInProgressBackups(); + await loadBackups(); } catch (e) { alert((e as Error).message); } @@ -220,22 +188,13 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef return () => {}; }, [database]); - // Reload backups that are in progress to update their state useEffect(() => { - const hasInProgressBackups = backups.some( - (backup) => backup.status === BackupStatus.IN_PROGRESS, - ); - - if (!hasInProgressBackups) { - return; - } - - const timeoutId = setTimeout(async () => { - await reloadInProgressBackups(); + const intervalId = setInterval(() => { + loadBackups(); }, 1_000); - return () => clearTimeout(timeoutId); - }, [backups]); + return () => clearInterval(intervalId); + }, [currentLimit]); useEffect(() => { if (downloadingBackupId) { diff --git a/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx b/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx index a16a61a..ff434eb 100644 --- a/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx +++ b/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx @@ -20,6 +20,7 @@ import { type BackupConfig, BackupEncryption, type DatabasePlan, + RetentionPolicyType, backupConfigApi, } from '../../../entity/backups'; import { BackupNotificationType } from '../../../entity/backups/model/BackupNotificationType'; @@ -64,6
+65,15 @@ const weekdayOptions = [ { value: 7, label: 'Sun' }, ]; +const retentionPolicyOptions = [ + { + label: 'GFS (keep last N hourly, daily, weekly, monthly and yearly backups)', + value: RetentionPolicyType.GFS, + }, + { label: 'Time period (last N days)', value: RetentionPolicyType.TimePeriod }, + { label: 'Count (N last backups)', value: RetentionPolicyType.Count }, +]; + export const EditBackupConfigComponent = ({ user, database, @@ -95,6 +105,7 @@ export const EditBackupConfigComponent = ({ (backupConfig?.maxBackupSizeMb ?? 0) > 0 || (backupConfig?.maxBackupsTotalSizeMb ?? 0) > 0; const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues); + const [isShowGfsHint, setShowGfsHint] = useState(false); const timeFormat = useMemo(() => { const is12 = getIs12Hour(); @@ -242,8 +253,15 @@ export const EditBackupConfigComponent = ({ timeOfDay: '00:00', }, storage: undefined, - storePeriod: + retentionPolicyType: RetentionPolicyType.GFS, + retentionTimePeriod: plan.maxStoragePeriod === Period.FOREVER ? Period.THREE_MONTH : plan.maxStoragePeriod, + retentionCount: 100, + retentionGfsHours: 24, + retentionGfsDays: 7, + retentionGfsWeeks: 4, + retentionGfsMonths: 12, + retentionGfsYears: 3, sendNotificationsOn: [BackupNotificationType.BackupFailed], isRetryIfFailed: true, maxFailedTriesCount: 3, @@ -295,10 +313,27 @@ export const EditBackupConfigComponent = ({ ? getLocalDayOfMonth(backupInterval.dayOfMonth, backupInterval.timeOfDay) : backupInterval?.dayOfMonth; - // mandatory-field check + const retentionPolicyType = backupConfig.retentionPolicyType ?? RetentionPolicyType.TimePeriod; + + const isRetentionValid = (() => { + switch (retentionPolicyType) { + case RetentionPolicyType.TimePeriod: + return Boolean(backupConfig.retentionTimePeriod); + case RetentionPolicyType.Count: + return (backupConfig.retentionCount ?? 0) > 0; + case RetentionPolicyType.GFS: + return ( + (backupConfig.retentionGfsHours ?? 0) > 0 || (backupConfig.retentionGfsDays ?? 0) > 0 || + (backupConfig.retentionGfsWeeks ??
0) > 0 || + (backupConfig.retentionGfsMonths ?? 0) > 0 || + (backupConfig.retentionGfsYears ?? 0) > 0 + ); + } + })(); + const isAllFieldsFilled = !backupConfig.isBackupsEnabled || - (Boolean(backupConfig.storePeriod) && + (isRetentionValid && Boolean(backupConfig.storage?.id) && Boolean(backupConfig.encryption) && Boolean(backupInterval?.interval) && @@ -467,7 +502,7 @@ export const EditBackupConfigComponent = ({ )} -
+
Storage
updateBackupConfig({ storePeriod: v })} + value={retentionPolicyType} + options={retentionPolicyOptions} size="small" className="w-[200px]" - options={availablePeriods} + popupMatchSelectWidth={false} + onChange={(v) => { + const type = v as RetentionPolicyType; + const updates: Partial = { retentionPolicyType: type }; + + if (type === RetentionPolicyType.GFS) { + updates.retentionGfsHours = 24; + updates.retentionGfsDays = 7; + updates.retentionGfsWeeks = 4; + updates.retentionGfsMonths = 12; + updates.retentionGfsYears = 3; + } else if (type === RetentionPolicyType.Count) { + updates.retentionCount = 100; + } + + updateBackupConfig(updates); + }} /> - - - + {retentionPolicyType === RetentionPolicyType.TimePeriod && ( +
+