Mirror of https://github.com/databasus/databasus.git (synced 2026-04-06 00:32:03 +02:00)

Compare commits: 9 commits

d4763f26b2
0e389ba16b
594a3294c6
4e4a323cf1
7d9ecf697b
755c420157
ff73627287
9c9ab00ace
7366e21a1a
@@ -272,6 +272,10 @@ func runBackgroundTasks(log *slog.Logger) {
 		backuping.GetBackupsScheduler().Run(ctx)
 	})
 
+	go runWithPanicLogging(log, "backup cleaner background service", func() {
+		backuping.GetBackupCleaner().Run(ctx)
+	})
+
 	go runWithPanicLogging(log, "restore background service", func() {
 		restoring.GetRestoresScheduler().Run(ctx)
 	})
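Note: the helper runWithPanicLogging is referenced here but is not part of this diff. A minimal sketch of what such a wrapper typically looks like, with the signature inferred from the call sites above (an assumption, not the repository's actual implementation):

package main

import "log/slog"

// Hypothetical sketch: run a background task and, if it panics, log the
// panic instead of letting it crash the whole process.
func runWithPanicLogging(log *slog.Logger, name string, task func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Error("background service panicked", "service", name, "panic", r)
		}
	}()
	task()
}

func main() {
	log := slog.Default()
	runWithPanicLogging(log, "demo service", func() { panic("boom") })
	log.Info("process still alive")
}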
@@ -70,16 +70,18 @@ func (n *BackuperNode) Run(ctx context.Context) {
 	}
 
 	backupHandler := func(backupID uuid.UUID, isCallNotifier bool) {
-		n.MakeBackup(backupID, isCallNotifier)
-		if err := n.backupNodesRegistry.PublishBackupCompletion(n.nodeID, backupID); err != nil {
-			n.logger.Error(
-				"Failed to publish backup completion",
-				"error",
-				err,
-				"backupID",
-				backupID,
-			)
-		}
+		go func() {
+			n.MakeBackup(backupID, isCallNotifier)
+			if err := n.backupNodesRegistry.PublishBackupCompletion(n.nodeID, backupID); err != nil {
+				n.logger.Error(
+					"Failed to publish backup completion",
+					"error",
+					err,
+					"backupID",
+					backupID,
+				)
+			}
+		}()
 	}
 
 	err := n.backupNodesRegistry.SubscribeNodeForBackupsAssignment(n.nodeID, backupHandler)
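The effect of the change above: MakeBackup now runs on its own goroutine, so the assignment callback returns immediately and several backups can proceed in parallel on one node. A small self-contained illustration of the difference (toy names, not repository code):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	handler := func(id int) {
		wg.Add(1)
		// Returning immediately, as the new handler does, lets the
		// dispatcher hand out the next assignment without waiting.
		go func() {
			defer wg.Done()
			time.Sleep(100 * time.Millisecond) // simulate backup work
			fmt.Println("finished backup", id)
		}()
	}

	start := time.Now()
	for id := 1; id <= 3; id++ {
		handler(id)
	}
	wg.Wait()
	// All three overlap, so total time is ~100ms rather than ~300ms.
	fmt.Println("elapsed:", time.Since(start).Round(10*time.Millisecond))
}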
@@ -157,21 +159,41 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
 
 	start := time.Now().UTC()
 
+	ctx, cancel := context.WithCancel(context.Background())
+	n.backupCancelManager.RegisterTask(backup.ID, cancel)
+	defer n.backupCancelManager.UnregisterTask(backup.ID)
+
 	backupProgressListener := func(
 		completedMBs float64,
 	) {
 		backup.BackupSizeMb = completedMBs
 		backup.BackupDurationMs = time.Since(start).Milliseconds()
+
+		// Check size limit (0 = unlimited)
+		if backupConfig.MaxBackupSizeMB > 0 &&
+			completedMBs > float64(backupConfig.MaxBackupSizeMB) {
+			errMsg := fmt.Sprintf(
+				"backup size (%.2f MB) exceeded maximum allowed size (%d MB)",
+				completedMBs,
+				backupConfig.MaxBackupSizeMB,
+			)
+
+			backup.Status = backups_core.BackupStatusFailed
+			backup.IsSkipRetry = true
+			backup.FailMessage = &errMsg
+			if err := n.backupRepository.Save(backup); err != nil {
+				n.logger.Error("Failed to save backup with size exceeded error", "error", err)
+			}
+			cancel() // Cancel the backup context
+
+			return
+		}
 
 		if err := n.backupRepository.Save(backup); err != nil {
 			n.logger.Error("Failed to update backup progress", "error", err)
 		}
 	}
 
-	ctx, cancel := context.WithCancel(context.Background())
-	n.backupCancelManager.RegisterTask(backup.ID, cancel)
-	defer n.backupCancelManager.UnregisterTask(backup.ID)
-
 	backupMetadata, err := n.createBackupUseCase.Execute(
 		ctx,
 		backup.ID,
@@ -181,6 +203,29 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
		backupProgressListener,
	)
	if err != nil {
		// Check if backup was already marked as failed by progress listener (e.g., size limit exceeded)
		// If so, skip error handling to avoid overwriting the status
		currentBackup, fetchErr := n.backupRepository.FindByID(backup.ID)
		if fetchErr == nil && currentBackup.Status == backups_core.BackupStatusFailed {
			n.logger.Warn(
				"Backup already marked as failed by progress listener, skipping error handling",
				"backupId",
				backup.ID,
				"failMessage",
				*currentBackup.FailMessage,
			)

			// Still call notification for size limit failures
			n.SendBackupNotification(
				backupConfig,
				currentBackup,
				backups_config.NotificationBackupFailed,
				currentBackup.FailMessage,
			)

			return
		}

		errMsg := err.Error()

		// Log detailed error information for debugging
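Why the error path above re-reads the backup record: when the progress listener hits the size limit, it saves the failed status and cancels the context, which makes Execute return a generic cancellation error. Re-fetching lets the caller distinguish that case from a genuine backup failure. A minimal illustration of the cancellation side:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // what the progress listener does after saving the failed status

	// The in-flight use case sees only a generic cancellation error:
	fmt.Println(errors.Is(ctx.Err(), context.Canceled)) // true
	// ...so MakeBackup re-fetches the backup record to learn the real reason.
}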
@@ -1,13 +1,10 @@
|
||||
package backuping
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
common "databasus-backend/internal/features/backups/backups/common"
|
||||
backups_core "databasus-backend/internal/features/backups/backups/core"
|
||||
backups_config "databasus-backend/internal/features/backups/config"
|
||||
"databasus-backend/internal/features/databases"
|
||||
@@ -18,7 +15,6 @@ import (
 	workspaces_testing "databasus-backend/internal/features/workspaces/testing"
 	cache_utils "databasus-backend/internal/util/cache"
 
-	"github.com/google/uuid"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 )
@@ -158,35 +154,120 @@ func Test_BackupExecuted_NotificationSent(t *testing.T) {
 	})
 }
 
-type CreateFailedBackupUsecase struct {
-}
-
-func (uc *CreateFailedBackupUsecase) Execute(
-	ctx context.Context,
-	backupID uuid.UUID,
-	backupConfig *backups_config.BackupConfig,
-	database *databases.Database,
-	storage *storages.Storage,
-	backupProgressListener func(completedMBs float64),
-) (*common.BackupMetadata, error) {
-	backupProgressListener(10)
-	return nil, errors.New("backup failed")
-}
-
-type CreateSuccessBackupUsecase struct{}
-
-func (uc *CreateSuccessBackupUsecase) Execute(
-	ctx context.Context,
-	backupID uuid.UUID,
-	backupConfig *backups_config.BackupConfig,
-	database *databases.Database,
-	storage *storages.Storage,
-	backupProgressListener func(completedMBs float64),
-) (*common.BackupMetadata, error) {
-	backupProgressListener(10)
-	return &common.BackupMetadata{
-		EncryptionSalt: nil,
-		EncryptionIV:   nil,
-		Encryption:     backups_config.BackupEncryptionNone,
-	}, nil
-}
+func Test_BackupSizeLimits(t *testing.T) {
+	cache_utils.ClearAllCache()
+	user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
+	router := CreateTestRouter()
+	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
+	storage := storages.CreateTestStorage(workspace.ID)
+	notifier := notifiers.CreateTestNotifier(workspace.ID)
+	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
+
+	defer func() {
+		// cleanup backups first
+		backups, _ := backupRepository.FindByDatabaseID(database.ID)
+		for _, backup := range backups {
+			backupRepository.DeleteByID(backup.ID)
+		}
+
+		databases.RemoveTestDatabase(database)
+		time.Sleep(50 * time.Millisecond) // Wait for cascading deletes
+		notifiers.RemoveTestNotifier(notifier)
+		storages.RemoveTestStorage(storage.ID)
+		workspaces_testing.RemoveTestWorkspace(workspace, router)
+	}()
+
+	t.Run("UnlimitedSize_MaxBackupSizeMBIsZero_BackupCompletes", func(t *testing.T) {
+		// Enable backups with unlimited size (0)
+		backupConfig := backups_config.EnableBackupsForTestDatabase(database.ID, storage)
+		backupConfig.MaxBackupSizeMB = 0 // unlimited
+		backupConfig, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
+		assert.NoError(t, err)
+
+		backuperNode := CreateTestBackuperNode()
+		backuperNode.createBackupUseCase = &CreateLargeBackupUsecase{}
+
+		// Create a backup record
+		backup := &backups_core.Backup{
+			DatabaseID: database.ID,
+			StorageID:  storage.ID,
+			Status:     backups_core.BackupStatusInProgress,
+			CreatedAt:  time.Now().UTC(),
+		}
+		err = backupRepository.Save(backup)
+		assert.NoError(t, err)
+
+		backuperNode.MakeBackup(backup.ID, false)
+
+		// Verify backup completed successfully even with large size
+		updatedBackup, err := backupRepository.FindByID(backup.ID)
+		assert.NoError(t, err)
+		assert.Equal(t, backups_core.BackupStatusCompleted, updatedBackup.Status)
+		assert.Equal(t, float64(10000), updatedBackup.BackupSizeMb)
+		assert.Nil(t, updatedBackup.FailMessage)
+	})
+
+	t.Run("SizeExceeded_BackupFailedWithIsSkipRetry", func(t *testing.T) {
+		// Enable backups with 5 MB limit
+		backupConfig := backups_config.EnableBackupsForTestDatabase(database.ID, storage)
+		backupConfig.MaxBackupSizeMB = 5
+		backupConfig, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
+		assert.NoError(t, err)
+
+		backuperNode := CreateTestBackuperNode()
+		backuperNode.createBackupUseCase = &CreateProgressiveBackupUsecase{}
+
+		// Create a backup record
+		backup := &backups_core.Backup{
+			DatabaseID: database.ID,
+			StorageID:  storage.ID,
+			Status:     backups_core.BackupStatusInProgress,
+			CreatedAt:  time.Now().UTC(),
+		}
+		err = backupRepository.Save(backup)
+		assert.NoError(t, err)
+
+		backuperNode.MakeBackup(backup.ID, false)
+
+		// Verify backup was marked as failed with IsSkipRetry=true
+		updatedBackup, err := backupRepository.FindByID(backup.ID)
+		assert.NoError(t, err)
+		assert.Equal(t, backups_core.BackupStatusFailed, updatedBackup.Status)
+		assert.True(t, updatedBackup.IsSkipRetry)
+		assert.NotNil(t, updatedBackup.FailMessage)
+		assert.Contains(t, *updatedBackup.FailMessage, "exceeded maximum allowed size")
+		assert.Contains(t, *updatedBackup.FailMessage, "10.00 MB")
+		assert.Contains(t, *updatedBackup.FailMessage, "5 MB")
+		assert.Greater(t, updatedBackup.BackupSizeMb, float64(5))
+	})
+
+	t.Run("SizeWithinLimit_BackupCompletes", func(t *testing.T) {
+		// Enable backups with 100 MB limit
+		backupConfig := backups_config.EnableBackupsForTestDatabase(database.ID, storage)
+		backupConfig.MaxBackupSizeMB = 100
+		backupConfig, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
+		assert.NoError(t, err)
+
+		backuperNode := CreateTestBackuperNode()
+		backuperNode.createBackupUseCase = &CreateMediumBackupUsecase{}
+
+		// Create a backup record
+		backup := &backups_core.Backup{
+			DatabaseID: database.ID,
+			StorageID:  storage.ID,
+			Status:     backups_core.BackupStatusInProgress,
+			CreatedAt:  time.Now().UTC(),
+		}
+		err = backupRepository.Save(backup)
+		assert.NoError(t, err)
+
+		backuperNode.MakeBackup(backup.ID, false)
+
+		// Verify backup completed successfully
+		updatedBackup, err := backupRepository.FindByID(backup.ID)
+		assert.NoError(t, err)
+		assert.Equal(t, backups_core.BackupStatusCompleted, updatedBackup.Status)
+		assert.Equal(t, float64(50), updatedBackup.BackupSizeMb)
+		assert.Nil(t, updatedBackup.FailMessage)
+	})
+}
backend/internal/features/backups/backups/backuping/cleaner.go (new file, 242 lines)
@@ -0,0 +1,242 @@
package backuping

import (
	"context"
	"fmt"
	"log/slog"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/uuid"

	backups_core "databasus-backend/internal/features/backups/backups/core"
	backups_config "databasus-backend/internal/features/backups/config"
	"databasus-backend/internal/features/storages"
	util_encryption "databasus-backend/internal/util/encryption"
	"databasus-backend/internal/util/period"
)

const (
	cleanerTickerInterval = 1 * time.Minute
)

type BackupCleaner struct {
	backupRepository      *backups_core.BackupRepository
	storageService        *storages.StorageService
	backupConfigService   *backups_config.BackupConfigService
	fieldEncryptor        util_encryption.FieldEncryptor
	logger                *slog.Logger
	backupRemoveListeners []backups_core.BackupRemoveListener

	runOnce sync.Once
	hasRun  atomic.Bool
}

func (c *BackupCleaner) Run(ctx context.Context) {
	wasAlreadyRun := c.hasRun.Load()

	c.runOnce.Do(func() {
		c.hasRun.Store(true)

		if ctx.Err() != nil {
			return
		}

		ticker := time.NewTicker(cleanerTickerInterval)
		defer ticker.Stop()

		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				if err := c.cleanOldBackups(); err != nil {
					c.logger.Error("Failed to clean old backups", "error", err)
				}

				if err := c.cleanExceededBackups(); err != nil {
					c.logger.Error("Failed to clean exceeded backups", "error", err)
				}
			}
		}
	})

	if wasAlreadyRun {
		panic(fmt.Sprintf("%T.Run() called multiple times", c))
	}
}
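Run combines sync.Once with an atomic flag: the first caller runs the ticker loop, and any later caller panics loudly instead of silently returning. A compact standalone sketch of the same guard (hypothetical type, same mechanics):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type service struct {
	runOnce sync.Once
	hasRun  atomic.Bool
}

func (s *service) Run() {
	wasAlreadyRun := s.hasRun.Load()
	s.runOnce.Do(func() {
		s.hasRun.Store(true)
		fmt.Println("running") // the real Run blocks in its ticker loop here
	})
	if wasAlreadyRun {
		panic(fmt.Sprintf("%T.Run() called multiple times", s))
	}
}

func main() {
	s := &service{}
	s.Run() // first call runs the body

	defer func() { fmt.Println("recovered:", recover()) }()
	s.Run() // second call panics
}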
func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
	for _, listener := range c.backupRemoveListeners {
		if err := listener.OnBeforeBackupRemove(backup); err != nil {
			return err
		}
	}

	storage, err := c.storageService.GetStorageByID(backup.StorageID)
	if err != nil {
		return err
	}

	err = storage.DeleteFile(c.fieldEncryptor, backup.ID)
	if err != nil {
		// We do not return an error here: cleanup is sometimes performed
		// before an unavailable storage is removed or changed, so we should
		// proceed even on failure. An S3 bucket or other storage may be
		// temporarily unreachable, and that should not block cleanup.
		c.logger.Error("Failed to delete backup file", "error", err)
	}

	return c.backupRepository.DeleteByID(backup.ID)
}

func (c *BackupCleaner) AddBackupRemoveListener(listener backups_core.BackupRemoveListener) {
	c.backupRemoveListeners = append(c.backupRemoveListeners, listener)
}

func (c *BackupCleaner) cleanOldBackups() error {
	enabledBackupConfigs, err := c.backupConfigService.GetBackupConfigsWithEnabledBackups()
	if err != nil {
		return err
	}

	for _, backupConfig := range enabledBackupConfigs {
		backupStorePeriod := backupConfig.StorePeriod

		if backupStorePeriod == period.PeriodForever {
			continue
		}

		storeDuration := backupStorePeriod.ToDuration()
		dateBeforeBackupsShouldBeDeleted := time.Now().UTC().Add(-storeDuration)

		oldBackups, err := c.backupRepository.FindBackupsBeforeDate(
			backupConfig.DatabaseID,
			dateBeforeBackupsShouldBeDeleted,
		)
		if err != nil {
			c.logger.Error(
				"Failed to find old backups for database",
				"databaseId",
				backupConfig.DatabaseID,
				"error",
				err,
			)
			continue
		}

		for _, backup := range oldBackups {
			if err := c.DeleteBackup(backup); err != nil {
				c.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", err)
				continue
			}

			c.logger.Info(
				"Deleted old backup",
				"backupId",
				backup.ID,
				"databaseId",
				backupConfig.DatabaseID,
			)
		}
	}

	return nil
}
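The retention cutoff used by cleanOldBackups is simple arithmetic: anything created before now minus the store period is eligible for deletion. A tiny standalone illustration (values chosen to match the tests later in this page):

package main

import (
	"fmt"
	"time"
)

func main() {
	storeDuration := 7 * 24 * time.Hour // e.g. a one-week store period
	cutoff := time.Now().UTC().Add(-storeDuration)

	createdAt := time.Now().UTC().Add(-10 * 24 * time.Hour) // a 10-day-old backup
	fmt.Println(createdAt.Before(cutoff))                   // true -> delete
}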
func (c *BackupCleaner) cleanExceededBackups() error {
	enabledBackupConfigs, err := c.backupConfigService.GetBackupConfigsWithEnabledBackups()
	if err != nil {
		return err
	}

	for _, backupConfig := range enabledBackupConfigs {
		if backupConfig.MaxBackupsTotalSizeMB <= 0 {
			continue
		}

		if err := c.cleanExceededBackupsForDatabase(
			backupConfig.DatabaseID,
			backupConfig.MaxBackupsTotalSizeMB,
		); err != nil {
			c.logger.Error(
				"Failed to clean exceeded backups for database",
				"databaseId",
				backupConfig.DatabaseID,
				"error",
				err,
			)
			continue
		}
	}

	return nil
}

func (c *BackupCleaner) cleanExceededBackupsForDatabase(
	databaseID uuid.UUID,
	limitperDbMB int64,
) error {
	for {
		backupsTotalSizeMB, err := c.backupRepository.GetTotalSizeByDatabase(databaseID)
		if err != nil {
			return err
		}

		if backupsTotalSizeMB <= float64(limitperDbMB) {
			break
		}

		oldestBackups, err := c.backupRepository.FindOldestByDatabaseExcludingInProgress(
			databaseID,
			1,
		)
		if err != nil {
			return err
		}

		if len(oldestBackups) == 0 {
			c.logger.Warn(
				"No backups to delete but still over limit",
				"databaseId",
				databaseID,
				"totalSizeMB",
				backupsTotalSizeMB,
				"limitMB",
				limitperDbMB,
			)
			break
		}

		backup := oldestBackups[0]
		if err := c.DeleteBackup(backup); err != nil {
			c.logger.Error(
				"Failed to delete exceeded backup",
				"backupId",
				backup.ID,
				"databaseId",
				databaseID,
				"error",
				err,
			)
			return err
		}

		c.logger.Info(
			"Deleted exceeded backup",
			"backupId",
			backup.ID,
			"databaseId",
			databaseID,
			"backupSizeMB",
			backup.BackupSizeMb,
			"totalSizeMB",
			backupsTotalSizeMB,
			"limitMB",
			limitperDbMB,
		)
	}

	return nil
}
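To make the loop above concrete, here is a small standalone simulation of the same delete-oldest policy on hypothetical in-memory data: with a 30 MB limit and five 10 MB backups, the two oldest are deleted and the loop stops once the total drops to the limit (the same scenario the Test_CleanExceededBackups_WhenOverLimit test below asserts):

package main

import "fmt"

func main() {
	// Oldest first; sizes in MB. Mirrors cleanExceededBackupsForDatabase.
	backups := []float64{10, 10, 10, 10, 10}
	limitMB := 30.0

	total := func() float64 {
		sum := 0.0
		for _, b := range backups {
			sum += b
		}
		return sum
	}

	deleted := 0
	for total() > limitMB && len(backups) > 0 {
		backups = backups[1:] // delete the oldest backup
		deleted++
	}
	fmt.Println(deleted, total()) // 2 30: two oldest deleted, now at the limit
}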
@@ -0,0 +1,491 @@
package backuping

import (
	"testing"
	"time"

	backups_core "databasus-backend/internal/features/backups/backups/core"
	backups_config "databasus-backend/internal/features/backups/config"
	"databasus-backend/internal/features/databases"
	"databasus-backend/internal/features/intervals"
	"databasus-backend/internal/features/notifiers"
	"databasus-backend/internal/features/storages"
	users_enums "databasus-backend/internal/features/users/enums"
	users_testing "databasus-backend/internal/features/users/testing"
	workspaces_testing "databasus-backend/internal/features/workspaces/testing"
	"databasus-backend/internal/storage"
	"databasus-backend/internal/util/period"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
)

func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create backup interval
	interval := createTestInterval()

	backupConfig := &backups_config.BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek,
		StorageID:        &storage.ID,
		BackupIntervalID: interval.ID,
		BackupInterval:   interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)

	// Create backups with different ages
	now := time.Now().UTC()
	oldBackup1 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    now.Add(-10 * 24 * time.Hour), // 10 days old
	}
	oldBackup2 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    now.Add(-8 * 24 * time.Hour), // 8 days old
	}
	recentBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    now.Add(-3 * 24 * time.Hour), // 3 days old
	}

	err = backupRepository.Save(oldBackup1)
	assert.NoError(t, err)
	err = backupRepository.Save(oldBackup2)
	assert.NoError(t, err)
	err = backupRepository.Save(recentBackup)
	assert.NoError(t, err)

	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanOldBackups()
	assert.NoError(t, err)

	// Verify old backups deleted, recent backup remains
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(remainingBackups))
	assert.Equal(t, recentBackup.ID, remainingBackups[0].ID)
}

func Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create backup interval
	interval := createTestInterval()

	backupConfig := &backups_config.BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodForever,
		StorageID:        &storage.ID,
		BackupIntervalID: interval.ID,
		BackupInterval:   interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)

	// Create very old backup
	oldBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    time.Now().UTC().Add(-365 * 24 * time.Hour), // 1 year old
	}
	err = backupRepository.Save(oldBackup)
	assert.NoError(t, err)

	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanOldBackups()
	assert.NoError(t, err)

	// Verify backup still exists
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(remainingBackups))
	assert.Equal(t, oldBackup.ID, remainingBackups[0].ID)
}

func Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create backup interval
	interval := createTestInterval()

	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 100, // 100 MB limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)

	// Create 3 backups totaling 50MB (under limit)
	for i := 0; i < 3; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 16.67,
			CreatedAt:    time.Now().UTC().Add(-time.Duration(i) * time.Hour),
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
	}

	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)

	// Verify all backups remain
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 3, len(remainingBackups))
}

func Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create backup interval
	interval := createTestInterval()

	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 30, // 30 MB limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)

	// Create 5 backups of 10MB each (total 50MB, over 30MB limit)
	now := time.Now().UTC()
	var backupIDs []uuid.UUID
	for i := 0; i < 5; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 10,
			CreatedAt:    now.Add(-time.Duration(4-i) * time.Hour), // Oldest first
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
		backupIDs = append(backupIDs, backup.ID)
	}

	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)

	// Verify 2 oldest backups deleted, 3 newest remain
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 3, len(remainingBackups))

	// Check that the newest 3 backups remain
	remainingIDs := make(map[uuid.UUID]bool)
	for _, backup := range remainingBackups {
		remainingIDs[backup.ID] = true
	}
	assert.False(t, remainingIDs[backupIDs[0]]) // Oldest deleted
	assert.False(t, remainingIDs[backupIDs[1]]) // 2nd oldest deleted
	assert.True(t, remainingIDs[backupIDs[2]])  // 3rd remains
	assert.True(t, remainingIDs[backupIDs[3]])  // 4th remains
	assert.True(t, remainingIDs[backupIDs[4]])  // Newest remains
}

func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create backup interval
	interval := createTestInterval()

	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 50, // 50 MB limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)

	now := time.Now().UTC()

	// Create 3 completed backups of 30MB each
	completedBackups := make([]*backups_core.Backup, 3)
	for i := 0; i < 3; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 30,
			CreatedAt:    now.Add(-time.Duration(3-i) * time.Hour),
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
		completedBackups[i] = backup
	}

	// Create 1 in-progress backup (should be excluded from size calculation and deletion)
	inProgressBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusInProgress,
		BackupSizeMb: 10,
		CreatedAt:    now,
	}
	err = backupRepository.Save(inProgressBackup)
	assert.NoError(t, err)

	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)

	// Verify: only completed backups deleted, in-progress remains
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)

	// Should have in-progress + 1 completed (total 40MB completed + 10MB in-progress)
	assert.GreaterOrEqual(t, len(remainingBackups), 2)

	// Verify in-progress backup still exists
	var inProgressFound bool
	for _, backup := range remainingBackups {
		if backup.ID == inProgressBackup.ID {
			inProgressFound = true
			assert.Equal(t, backups_core.BackupStatusInProgress, backup.Status)
		}
	}
	assert.True(t, inProgressFound, "In-progress backup should not be deleted")
}

func Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create backup interval
	interval := createTestInterval()

	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 0, // No size limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)

	// Create large backups
	for i := 0; i < 10; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 100,
			CreatedAt:    time.Now().UTC().Add(-time.Duration(i) * time.Hour),
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
	}

	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)

	// Verify all backups remain
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 10, len(remainingBackups))
}

func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	// Create completed backups
	completedBackup1 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10.5,
		CreatedAt:    time.Now().UTC(),
	}
	completedBackup2 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 20.3,
		CreatedAt:    time.Now().UTC(),
	}
	// Create failed backup (should be included)
	failedBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusFailed,
		BackupSizeMb: 5.2,
		CreatedAt:    time.Now().UTC(),
	}
	// Create in-progress backup (should be excluded)
	inProgressBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusInProgress,
		BackupSizeMb: 100,
		CreatedAt:    time.Now().UTC(),
	}

	err := backupRepository.Save(completedBackup1)
	assert.NoError(t, err)
	err = backupRepository.Save(completedBackup2)
	assert.NoError(t, err)
	err = backupRepository.Save(failedBackup)
	assert.NoError(t, err)
	err = backupRepository.Save(inProgressBackup)
	assert.NoError(t, err)

	// Calculate total size
	totalSize, err := backupRepository.GetTotalSizeByDatabase(database.ID)
	assert.NoError(t, err)

	// Should be 10.5 + 20.3 + 5.2 = 36.0 (excluding in-progress 100)
	assert.InDelta(t, 36.0, totalSize, 0.1)
}

// Mock listener for testing
type mockBackupRemoveListener struct {
	onBeforeBackupRemove func(*backups_core.Backup) error
}

func (m *mockBackupRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error {
	if m.onBeforeBackupRemove != nil {
		return m.onBeforeBackupRemove(backup)
	}

	return nil
}

// Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase verifies resilience
// when storage becomes unavailable. Even if storage.DeleteFile fails (e.g., storage is offline,
// credentials changed, or storage was deleted), the backup record should still be removed from
// the database. This prevents orphaned backup records when storage is no longer accessible.
func Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	testStorage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, testStorage, notifier)

	backup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    testStorage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    time.Now().UTC(),
	}
	err := backupRepository.Save(backup)
	assert.NoError(t, err)

	cleaner := GetBackupCleaner()

	err = cleaner.DeleteBackup(backup)
	assert.NoError(t, err, "DeleteBackup should succeed even when storage file doesn't exist")

	deletedBackup, err := backupRepository.FindByID(backup.ID)
	assert.Error(t, err, "Backup should not exist in database")
	assert.Nil(t, deletedBackup)
}

func createTestInterval() *intervals.Interval {
	timeOfDay := "04:00"
	interval := &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}

	err := storage.GetDb().Create(interval).Error
	if err != nil {
		panic(err)
	}

	return interval
}
@@ -24,6 +24,17 @@ var backupRepository = &backups_core.BackupRepository{}

var taskCancelManager = tasks_cancellation.GetTaskCancelManager()

var backupCleaner = &BackupCleaner{
	backupRepository:      backupRepository,
	storageService:        storages.GetStorageService(),
	backupConfigService:   backups_config.GetBackupConfigService(),
	fieldEncryptor:        encryption.GetFieldEncryptor(),
	logger:                logger.GetLogger(),
	backupRemoveListeners: []backups_core.BackupRemoveListener{},
	runOnce:               sync.Once{},
	hasRun:                atomic.Bool{},
}

var backupNodesRegistry = &BackupNodesRegistry{
	client: cache_utils.GetValkeyClient(),
	logger: logger.GetLogger(),

@@ -59,7 +70,6 @@ var backuperNode = &BackuperNode{
 var backupsScheduler = &BackupsScheduler{
 	backupRepository:    backupRepository,
 	backupConfigService: backups_config.GetBackupConfigService(),
-	storageService:      storages.GetStorageService(),
 	taskCancelManager:   taskCancelManager,
 	backupNodesRegistry: backupNodesRegistry,
 	lastBackupTime:      time.Now().UTC(),

@@ -81,3 +91,7 @@ func GetBackuperNode() *BackuperNode {
func GetBackupNodesRegistry() *BackupNodesRegistry {
	return backupNodesRegistry
}

func GetBackupCleaner() *BackupCleaner {
	return backupCleaner
}
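The wiring above follows a package-level singleton pattern: each service is constructed once at package initialization, and a Get accessor hands out the same instance everywhere. A stripped-down sketch of the idea (hypothetical type, not repository code):

package main

import "fmt"

type Cleaner struct{ name string }

// Constructed once when the package initializes.
var cleaner = &Cleaner{name: "backup cleaner"}

// Accessor returns the shared instance.
func GetCleaner() *Cleaner { return cleaner }

func main() {
	fmt.Println(GetCleaner() == GetCleaner()) // true: same instance
}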
@@ -1,8 +1,18 @@
 package backuping
 
 import (
-	"databasus-backend/internal/features/notifiers"
+	"context"
+	"errors"
+	"sync/atomic"
+	"time"
+
+	common "databasus-backend/internal/features/backups/backups/common"
+	backups_config "databasus-backend/internal/features/backups/config"
+	"databasus-backend/internal/features/databases"
+	"databasus-backend/internal/features/notifiers"
+	"databasus-backend/internal/features/storages"
 
 	"github.com/google/uuid"
 	"github.com/stretchr/testify/mock"
 )
@@ -17,3 +27,168 @@ func (m *MockNotificationSender) SendNotification(
) {
	m.Called(notifier, title, message)
}

type CreateFailedBackupUsecase struct{}

func (uc *CreateFailedBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	backupProgressListener(10)
	return nil, errors.New("backup failed")
}

type CreateSuccessBackupUsecase struct{}

func (uc *CreateSuccessBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	backupProgressListener(10)
	return &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}, nil
}

// CreateLargeBackupUsecase simulates a large backup (10000 MB)
type CreateLargeBackupUsecase struct{}

func (uc *CreateLargeBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	backupProgressListener(10000)
	return &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}, nil
}

// CreateProgressiveBackupUsecase simulates progressive size updates that exceed limit
type CreateProgressiveBackupUsecase struct{}

func (uc *CreateProgressiveBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	// Simulate progressive backup that grows beyond limit
	backupProgressListener(1)
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	backupProgressListener(3)
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	backupProgressListener(5)
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	backupProgressListener(10) // This exceeds the 5 MB limit
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	// Should not reach here due to cancellation
	return &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}, nil
}

// CreateMediumBackupUsecase simulates a 50 MB backup
type CreateMediumBackupUsecase struct{}

func (uc *CreateMediumBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	backupProgressListener(50)
	return &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}, nil
}

// MockTrackingBackupUsecase tracks backup use case calls for testing parallel execution
type MockTrackingBackupUsecase struct {
	callCount       atomic.Int32
	calledBackupIDs chan uuid.UUID
}

func NewMockTrackingBackupUsecase() *MockTrackingBackupUsecase {
	return &MockTrackingBackupUsecase{
		calledBackupIDs: make(chan uuid.UUID, 10),
	}
}

func (m *MockTrackingBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	m.callCount.Add(1)

	// Send backup ID to channel (non-blocking)
	select {
	case m.calledBackupIDs <- backupID:
	default:
	}

	// Simulate backup work
	time.Sleep(100 * time.Millisecond)
	backupProgressListener(10)

	return &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}, nil
}

func (m *MockTrackingBackupUsecase) GetCallCount() int32 {
	return m.callCount.Load()
}

func (m *MockTrackingBackupUsecase) GetCalledBackupIDs() []uuid.UUID {
	ids := []uuid.UUID{}
	for {
		select {
		case id := <-m.calledBackupIDs:
			ids = append(ids, id)
		default:
			return ids
		}
	}
}
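GetCalledBackupIDs above uses a select with a default case to drain the buffered channel without ever blocking the test. The same pattern in isolation:

package main

import "fmt"

// Non-blocking drain: read whatever is buffered and return as soon as the
// channel is empty, so the caller never waits.
func drain(ch chan int) []int {
	out := []int{}
	for {
		select {
		case v := <-ch:
			out = append(out, v)
		default:
			return out
		}
	}
}

func main() {
	ch := make(chan int, 10)
	ch <- 1
	ch <- 2
	fmt.Println(drain(ch)) // [1 2]
}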
@@ -13,10 +13,7 @@ import (
 	"databasus-backend/internal/config"
 	backups_core "databasus-backend/internal/features/backups/backups/core"
 	backups_config "databasus-backend/internal/features/backups/config"
-	"databasus-backend/internal/features/storages"
 	task_cancellation "databasus-backend/internal/features/tasks/cancellation"
-	"databasus-backend/internal/util/encryption"
-	"databasus-backend/internal/util/period"
 )
 
 const (

@@ -28,7 +25,6 @@ const (
 type BackupsScheduler struct {
 	backupRepository    *backups_core.BackupRepository
 	backupConfigService *backups_config.BackupConfigService
-	storageService      *storages.StorageService
 	taskCancelManager   *task_cancellation.TaskCancelManager
 	backupNodesRegistry *BackupNodesRegistry

@@ -84,10 +80,6 @@ func (s *BackupsScheduler) Run(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-ticker.C:
-			if err := s.cleanOldBackups(); err != nil {
-				s.logger.Error("Failed to clean old backups", "error", err)
-			}
-
 			if err := s.checkDeadNodesAndFailBackups(); err != nil {
 				s.logger.Error("Failed to check dead nodes and fail backups", "error", err)
 			}

@@ -166,6 +158,33 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
		return
	}

	// Check for existing in-progress backups
	inProgressBackups, err := s.backupRepository.FindByDatabaseIdAndStatus(
		databaseID,
		backups_core.BackupStatusInProgress,
	)
	if err != nil {
		s.logger.Error(
			"Failed to check for in-progress backups",
			"databaseId",
			databaseID,
			"error",
			err,
		)
		return
	}

	if len(inProgressBackups) > 0 {
		s.logger.Warn(
			"Backup already in progress for database, skipping new backup",
			"databaseId",
			databaseID,
			"existingBackupId",
			inProgressBackups[0].ID,
		)
		return
	}

	leastBusyNodeID, err := s.calculateLeastBusyNode()
	if err != nil {
		s.logger.Error(

@@ -266,6 +285,10 @@ func (s *BackupsScheduler) GetRemainedBackupTryCount(lastBackup *backups_core.Ba
		return 0
	}

	if lastBackup.IsSkipRetry {
		return 0
	}

	backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(lastBackup.DatabaseID)
	if err != nil {
		s.logger.Error("Failed to get backup config by database ID", "error", err)

@@ -298,74 +321,6 @@ func (s *BackupsScheduler) GetRemainedBackupTryCount(lastBackup *backups_core.Ba
 	return maxFailedTriesCount - len(lastFailedBackups)
 }
 
-func (s *BackupsScheduler) cleanOldBackups() error {
-	enabledBackupConfigs, err := s.backupConfigService.GetBackupConfigsWithEnabledBackups()
-	if err != nil {
-		return err
-	}
-
-	for _, backupConfig := range enabledBackupConfigs {
-		backupStorePeriod := backupConfig.StorePeriod
-
-		if backupStorePeriod == period.PeriodForever {
-			continue
-		}
-
-		storeDuration := backupStorePeriod.ToDuration()
-		dateBeforeBackupsShouldBeDeleted := time.Now().UTC().Add(-storeDuration)
-
-		oldBackups, err := s.backupRepository.FindBackupsBeforeDate(
-			backupConfig.DatabaseID,
-			dateBeforeBackupsShouldBeDeleted,
-		)
-		if err != nil {
-			s.logger.Error(
-				"Failed to find old backups for database",
-				"databaseId",
-				backupConfig.DatabaseID,
-				"error",
-				err,
-			)
-			continue
-		}
-
-		for _, backup := range oldBackups {
-			storage, err := s.storageService.GetStorageByID(backup.StorageID)
-			if err != nil {
-				s.logger.Error(
-					"Failed to get storage by ID",
-					"storageId",
-					backup.StorageID,
-					"error",
-					err,
-				)
-				continue
-			}
-
-			encryptor := encryption.GetFieldEncryptor()
-			err = storage.DeleteFile(encryptor, backup.ID)
-			if err != nil {
-				s.logger.Error("Failed to delete backup file", "backupId", backup.ID, "error", err)
-			}
-
-			if err := s.backupRepository.DeleteByID(backup.ID); err != nil {
-				s.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", err)
-				continue
-			}
-
-			s.logger.Info(
-				"Deleted old backup",
-				"backupId",
-				backup.ID,
-				"databaseId",
-				backupConfig.DatabaseID,
-			)
-		}
-	}
-
-	return nil
-}
-
 func (s *BackupsScheduler) runPendingBackups() error {
 	enabledBackupConfigs, err := s.backupConfigService.GetBackupConfigsWithEnabledBackups()
 	if err != nil {
@@ -1033,3 +1033,289 @@ func Test_StartBackup_WhenBackupFails_DecrementsActiveTaskCount(t *testing.T) {
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
func Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup(t *testing.T) {
|
||||
cache_utils.ClearAllCache()
|
||||
backuperNode := CreateTestBackuperNode()
|
||||
cancel := StartBackuperNodeForTest(t, backuperNode)
|
||||
defer StopBackuperNodeForTest(t, cancel, backuperNode)
|
||||
|
||||
user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
|
||||
router := CreateTestRouter()
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
notifier := notifiers.CreateTestNotifier(workspace.ID)
|
||||
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
|
||||
|
||||
defer func() {
|
||||
backups, _ := backupRepository.FindByDatabaseID(database.ID)
|
||||
for _, backup := range backups {
|
||||
backupRepository.DeleteByID(backup.ID)
|
||||
}
|
||||
|
||||
databases.RemoveTestDatabase(database)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
notifiers.RemoveTestNotifier(notifier)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}()
|
||||
|
||||
backupConfig, err := backups_config.GetBackupConfigService().GetBackupConfigByDbId(database.ID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
timeOfDay := "04:00"
|
||||
backupConfig.BackupInterval = &intervals.Interval{
|
||||
Interval: intervals.IntervalDaily,
|
||||
TimeOfDay: &timeOfDay,
|
||||
}
|
||||
backupConfig.IsBackupsEnabled = true
|
||||
backupConfig.StorePeriod = period.PeriodWeek
|
||||
backupConfig.Storage = storage
|
||||
backupConfig.StorageID = &storage.ID
|
||||
|
||||
_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create an in-progress backup manually
|
||||
inProgressBackup := &backups_core.Backup{
|
||||
DatabaseID: database.ID,
|
||||
StorageID: storage.ID,
|
||||
Status: backups_core.BackupStatusInProgress,
|
||||
BackupSizeMb: 0,
|
||||
CreatedAt: time.Now().UTC(),
|
||||
}
|
||||
err = backupRepository.Save(inProgressBackup)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Try to start a new backup - should be skipped
|
||||
GetBackupsScheduler().StartBackup(database.ID, false)
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
// Verify only 1 backup exists (the original in-progress one)
|
||||
backups, err := backupRepository.FindByDatabaseID(database.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, backups, 1)
|
||||
assert.Equal(t, backups_core.BackupStatusInProgress, backups[0].Status)
|
||||
assert.Equal(t, inProgressBackup.ID, backups[0].ID)
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
func Test_RunPendingBackups_WhenLastBackupFailedWithIsSkipRetry_SkipsBackupEvenWithRetriesEnabled(
|
||||
t *testing.T,
|
||||
) {
|
||||
cache_utils.ClearAllCache()
|
||||
backuperNode := CreateTestBackuperNode()
|
||||
cancel := StartBackuperNodeForTest(t, backuperNode)
|
||||
defer StopBackuperNodeForTest(t, cancel, backuperNode)
|
||||
|
||||
user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
|
||||
router := CreateTestRouter()
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
notifier := notifiers.CreateTestNotifier(workspace.ID)
|
||||
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
|
||||
|
||||
defer func() {
|
||||
backups, _ := backupRepository.FindByDatabaseID(database.ID)
|
||||
for _, backup := range backups {
|
||||
backupRepository.DeleteByID(backup.ID)
|
||||
}
|
||||
|
||||
databases.RemoveTestDatabase(database)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
notifiers.RemoveTestNotifier(notifier)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}()
|
||||
|
||||
// Enable backups with retries enabled and high retry count
|
||||
backupConfig, err := backups_config.GetBackupConfigService().GetBackupConfigByDbId(database.ID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
timeOfDay := "04:00"
|
||||
backupConfig.BackupInterval = &intervals.Interval{
|
||||
Interval: intervals.IntervalDaily,
|
||||
TimeOfDay: &timeOfDay,
|
||||
}
|
||||
backupConfig.IsBackupsEnabled = true
|
||||
backupConfig.StorePeriod = period.PeriodWeek
|
||||
backupConfig.Storage = storage
|
||||
backupConfig.StorageID = &storage.ID
|
||||
backupConfig.IsRetryIfFailed = true
|
||||
backupConfig.MaxFailedTriesCount = 5
|
||||
|
||||
_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a failed backup with IsSkipRetry set to true
|
||||
failMessage := "backup failed due to size limit exceeded"
|
||||
backupRepository.Save(&backups_core.Backup{
|
||||
DatabaseID: database.ID,
|
||||
StorageID: storage.ID,
|
||||
|
||||
Status: backups_core.BackupStatusFailed,
|
||||
FailMessage: &failMessage,
|
||||
IsSkipRetry: true,
|
||||
|
||||
CreatedAt: time.Now().UTC().Add(-1 * time.Hour),
|
||||
})
|
||||
|
||||
// Verify GetRemainedBackupTryCount returns 0 even though retries are enabled
|
||||
lastBackup, err := backupRepository.FindLastByDatabaseID(database.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, lastBackup)
|
||||
|
||||
remainedTries := GetBackupsScheduler().GetRemainedBackupTryCount(lastBackup)
|
||||
assert.Equal(t, 0, remainedTries, "Should return 0 tries when IsSkipRetry is true")
|
||||
|
||||
// Run the scheduler
|
||||
GetBackupsScheduler().runPendingBackups()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify no new backup was created (still only 1 backup exists)
|
||||
backups, err := backupRepository.FindByDatabaseID(database.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, backups, 1, "No retry should be attempted when IsSkipRetry is true")
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCalled(t *testing.T) {
	cache_utils.ClearAllCache()

	// Create mock tracking use case
	mockUseCase := NewMockTrackingBackupUsecase()

	// Create BackuperNode with mock use case
	backuperNode := CreateTestBackuperNodeWithUseCase(mockUseCase)
	cancel := StartBackuperNodeForTest(t, backuperNode)
	defer StopBackuperNodeForTest(t, cancel, backuperNode)

	// Create scheduler
	scheduler := CreateTestScheduler()
	schedulerCancel := StartSchedulerForTest(t, scheduler)
	defer schedulerCancel()

	// Setup test data
	user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	router := CreateTestRouter()
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)

	// Create 2 separate databases
	database1 := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	database2 := databases.CreateTestDatabase(workspace.ID, storage, notifier)

	defer func() {
		// Cleanup backups for database1
		backups1, _ := backupRepository.FindByDatabaseID(database1.ID)
		for _, backup := range backups1 {
			backupRepository.DeleteByID(backup.ID)
		}

		// Cleanup backups for database2
		backups2, _ := backupRepository.FindByDatabaseID(database2.ID)
		for _, backup := range backups2 {
			backupRepository.DeleteByID(backup.ID)
		}

		databases.RemoveTestDatabase(database1)
		databases.RemoveTestDatabase(database2)
		time.Sleep(50 * time.Millisecond)
		storages.RemoveTestStorage(storage.ID)
		notifiers.RemoveTestNotifier(notifier)
		workspaces_testing.RemoveTestWorkspace(workspace, router)
	}()

	// Enable backups for database1
	backupConfig1, err := backups_config.GetBackupConfigService().
		GetBackupConfigByDbId(database1.ID)
	assert.NoError(t, err)

	timeOfDay := "04:00"
	backupConfig1.BackupInterval = &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}
	backupConfig1.IsBackupsEnabled = true
	backupConfig1.StorePeriod = period.PeriodWeek
	backupConfig1.Storage = storage
	backupConfig1.StorageID = &storage.ID

	_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig1)
	assert.NoError(t, err)

	// Enable backups for database2
	backupConfig2, err := backups_config.GetBackupConfigService().
		GetBackupConfigByDbId(database2.ID)
	assert.NoError(t, err)

	backupConfig2.BackupInterval = &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}
	backupConfig2.IsBackupsEnabled = true
	backupConfig2.StorePeriod = period.PeriodWeek
	backupConfig2.Storage = storage
	backupConfig2.StorageID = &storage.ID

	_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig2)
	assert.NoError(t, err)

	// Start 2 backups simultaneously
	t.Log("Starting backup for database1")
	scheduler.StartBackup(database1.ID, false)

	t.Log("Starting backup for database2")
	scheduler.StartBackup(database2.ID, false)

	// Wait up to 10 seconds for both backups to complete
	t.Log("Waiting for both backups to complete...")

	success := assert.Eventually(t, func() bool {
		callCount := mockUseCase.GetCallCount()
		t.Logf("Current call count: %d/2", callCount)
		return callCount == 2
	}, 10*time.Second, 200*time.Millisecond, "Both use cases should be called within 10 seconds")

	if !success {
		t.Logf("Test failed: Only %d out of 2 use cases were called", mockUseCase.GetCallCount())
	}

	// Verify both backup IDs were received
	calledBackupIDs := mockUseCase.GetCalledBackupIDs()
	t.Logf("Called backup IDs: %v", calledBackupIDs)
	assert.Len(t, calledBackupIDs, 2, "Both backup IDs should be tracked")

	// Verify both backups exist in repository and are completed
	backups1, err := backupRepository.FindByDatabaseID(database1.ID)
	assert.NoError(t, err)
	assert.Len(t, backups1, 1, "Database1 should have 1 backup")
	if len(backups1) > 0 {
		t.Logf("Database1 backup status: %s", backups1[0].Status)
	}

	backups2, err := backupRepository.FindByDatabaseID(database2.ID)
	assert.NoError(t, err)
	assert.Len(t, backups2, 1, "Database2 should have 1 backup")
	if len(backups2) > 0 {
		t.Logf("Database2 backup status: %s", backups2[0].Status)
	}

	// Verify both backups completed successfully
	if len(backups1) > 0 {
		assert.Equal(t, backups_core.BackupStatusCompleted, backups1[0].Status,
			"Database1 backup should be completed")
	}

	if len(backups2) > 0 {
		assert.Equal(t, backups_core.BackupStatusCompleted, backups2[0].Status,
			"Database2 backup should be completed")
	}

	time.Sleep(200 * time.Millisecond)
}

@@ -55,19 +55,38 @@ func CreateTestBackuperNode() *BackuperNode {
	}
}

func CreateTestBackuperNodeWithUseCase(useCase backups_core.CreateBackupUsecase) *BackuperNode {
	return &BackuperNode{
		databaseService:     databases.GetDatabaseService(),
		fieldEncryptor:      encryption.GetFieldEncryptor(),
		workspaceService:    workspaces_services.GetWorkspaceService(),
		backupRepository:    backupRepository,
		backupConfigService: backups_config.GetBackupConfigService(),
		storageService:      storages.GetStorageService(),
		notificationSender:  notifiers.GetNotifierService(),
		backupCancelManager: taskCancelManager,
		backupNodesRegistry: backupNodesRegistry,
		logger:              logger.GetLogger(),
		createBackupUseCase: useCase,
		nodeID:              uuid.New(),
		lastHeartbeat:       time.Time{},
		runOnce:             sync.Once{},
		hasRun:              atomic.Bool{},
	}
}

func CreateTestScheduler() *BackupsScheduler {
	return &BackupsScheduler{
		backupRepository,
		backups_config.GetBackupConfigService(),
		storages.GetStorageService(),
		taskCancelManager,
		backupNodesRegistry,
		time.Now().UTC(),
		logger.GetLogger(),
		make(map[uuid.UUID]BackupToNodeRelation),
		CreateTestBackuperNode(),
		sync.Once{},
		atomic.Bool{},
		backupRepository:      backupRepository,
		backupConfigService:   backups_config.GetBackupConfigService(),
		taskCancelManager:     taskCancelManager,
		backupNodesRegistry:   backupNodesRegistry,
		lastBackupTime:        time.Now().UTC(),
		logger:                logger.GetLogger(),
		backupToNodeRelations: make(map[uuid.UUID]BackupToNodeRelation),
		backuperNode:          CreateTestBackuperNode(),
		runOnce:               sync.Once{},
		hasRun:                atomic.Bool{},
	}
}

@@ -15,6 +15,7 @@ type Backup struct {

	Status      BackupStatus `json:"status" gorm:"column:status;not null"`
	FailMessage *string      `json:"failMessage" gorm:"column:fail_message"`
	IsSkipRetry bool         `json:"isSkipRetry" gorm:"column:is_skip_retry;type:boolean;not null"`

	BackupSizeMb float64 `json:"backupSizeMb" gorm:"column:backup_size_mb;default:0"`

@@ -212,3 +212,36 @@ func (r *BackupRepository) CountByDatabaseID(databaseID uuid.UUID) (int64, error

	return count, nil
}

func (r *BackupRepository) GetTotalSizeByDatabase(databaseID uuid.UUID) (float64, error) {
	var totalSize float64

	if err := storage.
		GetDb().
		Model(&Backup{}).
		Select("COALESCE(SUM(backup_size_mb), 0)").
		Where("database_id = ? AND status != ?", databaseID, BackupStatusInProgress).
		Scan(&totalSize).Error; err != nil {
		return 0, err
	}

	return totalSize, nil
}

func (r *BackupRepository) FindOldestByDatabaseExcludingInProgress(
	databaseID uuid.UUID,
	limit int,
) ([]*Backup, error) {
	var backups []*Backup

	if err := storage.
		GetDb().
		Where("database_id = ? AND status != ?", databaseID, BackupStatusInProgress).
		Order("created_at ASC").
		Limit(limit).
		Find(&backups).Error; err != nil {
		return nil, err
	}

	return backups, nil
}

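These two queries are the building blocks for enforcing MaxBackupsTotalSizeMB: read the running total, then delete oldest-first until the cap is respected. A minimal sketch of that loop, using only repository methods visible in this diff (the function itself is hypothetical; the real enforcement lives in the backuping cleaner, whose body this compare view does not show in full):

// Hypothetical sketch of total-size enforcement. 0 means unlimited,
// matching the BackupConfig field documentation below.
func trimToTotalSizeLimit(repo *BackupRepository, databaseID uuid.UUID, maxTotalMB int64) error {
	if maxTotalMB <= 0 {
		return nil // unlimited
	}

	totalMB, err := repo.GetTotalSizeByDatabase(databaseID)
	if err != nil {
		return err
	}

	// Oldest first; in-progress backups are excluded by the query itself.
	oldest, err := repo.FindOldestByDatabaseExcludingInProgress(databaseID, 100)
	if err != nil {
		return err
	}

	for _, b := range oldest {
		if totalMB <= float64(maxTotalMB) {
			break
		}
		if err := repo.DeleteByID(b.ID); err != nil {
			return err
		}
		totalMB -= b.BackupSizeMb
	}

	return nil
}
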
@@ -25,22 +25,23 @@ var backupRepository = &backups_core.BackupRepository{}
var taskCancelManager = task_cancellation.GetTaskCancelManager()

var backupService = &BackupService{
	databaseService:        databases.GetDatabaseService(),
	storageService:         storages.GetStorageService(),
	backupRepository:       backupRepository,
	notifierService:        notifiers.GetNotifierService(),
	notificationSender:     notifiers.GetNotifierService(),
	backupConfigService:    backups_config.GetBackupConfigService(),
	secretKeyService:       encryption_secrets.GetSecretKeyService(),
	fieldEncryptor:         encryption.GetFieldEncryptor(),
	createBackupUseCase:    usecases.GetCreateBackupUsecase(),
	logger:                 logger.GetLogger(),
	backupRemoveListeners:  []backups_core.BackupRemoveListener{},
	workspaceService:       workspaces_services.GetWorkspaceService(),
	auditLogService:        audit_logs.GetAuditLogService(),
	taskCancelManager:      taskCancelManager,
	downloadTokenService:   backups_download.GetDownloadTokenService(),
	backupSchedulerService: backuping.GetBackupsScheduler(),
	databases.GetDatabaseService(),
	storages.GetStorageService(),
	backupRepository,
	notifiers.GetNotifierService(),
	notifiers.GetNotifierService(),
	backups_config.GetBackupConfigService(),
	encryption_secrets.GetSecretKeyService(),
	encryption.GetFieldEncryptor(),
	usecases.GetCreateBackupUsecase(),
	logger.GetLogger(),
	[]backups_core.BackupRemoveListener{},
	workspaces_services.GetWorkspaceService(),
	audit_logs.GetAuditLogService(),
	taskCancelManager,
	backups_download.GetDownloadTokenService(),
	backuping.GetBackupsScheduler(),
	backuping.GetBackupCleaner(),
}

var backupController = &BackupController{

@@ -46,6 +46,7 @@ type BackupService struct {
	taskCancelManager      *task_cancellation.TaskCancelManager
	downloadTokenService   *backups_download.DownloadTokenService
	backupSchedulerService *backuping.BackupsScheduler
	backupCleaner          *backuping.BackupCleaner
}

func (s *BackupService) AddBackupRemoveListener(listener backups_core.BackupRemoveListener) {
@@ -189,7 +190,7 @@ func (s *BackupService) DeleteBackup(
		database.WorkspaceID,
	)

	return s.deleteBackup(backup)
	return s.backupCleaner.DeleteBackup(backup)
}

func (s *BackupService) GetBackup(backupID uuid.UUID) (*backups_core.Backup, error) {
@@ -292,29 +293,6 @@ func (s *BackupService) GetBackupFile(
	return reader, backup, database, nil
}

func (s *BackupService) deleteBackup(backup *backups_core.Backup) error {
	for _, listener := range s.backupRemoveListeners {
		if err := listener.OnBeforeBackupRemove(backup); err != nil {
			return err
		}
	}

	storage, err := s.storageService.GetStorageByID(backup.StorageID)
	if err != nil {
		return err
	}

	err = storage.DeleteFile(s.fieldEncryptor, backup.ID)
	if err != nil {
		// we do not return error here, because sometimes clean up performed
		// before unavailable storage removal or change - therefore we should
		// proceed even in case of error
		s.logger.Error("Failed to delete backup file", "error", err)
	}

	return s.backupRepository.DeleteByID(backup.ID)
}

func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error {
	dbBackupsInProgress, err := s.backupRepository.FindByDatabaseIdAndStatus(
		databaseID,
@@ -336,7 +314,7 @@ func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error {
	}

	for _, dbBackup := range dbBackups {
		err := s.deleteBackup(dbBackup)
		err := s.backupCleaner.DeleteBackup(dbBackup)
		if err != nil {
			return err
		}

@@ -31,6 +31,11 @@ type BackupConfig struct {
	MaxFailedTriesCount int `json:"maxFailedTriesCount" gorm:"column:max_failed_tries_count;type:int;not null"`

	Encryption BackupEncryption `json:"encryption" gorm:"column:encryption;type:text;not null;default:'NONE'"`

	// MaxBackupSizeMB limits individual backup size. 0 = unlimited.
	MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"`
	// MaxBackupsTotalSizeMB limits total size of all backups. 0 = unlimited.
	MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"`
}

func (h *BackupConfig) TableName() string {

@@ -89,20 +94,30 @@ func (b *BackupConfig) Validate() error {
		return errors.New("encryption must be NONE or ENCRYPTED")
	}

	if b.MaxBackupSizeMB < 0 {
		return errors.New("max backup size must be non-negative")
	}

	if b.MaxBackupsTotalSizeMB < 0 {
		return errors.New("max backups total size must be non-negative")
	}

	return nil
}

func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig {
	return &BackupConfig{
		DatabaseID:          newDatabaseID,
		IsBackupsEnabled:    b.IsBackupsEnabled,
		StorePeriod:         b.StorePeriod,
		BackupIntervalID:    uuid.Nil,
		BackupInterval:      b.BackupInterval.Copy(),
		StorageID:           b.StorageID,
		SendNotificationsOn: b.SendNotificationsOn,
		IsRetryIfFailed:     b.IsRetryIfFailed,
		MaxFailedTriesCount: b.MaxFailedTriesCount,
		Encryption:          b.Encryption,
		DatabaseID:            newDatabaseID,
		IsBackupsEnabled:      b.IsBackupsEnabled,
		StorePeriod:           b.StorePeriod,
		BackupIntervalID:      uuid.Nil,
		BackupInterval:        b.BackupInterval.Copy(),
		StorageID:             b.StorageID,
		SendNotificationsOn:   b.SendNotificationsOn,
		IsRetryIfFailed:       b.IsRetryIfFailed,
		MaxFailedTriesCount:   b.MaxFailedTriesCount,
		Encryption:            b.Encryption,
		MaxBackupSizeMB:       b.MaxBackupSizeMB,
		MaxBackupsTotalSizeMB: b.MaxBackupsTotalSizeMB,
	}
}

@@ -515,9 +515,13 @@ func detectPrivileges(ctx context.Context, db *sql.DB, database string) (string,
	hasProcess := false
	hasAllPrivileges := false

	// Escape underscores to match MariaDB's grant output format
	// MariaDB escapes _ as \_ in SHOW GRANTS output
	// Pattern matches either literal _ or escaped \_
	escapedDbName := strings.ReplaceAll(regexp.QuoteMeta(database), "_", `(_|\\_)`)
	dbPatternStr := fmt.Sprintf(
		`(?i)ON\s+[\x60'"]?%s[\x60'"]?\s*\.\s*\*`,
		regexp.QuoteMeta(database),
		escapedDbName,
	)
	dbPattern := regexp.MustCompile(dbPatternStr)
	globalPattern := regexp.MustCompile(`(?i)ON\s+\*\s*\.\s*\*`)

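To see why the escaped pattern is needed, here is a small standalone check (illustrative, not part of the commit): MariaDB prints the database name as test\_all\_db in SHOW GRANTS, while the configured name is test_all_db, and the alternation (_|\\_) accepts both forms:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	database := "test_all_db"
	escapedDbName := strings.ReplaceAll(regexp.QuoteMeta(database), "_", `(_|\\_)`)
	dbPattern := regexp.MustCompile(
		fmt.Sprintf(`(?i)ON\s+[\x60'"]?%s[\x60'"]?\s*\.\s*\*`, escapedDbName),
	)

	escaped := "GRANT ALL PRIVILEGES ON `test\\_all\\_db`.* TO 'u'@'%'"
	literal := "GRANT SELECT ON `test_all_db`.* TO 'u'@'%'"

	fmt.Println(dbPattern.MatchString(escaped)) // true: matches the escaped \_ form
	fmt.Println(dbPattern.MatchString(literal)) // true: matches the plain form
}
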
@@ -694,6 +694,115 @@ func Test_TestConnection_DatabaseWithUnderscores_Success(t *testing.T) {
	assert.NoError(t, err)
}

func Test_TestConnection_DatabaseWithUnderscoresAndAllPrivileges_Success(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version tools.MariadbVersion
		port    string
	}{
		{"MariaDB 5.5", tools.MariadbVersion55, env.TestMariadb55Port},
		{"MariaDB 10.1", tools.MariadbVersion101, env.TestMariadb101Port},
		{"MariaDB 10.2", tools.MariadbVersion102, env.TestMariadb102Port},
		{"MariaDB 10.3", tools.MariadbVersion103, env.TestMariadb103Port},
		{"MariaDB 10.4", tools.MariadbVersion104, env.TestMariadb104Port},
		{"MariaDB 10.5", tools.MariadbVersion105, env.TestMariadb105Port},
		{"MariaDB 10.6", tools.MariadbVersion106, env.TestMariadb106Port},
		{"MariaDB 10.11", tools.MariadbVersion1011, env.TestMariadb1011Port},
		{"MariaDB 11.4", tools.MariadbVersion114, env.TestMariadb114Port},
		{"MariaDB 11.8", tools.MariadbVersion118, env.TestMariadb118Port},
		{"MariaDB 12.0", tools.MariadbVersion120, env.TestMariadb120Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToMariadbContainer(t, tc.port, tc.version)
			defer container.DB.Close()

			underscoreDbName := "test_all_db"

			_, err := container.DB.Exec(
				fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", underscoreDbName),
			)
			assert.NoError(t, err)

			_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE `%s`", underscoreDbName))
			assert.NoError(t, err)

			defer func() {
				_, _ = container.DB.Exec(
					fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", underscoreDbName),
				)
			}()

			underscoreDSN := fmt.Sprintf(
				"%s:%s@tcp(%s:%d)/%s?parseTime=true",
				container.Username,
				container.Password,
				container.Host,
				container.Port,
				underscoreDbName,
			)
			underscoreDB, err := sqlx.Connect("mysql", underscoreDSN)
			assert.NoError(t, err)
			defer underscoreDB.Close()

			_, err = underscoreDB.Exec(`
				CREATE TABLE all_priv_test (
					id INT AUTO_INCREMENT PRIMARY KEY,
					data VARCHAR(255) NOT NULL
				)
			`)
			assert.NoError(t, err)

			_, err = underscoreDB.Exec(`INSERT INTO all_priv_test (data) VALUES ('test1')`)
			assert.NoError(t, err)

			allPrivUsername := fmt.Sprintf("allpriv%s", uuid.New().String()[:8])
			allPrivPassword := "allprivpass123"

			_, err = underscoreDB.Exec(fmt.Sprintf(
				"CREATE USER '%s'@'%%' IDENTIFIED BY '%s'",
				allPrivUsername,
				allPrivPassword,
			))
			assert.NoError(t, err)

			_, err = underscoreDB.Exec(fmt.Sprintf(
				"GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%%'",
				underscoreDbName,
				allPrivUsername,
			))
			assert.NoError(t, err)

			_, err = underscoreDB.Exec("FLUSH PRIVILEGES")
			assert.NoError(t, err)

			defer dropUserSafe(underscoreDB, allPrivUsername)

			mariadbModel := &MariadbDatabase{
				Version:  tc.version,
				Host:     container.Host,
				Port:     container.Port,
				Username: allPrivUsername,
				Password: allPrivPassword,
				Database: &underscoreDbName,
				IsHttps:  false,
			}

			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

			err = mariadbModel.TestConnection(logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.NotEmpty(t, mariadbModel.Privileges)
			assert.Contains(t, mariadbModel.Privileges, "SELECT")
			assert.Contains(t, mariadbModel.Privileges, "SHOW VIEW")
		})
	}
}

type MariadbContainer struct {
	Host string
	Port int

@@ -489,9 +489,13 @@ func detectPrivileges(ctx context.Context, db *sql.DB, database string) (string,
	hasProcess := false
	hasAllPrivileges := false

	// Escape underscores to match MySQL's grant output format
	// MySQL escapes _ as \_ in SHOW GRANTS output
	// Pattern matches either literal _ or escaped \_
	escapedDbName := strings.ReplaceAll(regexp.QuoteMeta(database), "_", `(_|\\_)`)
	dbPatternStr := fmt.Sprintf(
		`(?i)ON\s+[\x60'"]?%s[\x60'"]?\s*\.\s*\*`,
		regexp.QuoteMeta(database),
		escapedDbName,
	)
	dbPattern := regexp.MustCompile(dbPatternStr)
	globalPattern := regexp.MustCompile(`(?i)ON\s+\*\s*\.\s*\*`)

@@ -674,6 +674,112 @@ func Test_TestConnection_DatabaseWithUnderscores_Success(t *testing.T) {
	assert.NoError(t, err)
}

func Test_TestConnection_DatabaseWithUnderscoresAndAllPrivileges_Success(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version tools.MysqlVersion
		port    string
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port},
		{"MySQL 9", tools.MysqlVersion9, env.TestMysql90Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToMysqlContainer(t, tc.port, tc.version)
			defer container.DB.Close()

			underscoreDbName := "test_all_db"

			_, err := container.DB.Exec(
				fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", underscoreDbName),
			)
			assert.NoError(t, err)

			_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE `%s`", underscoreDbName))
			assert.NoError(t, err)

			defer func() {
				_, _ = container.DB.Exec(
					fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", underscoreDbName),
				)
			}()

			underscoreDSN := fmt.Sprintf(
				"%s:%s@tcp(%s:%d)/%s?parseTime=true",
				container.Username,
				container.Password,
				container.Host,
				container.Port,
				underscoreDbName,
			)
			underscoreDB, err := sqlx.Connect("mysql", underscoreDSN)
			assert.NoError(t, err)
			defer underscoreDB.Close()

			_, err = underscoreDB.Exec(`
				CREATE TABLE all_priv_test (
					id INT AUTO_INCREMENT PRIMARY KEY,
					data VARCHAR(255) NOT NULL
				)
			`)
			assert.NoError(t, err)

			_, err = underscoreDB.Exec(`INSERT INTO all_priv_test (data) VALUES ('test1')`)
			assert.NoError(t, err)

			allPrivUsername := fmt.Sprintf("allpriv_%s", uuid.New().String()[:8])
			allPrivPassword := "allprivpass123"

			_, err = underscoreDB.Exec(fmt.Sprintf(
				"CREATE USER '%s'@'%%' IDENTIFIED BY '%s'",
				allPrivUsername,
				allPrivPassword,
			))
			assert.NoError(t, err)

			_, err = underscoreDB.Exec(fmt.Sprintf(
				"GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%%'",
				underscoreDbName,
				allPrivUsername,
			))
			assert.NoError(t, err)

			_, err = underscoreDB.Exec("FLUSH PRIVILEGES")
			assert.NoError(t, err)

			defer func() {
				_, _ = container.DB.Exec(
					fmt.Sprintf("DROP USER IF EXISTS '%s'@'%%'", allPrivUsername),
				)
			}()

			mysqlModel := &MysqlDatabase{
				Version:  tc.version,
				Host:     container.Host,
				Port:     container.Port,
				Username: allPrivUsername,
				Password: allPrivPassword,
				Database: &underscoreDbName,
				IsHttps:  false,
			}

			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

			err = mysqlModel.TestConnection(logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.NotEmpty(t, mysqlModel.Privileges)
			assert.Contains(t, mysqlModel.Privileges, "SELECT")
			assert.Contains(t, mysqlModel.Privileges, "SHOW VIEW")
		})
	}
}

type MysqlContainer struct {
	Host string
	Port int

@@ -13,6 +13,7 @@ import (

	"github.com/google/uuid"
	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
	"gorm.io/gorm"
)

@@ -394,10 +395,13 @@ func (p *PostgresqlDatabase) IsUserReadOnly(
//
// This method performs the following operations atomically in a single transaction:
// 1. Creates a PostgreSQL user with a UUID-based password
// 2. Grants CONNECT privilege on the database
// 3. Grants USAGE on all non-system schemas
// 4. Grants SELECT on all existing tables and sequences
// 5. Sets default privileges for future tables and sequences
// 2. Revokes CREATE privilege on public schema from PUBLIC role
// 3. Grants CONNECT privilege on the database
// 4. Discovers all user-created schemas
// 5. Grants USAGE on all non-system schemas
// 6. Grants SELECT on all existing tables and sequences
// 7. Sets default privileges for future tables and sequences
// 8. Verifies user creation before committing
//
// Security features:
// - Username format: "databasus-{8-char-uuid}" for uniqueness

@@ -487,33 +491,56 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
		return "", "", fmt.Errorf("failed to create user: %w", err)
	}

	// Step 1.5: Revoke CREATE privilege from PUBLIC role on public schema
	// Step 2: Check if public schema exists and revoke CREATE privilege if it does
	// This is necessary because all PostgreSQL users inherit CREATE privilege on the
	// public schema through the PUBLIC role. This is a one-time operation that affects
	// the entire database, making it more secure by default.
	// Note: This only affects the public schema; other schemas are unaffected.
	_, err = tx.Exec(ctx, `REVOKE CREATE ON SCHEMA public FROM PUBLIC`)
	if err != nil {
		logger.Error("Failed to revoke CREATE on public from PUBLIC", "error", err)
		if !strings.Contains(err.Error(), "schema \"public\" does not exist") &&
			!strings.Contains(err.Error(), "permission denied") {
			return "", "", fmt.Errorf("failed to revoke CREATE from PUBLIC: %w", err)
		}
	}

	// Now revoke from the specific user as well (belt and suspenders)
	_, err = tx.Exec(ctx, fmt.Sprintf(`REVOKE CREATE ON SCHEMA public FROM "%s"`, baseUsername))
	if err != nil {
		logger.Error(
			"Failed to revoke CREATE on public schema from user",
			"error",
			err,
			"username",
			baseUsername,
	var publicSchemaExists bool
	err = tx.QueryRow(ctx, `
		SELECT EXISTS(
			SELECT 1 FROM information_schema.schemata
			WHERE schema_name = 'public'
		)
	`).Scan(&publicSchemaExists)
	if err != nil {
		return "", "", fmt.Errorf("failed to check if public schema exists: %w", err)
	}

	// Step 2: Grant database connection privilege and revoke TEMP
	if publicSchemaExists {
		// Revoke CREATE from PUBLIC role (affects all users)
		_, err = tx.Exec(ctx, `REVOKE CREATE ON SCHEMA public FROM PUBLIC`)
		if err != nil {
			if strings.Contains(err.Error(), "permission denied") {
				logger.Warn(
					"Failed to revoke CREATE on public from PUBLIC (permission denied)",
					"error",
					err,
				)
			} else {
				return "", "", fmt.Errorf("failed to revoke CREATE from PUBLIC on existing public schema: %w", err)
			}
		}

		// Now revoke from the specific user as well (belt and suspenders)
		_, err = tx.Exec(
			ctx,
			fmt.Sprintf(`REVOKE CREATE ON SCHEMA public FROM "%s"`, baseUsername),
		)
		if err != nil {
			logger.Warn(
				"Failed to revoke CREATE on public schema from user",
				"error",
				err,
				"username",
				baseUsername,
			)
		}
	} else {
		logger.Info("Public schema does not exist, skipping CREATE privilege revocation")
	}

	// Step 3: Grant database connection privilege and revoke TEMP
	_, err = tx.Exec(
		ctx,
		fmt.Sprintf(`GRANT CONNECT ON DATABASE "%s" TO "%s"`, *p.Database, baseUsername),
@@ -537,7 +564,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
		logger.Warn("Failed to revoke TEMP privilege", "error", err, "username", baseUsername)
	}

	// Step 3: Discover all user-created schemas
	// Step 4: Discover all user-created schemas
	rows, err := tx.Query(ctx, `
		SELECT schema_name
		FROM information_schema.schemata
@@ -562,7 +589,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
		return "", "", fmt.Errorf("error iterating schemas: %w", err)
	}

	// Step 4: Grant USAGE on each schema and explicitly prevent CREATE
	// Step 5: Grant USAGE on each schema and explicitly prevent CREATE
	for _, schema := range schemas {
		// Revoke CREATE specifically (handles inheritance from PUBLIC role)
		_, err = tx.Exec(
@@ -591,7 +618,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
		}
	}

	// Step 5: Grant SELECT on ALL existing tables and sequences
	// Step 6: Grant SELECT on ALL existing tables and sequences
	grantSelectSQL := fmt.Sprintf(`
		DO $$
		DECLARE
@@ -613,7 +640,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
		return "", "", fmt.Errorf("failed to grant select on tables: %w", err)
	}

	// Step 6: Set default privileges for FUTURE tables and sequences
	// Step 7: Set default privileges for FUTURE tables and sequences
	defaultPrivilegesSQL := fmt.Sprintf(`
		DO $$
		DECLARE
@@ -635,7 +662,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
		return "", "", fmt.Errorf("failed to set default privileges: %w", err)
	}

	// Step 7: Verify user creation before committing
	// Step 8: Verify user creation before committing
	var verifyUsername string
	err = tx.QueryRow(ctx, fmt.Sprintf(`SELECT rolname FROM pg_roles WHERE rolname = '%s'`, baseUsername)).
		Scan(&verifyUsername)
@@ -851,7 +878,15 @@ func checkBackupPermissions(
	}

	if err != nil {
		return fmt.Errorf("cannot check SELECT privileges: %w", err)
		// If the user doesn't have USAGE on the schema, has_table_privilege will fail
		// with "permission denied for schema". This means they definitely don't have
		// SELECT privileges, so treat this as missing permissions rather than an error.
		var pgErr *pgconn.PgError
		if errors.As(err, &pgErr) && pgErr.Code == "42501" { // insufficient_privilege
			selectableTableCount = 0
		} else {
			return fmt.Errorf("cannot check SELECT privileges: %w", err)
		}
	}
	if selectableTableCount == 0 {
		missingPrivileges = append(missingPrivileges, "SELECT on tables")

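For reference, the same classification can be reproduced in isolation. The sketch below (the connection string and table name are made up, not taken from this repository) shows pgx surfacing a *pgconn.PgError whose Code is SQLSTATE 42501 when has_table_privilege is called on a schema the user lacks USAGE on:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://limited:pw@localhost:5432/app") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	var canSelect bool
	err = conn.QueryRow(ctx,
		`SELECT has_table_privilege('some_schema.some_table', 'SELECT')`,
	).Scan(&canSelect)

	var pgErr *pgconn.PgError
	if errors.As(err, &pgErr) && pgErr.Code == "42501" {
		// insufficient_privilege: no USAGE on the schema, so SELECT is
		// certainly missing; report it instead of failing the whole check.
		fmt.Println("missing SELECT privilege")
	}
}
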
@@ -709,6 +709,344 @@ func Test_CreateReadOnlyUser_Supabase_UserCanReadButNotWrite(t *testing.T) {
	assert.Contains(t, err.Error(), "permission denied")
}

func Test_CreateReadOnlyUser_WithPublicSchema_Success(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version string
		port    string
	}{
		{"PostgreSQL 12", "12", env.TestPostgres12Port},
		{"PostgreSQL 13", "13", env.TestPostgres13Port},
		{"PostgreSQL 14", "14", env.TestPostgres14Port},
		{"PostgreSQL 15", "15", env.TestPostgres15Port},
		{"PostgreSQL 16", "16", env.TestPostgres16Port},
		{"PostgreSQL 17", "17", env.TestPostgres17Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToPostgresContainer(t, tc.port)
			defer container.DB.Close()

			_, err := container.DB.Exec(`
				DROP TABLE IF EXISTS public_schema_test CASCADE;
				CREATE TABLE public_schema_test (
					id SERIAL PRIMARY KEY,
					data TEXT NOT NULL
				);
				INSERT INTO public_schema_test (data) VALUES ('test1'), ('test2');
			`)
			assert.NoError(t, err)

			pgModel := createPostgresModel(container)
			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
			ctx := context.Background()

			username, password, err := pgModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.NotEmpty(t, username)
			assert.NotEmpty(t, password)
			assert.True(t, strings.HasPrefix(username, "databasus-"))

			readOnlyModel := &PostgresqlDatabase{
				Version:  pgModel.Version,
				Host:     pgModel.Host,
				Port:     pgModel.Port,
				Username: username,
				Password: password,
				Database: pgModel.Database,
				IsHttps:  false,
			}

			isReadOnly, privileges, err := readOnlyModel.IsUserReadOnly(
				ctx,
				logger,
				nil,
				uuid.New(),
			)
			assert.NoError(t, err)
			assert.True(t, isReadOnly, "User should be read-only")
			assert.Empty(t, privileges, "Read-only user should have no write privileges")

			readOnlyDSN := fmt.Sprintf(
				"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
				container.Host,
				container.Port,
				username,
				password,
				container.Database,
			)
			readOnlyConn, err := sqlx.Connect("postgres", readOnlyDSN)
			assert.NoError(t, err)
			defer readOnlyConn.Close()

			var count int
			err = readOnlyConn.Get(&count, "SELECT COUNT(*) FROM public_schema_test")
			assert.NoError(t, err)
			assert.Equal(t, 2, count)

			_, err = readOnlyConn.Exec(
				"INSERT INTO public_schema_test (data) VALUES ('should-fail')",
			)
			assert.Error(t, err)
			assert.Contains(t, err.Error(), "permission denied")

			_, err = readOnlyConn.Exec("CREATE TABLE public.hack_table (id INT)")
			assert.Error(t, err)
			assert.Contains(t, err.Error(), "permission denied")

			_, err = container.DB.Exec(fmt.Sprintf(`DROP OWNED BY "%s" CASCADE`, username))
			if err != nil {
				t.Logf("Warning: Failed to drop owned objects: %v", err)
			}
			_, err = container.DB.Exec(fmt.Sprintf(`DROP USER IF EXISTS "%s"`, username))
			assert.NoError(t, err)
		})
	}
}

func Test_CreateReadOnlyUser_WithoutPublicSchema_Success(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version string
		port    string
	}{
		{"PostgreSQL 12", "12", env.TestPostgres12Port},
		{"PostgreSQL 13", "13", env.TestPostgres13Port},
		{"PostgreSQL 14", "14", env.TestPostgres14Port},
		{"PostgreSQL 15", "15", env.TestPostgres15Port},
		{"PostgreSQL 16", "16", env.TestPostgres16Port},
		{"PostgreSQL 17", "17", env.TestPostgres17Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToPostgresContainer(t, tc.port)
			defer container.DB.Close()

			_, err := container.DB.Exec(`
				DROP SCHEMA IF EXISTS public CASCADE;
				DROP SCHEMA IF EXISTS app_schema CASCADE;
				DROP SCHEMA IF EXISTS data_schema CASCADE;
				CREATE SCHEMA app_schema;
				CREATE SCHEMA data_schema;
				CREATE TABLE app_schema.users (
					id SERIAL PRIMARY KEY,
					username TEXT NOT NULL
				);
				CREATE TABLE data_schema.records (
					id SERIAL PRIMARY KEY,
					info TEXT NOT NULL
				);
				INSERT INTO app_schema.users (username) VALUES ('user1'), ('user2');
				INSERT INTO data_schema.records (info) VALUES ('record1'), ('record2');
			`)
			assert.NoError(t, err)

			pgModel := createPostgresModel(container)
			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
			ctx := context.Background()

			username, password, err := pgModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
			assert.NoError(t, err, "CreateReadOnlyUser should succeed without public schema")
			assert.NotEmpty(t, username)
			assert.NotEmpty(t, password)
			assert.True(t, strings.HasPrefix(username, "databasus-"))

			readOnlyModel := &PostgresqlDatabase{
				Version:  pgModel.Version,
				Host:     pgModel.Host,
				Port:     pgModel.Port,
				Username: username,
				Password: password,
				Database: pgModel.Database,
				IsHttps:  false,
			}

			isReadOnly, privileges, err := readOnlyModel.IsUserReadOnly(
				ctx,
				logger,
				nil,
				uuid.New(),
			)
			assert.NoError(t, err)
			assert.True(t, isReadOnly, "User should be read-only")
			assert.Empty(t, privileges, "Read-only user should have no write privileges")

			readOnlyDSN := fmt.Sprintf(
				"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
				container.Host,
				container.Port,
				username,
				password,
				container.Database,
			)
			readOnlyConn, err := sqlx.Connect("postgres", readOnlyDSN)
			assert.NoError(t, err)
			defer readOnlyConn.Close()

			var userCount int
			err = readOnlyConn.Get(&userCount, "SELECT COUNT(*) FROM app_schema.users")
			assert.NoError(t, err)
			assert.Equal(t, 2, userCount)

			var recordCount int
			err = readOnlyConn.Get(&recordCount, "SELECT COUNT(*) FROM data_schema.records")
			assert.NoError(t, err)
			assert.Equal(t, 2, recordCount)

			_, err = readOnlyConn.Exec(
				"INSERT INTO app_schema.users (username) VALUES ('should-fail')",
			)
			assert.Error(t, err)
			assert.Contains(t, err.Error(), "permission denied")

			_, err = readOnlyConn.Exec("CREATE TABLE app_schema.hack_table (id INT)")
			assert.Error(t, err)
			assert.Contains(t, err.Error(), "permission denied")

			_, err = readOnlyConn.Exec("CREATE TABLE data_schema.hack_table (id INT)")
			assert.Error(t, err)
			assert.Contains(t, err.Error(), "permission denied")

			_, err = container.DB.Exec(fmt.Sprintf(`DROP OWNED BY "%s" CASCADE`, username))
			if err != nil {
				t.Logf("Warning: Failed to drop owned objects: %v", err)
			}
			_, err = container.DB.Exec(fmt.Sprintf(`DROP USER IF EXISTS "%s"`, username))
			assert.NoError(t, err)

			_, err = container.DB.Exec(`
				DROP SCHEMA IF EXISTS app_schema CASCADE;
				DROP SCHEMA IF EXISTS data_schema CASCADE;
				CREATE SCHEMA IF NOT EXISTS public;
			`)
			assert.NoError(t, err)
		})
	}
}

func Test_CreateReadOnlyUser_PublicSchemaExistsButNoPermissions_ReturnsError(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version string
		port    string
	}{
		{"PostgreSQL 12", "12", env.TestPostgres12Port},
		{"PostgreSQL 13", "13", env.TestPostgres13Port},
		{"PostgreSQL 14", "14", env.TestPostgres14Port},
		{"PostgreSQL 15", "15", env.TestPostgres15Port},
		{"PostgreSQL 16", "16", env.TestPostgres16Port},
		{"PostgreSQL 17", "17", env.TestPostgres17Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToPostgresContainer(t, tc.port)
			defer container.DB.Close()

			limitedAdminUsername := fmt.Sprintf("limited_admin_%s", uuid.New().String()[:8])
			limitedAdminPassword := "limited_password_123"

			_, err := container.DB.Exec(`
				CREATE SCHEMA IF NOT EXISTS public;
				DROP TABLE IF EXISTS public.permission_test_table CASCADE;
				CREATE TABLE public.permission_test_table (
					id SERIAL PRIMARY KEY,
					data TEXT NOT NULL
				);
				INSERT INTO public.permission_test_table (data) VALUES ('test1');
			`)
			assert.NoError(t, err)

			_, err = container.DB.Exec(`GRANT CREATE ON SCHEMA public TO PUBLIC`)
			assert.NoError(t, err)

			_, err = container.DB.Exec(fmt.Sprintf(
				`CREATE USER "%s" WITH PASSWORD '%s' LOGIN CREATEROLE`,
				limitedAdminUsername,
				limitedAdminPassword,
			))
			assert.NoError(t, err)

			_, err = container.DB.Exec(fmt.Sprintf(
				`GRANT CONNECT ON DATABASE "%s" TO "%s"`,
				container.Database,
				limitedAdminUsername,
			))
			assert.NoError(t, err)

			defer func() {
				_, _ = container.DB.Exec(
					fmt.Sprintf(`DROP OWNED BY "%s" CASCADE`, limitedAdminUsername),
				)
				_, _ = container.DB.Exec(
					fmt.Sprintf(`DROP USER IF EXISTS "%s"`, limitedAdminUsername),
				)
				_, _ = container.DB.Exec(`REVOKE CREATE ON SCHEMA public FROM PUBLIC`)
			}()

			limitedAdminDSN := fmt.Sprintf(
				"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
				container.Host,
				container.Port,
				limitedAdminUsername,
				limitedAdminPassword,
				container.Database,
			)
			limitedAdminConn, err := sqlx.Connect("postgres", limitedAdminDSN)
			assert.NoError(t, err)
			defer limitedAdminConn.Close()

			pgModel := &PostgresqlDatabase{
				Version:  tools.GetPostgresqlVersionEnum(tc.version),
				Host:     container.Host,
				Port:     container.Port,
				Username: limitedAdminUsername,
				Password: limitedAdminPassword,
				Database: &container.Database,
				IsHttps:  false,
			}

			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
			ctx := context.Background()

			username, password, err := pgModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
			assert.Error(
				t,
				err,
				"CreateReadOnlyUser should fail when admin lacks permissions to secure public schema",
			)
			if err != nil {
				errorMsg := err.Error()
				hasExpectedError := strings.Contains(
					errorMsg,
					"failed to revoke CREATE from PUBLIC on existing public schema",
				) ||
					strings.Contains(errorMsg, "permission denied for schema public") ||
					strings.Contains(errorMsg, "failed to grant")
				assert.True(
					t,
					hasExpectedError,
					"Error should indicate permission issues with public schema, got: %s",
					errorMsg,
				)
			}
			assert.Empty(t, username)
			assert.Empty(t, password)
		})
	}
}

func Test_Validate_WhenLocalhostAndDatabasus_ReturnsError(t *testing.T) {
	testCases := []struct {
		name string

@@ -6,6 +6,7 @@ import (

	audit_logs "databasus-backend/internal/features/audit_logs"
	"databasus-backend/internal/features/backups/backups"
	"databasus-backend/internal/features/backups/backups/backuping"
	backups_config "databasus-backend/internal/features/backups/config"
	"databasus-backend/internal/features/databases"
	"databasus-backend/internal/features/disk"
@@ -51,6 +52,7 @@ func SetupDependencies() {

	setupOnce.Do(func() {
		backups.GetBackupService().AddBackupRemoveListener(restoreService)
		backuping.GetBackupCleaner().AddBackupRemoveListener(restoreService)

		isSetup.Store(true)
	})

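The restore service registers with both the service and the new cleaner, so pre-delete hooks keep firing no matter which path removes a backup. Below is a minimal listener sketch; the interface shape is inferred from the OnBeforeBackupRemove call in the removed deleteBackup method above, so treat the exact interface definition as an assumption:

// Hypothetical listener: log (or veto, by returning an error) before the
// cleaner deletes the backup file and its database row.
type auditRemoveListener struct {
	logger *slog.Logger
}

func (l *auditRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error {
	l.logger.Info("backup about to be removed", "backupID", backup.ID)
	return nil
}

// Wiring, mirroring SetupDependencies above:
//   backuping.GetBackupCleaner().AddBackupRemoveListener(&auditRemoveListener{logger: logger.GetLogger()})
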
backend/migrations/20260119124334_add_backup_size_limits.sql (new file, 23 lines)
@@ -0,0 +1,23 @@
-- +goose Up
-- +goose StatementBegin

ALTER TABLE backup_configs
ADD COLUMN max_backup_size_mb BIGINT NOT NULL DEFAULT 0,
ADD COLUMN max_backups_total_size_mb BIGINT NOT NULL DEFAULT 0;

ALTER TABLE backups
ADD COLUMN is_skip_retry BOOLEAN NOT NULL DEFAULT FALSE;

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

ALTER TABLE backup_configs
DROP COLUMN IF EXISTS max_backups_total_size_mb,
DROP COLUMN IF EXISTS max_backup_size_mb;

ALTER TABLE backups
DROP COLUMN IF EXISTS is_skip_retry;

-- +goose StatementEnd

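The -- +goose annotations mark this as a goose migration. A minimal sketch of applying it with the standard pressly/goose v3 API follows; the DSN, driver choice, and migrations directory are illustrative assumptions, since the repository's own migration runner is not shown in this diff:

package main

import (
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // database/sql driver for PostgreSQL
	"github.com/pressly/goose/v3"
)

func main() {
	db, err := sql.Open("pgx", "postgres://user:pw@localhost:5432/databasus") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	// Applies every pending migration, including add_backup_size_limits.
	if err := goose.Up(db, "backend/migrations"); err != nil {
		log.Fatal(err)
	}
}
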
@@ -15,4 +15,7 @@ export interface BackupConfig {
  isRetryIfFailed: boolean;
  maxFailedTriesCount: number;
  encryption: BackupEncryption;

  maxBackupSizeMb: number;
  maxBackupsTotalSizeMb: number;
}

@@ -1,4 +1,4 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import {
  Button,
  Checkbox,
@@ -79,6 +79,12 @@ export const EditBackupConfigComponent = ({

  const [isShowWarn, setIsShowWarn] = useState(false);

  const hasAdvancedValues =
    !!backupConfig?.isRetryIfFailed ||
    (backupConfig?.maxBackupSizeMb ?? 0) > 0 ||
    (backupConfig?.maxBackupsTotalSizeMb ?? 0) > 0;
  const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);

  const timeFormat = useMemo(() => {
    const is12 = getIs12Hour();
    return { use12Hours: is12, format: is12 ? 'h:mm A' : 'HH:mm' };
@@ -155,10 +161,13 @@ export const EditBackupConfigComponent = ({
        },
        storage: undefined,
        storePeriod: Period.THREE_MONTH,
        sendNotificationsOn: [],
        sendNotificationsOn: [BackupNotificationType.BackupFailed],
        isRetryIfFailed: true,
        maxFailedTriesCount: 3,
        encryption: BackupEncryption.ENCRYPTED,

        maxBackupSizeMb: 0,
        maxBackupsTotalSizeMb: 0,
      });
    }
    loadStorages();
@@ -363,79 +372,6 @@ export const EditBackupConfigComponent = ({
          </div>
        )}

        <div className="mt-4 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
          <div className="mb-1 min-w-[150px] sm:mb-0">Retry backup if failed</div>
          <div className="flex items-center">
            <Switch
              size="small"
              checked={backupConfig.isRetryIfFailed}
              onChange={(checked) => updateBackupConfig({ isRetryIfFailed: checked })}
            />

            <Tooltip
              className="cursor-pointer"
              title="Automatically retry failed backups. Backups can fail due to network failures, storage issues or temporary database unavailability."
            >
              <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
            </Tooltip>
          </div>
        </div>

        {backupConfig.isRetryIfFailed && (
          <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
            <div className="mb-1 min-w-[150px] sm:mb-0">Max failed tries count</div>
            <div className="flex items-center">
              <InputNumber
                min={1}
                max={10}
                value={backupConfig.maxFailedTriesCount}
                onChange={(value) => updateBackupConfig({ maxFailedTriesCount: value || 1 })}
                size="small"
                className="w-full max-w-[200px] grow"
              />

              <Tooltip
                className="cursor-pointer"
                title="Maximum number of retry attempts for failed backups. You will receive a notification when all tries have failed."
              >
                <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
              </Tooltip>
            </div>
          </div>
        )}

        <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
          <div className="mb-1 min-w-[150px] sm:mb-0">Store period</div>
          <div className="flex items-center">
            <Select
              value={backupConfig.storePeriod}
              onChange={(v) => updateBackupConfig({ storePeriod: v })}
              size="small"
              className="w-full max-w-[200px] grow"
              options={[
                { label: '1 day', value: Period.DAY },
                { label: '1 week', value: Period.WEEK },
                { label: '1 month', value: Period.MONTH },
                { label: '3 months', value: Period.THREE_MONTH },
                { label: '6 months', value: Period.SIX_MONTH },
                { label: '1 year', value: Period.YEAR },
                { label: '2 years', value: Period.TWO_YEARS },
                { label: '3 years', value: Period.THREE_YEARS },
                { label: '4 years', value: Period.FOUR_YEARS },
                { label: '5 years', value: Period.FIVE_YEARS },
                { label: 'Forever', value: Period.FOREVER },
              ]}
            />

            <Tooltip
              className="cursor-pointer"
              title="How long to keep the backups? Make sure you have enough storage space."
            >
              <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
            </Tooltip>
          </div>
        </div>

        <div className="mb-3" />
      </>
    )}
@@ -485,7 +421,7 @@ export const EditBackupConfigComponent = ({
            value={backupConfig.encryption}
            onChange={(v) => updateBackupConfig({ encryption: v })}
            size="small"
            className="w-full max-w-[200px] grow"
            className="w-[200px]"
            options={[
              { label: 'None', value: BackupEncryption.NONE },
              { label: 'Encrypt backup files', value: BackupEncryption.ENCRYPTED },
@@ -501,6 +437,38 @@ export const EditBackupConfigComponent = ({
        </div>
      </div>

      <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
        <div className="mb-1 min-w-[150px] sm:mb-0">Store period</div>
        <div className="flex items-center">
          <Select
            value={backupConfig.storePeriod}
            onChange={(v) => updateBackupConfig({ storePeriod: v })}
            size="small"
            className="w-[200px]"
            options={[
              { label: '1 day', value: Period.DAY },
              { label: '1 week', value: Period.WEEK },
              { label: '1 month', value: Period.MONTH },
              { label: '3 months', value: Period.THREE_MONTH },
              { label: '6 months', value: Period.SIX_MONTH },
              { label: '1 year', value: Period.YEAR },
              { label: '2 years', value: Period.TWO_YEARS },
              { label: '3 years', value: Period.THREE_YEARS },
              { label: '4 years', value: Period.FOUR_YEARS },
              { label: '5 years', value: Period.FIVE_YEARS },
              { label: 'Forever', value: Period.FOREVER },
            ]}
          />

          <Tooltip
            className="cursor-pointer"
            title="How long to keep the backups? Make sure you have enough storage space."
          >
            <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
          </Tooltip>
        </div>
      </div>

      {backupConfig.isBackupsEnabled && (
        <>
          <div className="mt-4 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-start">
@@ -546,9 +514,152 @@ export const EditBackupConfigComponent = ({
        </>
      )}

      <div className="mt-4 mb-1 flex items-center">
        <div
          className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
          onClick={() => setShowAdvanced(!isShowAdvanced)}
        >
          <span className="mr-2">Advanced settings</span>

          {isShowAdvanced ? (
            <UpOutlined style={{ fontSize: '12px' }} />
          ) : (
            <DownOutlined style={{ fontSize: '12px' }} />
          )}
        </div>
      </div>

      {isShowAdvanced && backupConfig.isBackupsEnabled && (
        <>
          <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
            <div className="mb-1 min-w-[150px] sm:mb-0">Retry backup if failed</div>
            <div className="flex items-center">
              <Switch
                size="small"
                checked={backupConfig.isRetryIfFailed}
                onChange={(checked) => updateBackupConfig({ isRetryIfFailed: checked })}
              />

              <Tooltip
                className="cursor-pointer"
                title="Automatically retry failed backups. Backups can fail due to network failures, storage issues or temporary database unavailability."
              >
                <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
              </Tooltip>
            </div>
          </div>

          {backupConfig.isRetryIfFailed && (
            <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
              <div className="mb-1 min-w-[150px] sm:mb-0">Max failed tries count</div>
              <div className="flex items-center">
                <InputNumber
                  min={1}
                  max={10}
                  value={backupConfig.maxFailedTriesCount}
                  onChange={(value) => updateBackupConfig({ maxFailedTriesCount: value || 1 })}
                  size="small"
                  className="w-full max-w-[200px] grow"
                />

                <Tooltip
                  className="cursor-pointer"
                  title="Maximum number of retry attempts for failed backups. You will receive a notification when all tries have failed."
                >
                  <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
                </Tooltip>
              </div>
            </div>
          )}

          <div className="mt-5 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
            <div className="mb-1 min-w-[150px] sm:mb-0">Max backup size limit</div>
            <div className="flex items-center">
              <Checkbox
                checked={backupConfig.maxBackupSizeMb > 0}
                onChange={(e) => {
                  updateBackupConfig({
                    maxBackupSizeMb: e.target.checked ? backupConfig.maxBackupSizeMb || 1000 : 0,
                  });
                }}
              >
                Enable
              </Checkbox>

              <Tooltip
                className="cursor-pointer"
                title="Limits the size of each individual backup. Note that backups are typically 15× smaller than the database size. For example, a 100 MB backup represents approximately 1.5 GB database."
              >
                <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
              </Tooltip>
            </div>
          </div>

          {backupConfig.maxBackupSizeMb > 0 && (
            <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
              <div className="mb-1 min-w-[150px] sm:mb-0">Max backup size (MB)</div>

              <InputNumber
                min={1}
                value={backupConfig.maxBackupSizeMb}
                onChange={(value) => updateBackupConfig({ maxBackupSizeMb: value || 1 })}
                size="small"
                className="w-full max-w-[100px] grow"
              />

              <div className="ml-2 text-xs text-gray-600 dark:text-gray-400">
                {(backupConfig.maxBackupSizeMb / 1024).toFixed(2)} GB
              </div>
            </div>
          )}

          <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
            <div className="mb-1 min-w-[150px] sm:mb-0">Limit total backups size</div>
            <div className="flex items-center">
              <Checkbox
                checked={backupConfig.maxBackupsTotalSizeMb > 0}
                onChange={(e) => {
                  updateBackupConfig({
                    maxBackupsTotalSizeMb: e.target.checked
                      ? backupConfig.maxBackupsTotalSizeMb || 1_000_000
                      : 0,
                  });
                }}
              >
                Enable
              </Checkbox>

              <Tooltip
                className="cursor-pointer"
                title="Limits the total size of all backups in storage. Once this limit is exceeded, the oldest backups are automatically removed until the total size is within the limit again."
              >
                <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
              </Tooltip>
            </div>
          </div>

          {backupConfig.maxBackupsTotalSizeMb > 0 && (
            <div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
              <div className="mb-1 min-w-[150px] sm:mb-0">Max total size (MB)</div>
              <InputNumber
                min={1}
                value={backupConfig.maxBackupsTotalSizeMb}
                onChange={(value) => updateBackupConfig({ maxBackupsTotalSizeMb: value || 1 })}
                size="small"
                className="w-full max-w-[100px] grow"
              />

              <div className="ml-2 text-xs text-gray-600 dark:text-gray-400">
                {(backupConfig.maxBackupsTotalSizeMb / 1024).toFixed(2)} GB
              </div>
            </div>
          )}
        </>
      )}

      <div className="mt-5 flex">
        {isShowBackButton && (
          <Button className="mr-1" onClick={onBack}>
          <Button className="mr-1" type="primary" ghost onClick={onBack}>
            Back
          </Button>
        )}

@@ -137,9 +137,11 @@ export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Pro
        database={database}
        onReadOnlyUserUpdated={(database) => {
          setDatabase({ ...database });
          setStep('backup-config');
        }}
        onGoBack={() => setStep('db-settings')}
        onContinue={() => setStep('backup-config')}
        onSkipped={() => setStep('backup-config')}
        onAlreadyExists={() => setStep('backup-config')}
      />
    );
  }

@@ -8,7 +8,8 @@ interface Props {
  onReadOnlyUserUpdated: (database: Database) => void;

  onGoBack: () => void;
  onContinue: () => void;
  onSkipped: () => void;
  onAlreadyExists: () => void;
}

const PRIVILEGES_TRUNCATE_LENGTH = 50;
@@ -17,7 +18,8 @@ export const CreateReadOnlyComponent = ({
  database,
  onReadOnlyUserUpdated,
  onGoBack,
  onContinue,
  onSkipped,
  onAlreadyExists,
}: Props) => {
  const [isCheckingReadOnlyUser, setIsCheckingReadOnlyUser] = useState(false);
  const [isCreatingReadOnlyUser, setIsCreatingReadOnlyUser] = useState(false);
@@ -87,7 +89,6 @@ export const CreateReadOnlyComponent = ({
      }

      onReadOnlyUserUpdated(database);
      onContinue();
    } catch (e) {
      alert((e as Error).message);
    }
@@ -101,7 +102,7 @@ export const CreateReadOnlyComponent = ({

  const handleSkipConfirmed = () => {
    setShowSkipConfirmation(false);
    onContinue();
    onSkipped();
  };

  useEffect(() => {
@@ -110,7 +111,7 @@ export const CreateReadOnlyComponent = ({

      const isReadOnly = await checkReadOnlyUser();
      if (isReadOnly) {
        onContinue();
        onAlreadyExists();
      }

      setIsCheckingReadOnlyUser(false);
@@ -228,7 +229,7 @@ export const CreateReadOnlyComponent = ({
        </div>

        <div className="flex justify-end">
          <Button className="mr-2" danger onClick={handleSkipConfirmed}>
          <Button className="mr-2" danger ghost onClick={handleSkipConfirmed}>
            Yes, I accept risks
          </Button>

@@ -1,4 +1,8 @@
import { type Database, DatabaseType } from '../../../../entity/databases';
import { Modal } from 'antd';
import { useState } from 'react';

import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases';
import { CreateReadOnlyComponent } from './CreateReadOnlyComponent';
import { EditMariaDbSpecificDataComponent } from './EditMariaDbSpecificDataComponent';
import { EditMongoDbSpecificDataComponent } from './EditMongoDbSpecificDataComponent';
import { EditMySqlSpecificDataComponent } from './EditMySqlSpecificDataComponent';
@@ -36,19 +40,84 @@ export const EditDatabaseSpecificDataComponent = ({
  isShowDbName = true,
  isRestoreMode = false,
}: Props) => {
  const [isShowReadOnlyDialog, setIsShowReadOnlyDialog] = useState(false);
  const [editingDatabase, setEditingDatabase] = useState<Database>(database);

  const saveDb = async (databaseToSave: Database) => {
    setEditingDatabase(databaseToSave);

    if (!isSaveToApi) {
      onSaved(databaseToSave);
      return;
    }

    try {
      const result = await databaseApi.isUserReadOnly(databaseToSave);

      if (result.isReadOnly) {
        onSaved(databaseToSave);
      } else {
        setIsShowReadOnlyDialog(true);
      }
    } catch (e) {
      alert((e as Error).message);
    }
  };

  const onReadOnlyUserCreated = (updatedDatabase: Database) => {
    setEditingDatabase(updatedDatabase);
    setIsShowReadOnlyDialog(false);
  };

  const skipReadOnlyUser = () => {
    setIsShowReadOnlyDialog(false);
    onSaved(editingDatabase);
  };

  if (isShowReadOnlyDialog) {
    return (
      <Modal
        title="Create read-only user"
        footer={<div />}
        open={isShowReadOnlyDialog}
        onCancel={() => setIsShowReadOnlyDialog(false)}
        maskClosable={false}
        width={450}
      >
        <CreateReadOnlyComponent
          database={editingDatabase}
          onReadOnlyUserUpdated={(db) => {
            console.log('onReadOnlyUserUpdated', db);
            onReadOnlyUserCreated(db);
          }}
          onGoBack={() => {
            setIsShowReadOnlyDialog(false);
          }}
          onSkipped={() => {
            skipReadOnlyUser();
          }}
          onAlreadyExists={() => {
            console.log('onAlreadyExists');
            onSaved(editingDatabase);
          }}
        />
      </Modal>
    );
  }

  const commonProps = {
    database,
    database: editingDatabase,
    isShowCancelButton,
    onCancel,
    isShowBackButton,
    onBack,
    saveButtonText,
    isSaveToApi,
    onSaved,
    onSaved: saveDb,
    isShowDbName,
  };

  switch (database.type) {
  switch (editingDatabase.type) {
    case DatabaseType.POSTGRES:
      return <EditPostgreSqlSpecificDataComponent {...commonProps} isRestoreMode={isRestoreMode} />;
    case DatabaseType.MYSQL: