Compare commits

...

17 Commits

Author SHA1 Message Date
Rostislav Dugin
ae27f74c2e Merge pull request #304 from databasus/develop
FIX (playground): Fix flaky test with impossible value
2026-01-23 12:38:06 +03:00
Rostislav Dugin
9457516bb9 FIX (playground): Fix flaky test with impossible value 2026-01-23 12:37:10 +03:00
Rostislav Dugin
a36fc5bf8c Merge pull request #303 from databasus/develop
Develop
2026-01-23 12:24:29 +03:00
Rostislav Dugin
03ada5806d FEATURE (pre-commit): Add building step to pre-commit 2026-01-23 12:22:31 +03:00
Rostislav Dugin
a6675390e5 FIX (cors): Allow CORS for healthcheck endpoint 2026-01-23 12:04:29 +03:00
Rostislav Dugin
af2f978876 FEATURE (playground): Add playground 2026-01-23 12:00:56 +03:00
Rostislav Dugin
04e7eba5c5 Merge pull request #300 from databasus/develop
FIX (ci \ cd): Add build step after lint step for frontend to catch b…
2026-01-20 08:40:14 +03:00
Rostislav Dugin
520165541d FIX (ci \ cd): Add build step after lint step for frontend to catch build issues 2026-01-20 08:39:28 +03:00
Rostislav Dugin
5b556bc161 Merge pull request #299 from databasus/develop
Develop
2026-01-20 08:26:57 +03:00
Rostislav Dugin
0952a15ec5 FEATURE (navbar): Update navbar style 2026-01-20 08:25:58 +03:00
Rostislav Dugin
1afb3aa3ff Merge pull request #298 from tim-sas-kramp/main
FIX (theme): Integrate theme support for GitHub button color scheme
2026-01-20 07:25:57 +03:00
tim-sas-kramp
19b92e5f74 FIX (theme): Integrate theme support for GitHub button color scheme 2026-01-19 21:17:24 +00:00
Rostislav Dugin
d4763f26b2 Merge pull request #296 from databasus/develop
Develop
2026-01-19 19:27:03 +03:00
Rostislav Dugin
0e389ba16b FIX (backups): Allow parallel backups for different DBs 2026-01-19 19:26:03 +03:00
Rostislav Dugin
594a3294c6 FEATURE (limits): Add max backup size limit and total backups size limit 2026-01-19 19:26:03 +03:00
Rostislav Dugin
4e4a323cf1 FEATURE (config): Suggest read-only user creation when DB config changed 2026-01-19 19:26:03 +03:00
Rostislav Dugin
7d9ecf697b FIX (backups): Do not allow 2 parallel backups for the same DB 2026-01-19 19:26:03 +03:00
89 changed files with 4452 additions and 530 deletions

View File

@@ -81,6 +81,11 @@ jobs:
cd frontend
npm run lint
- name: Build frontend
run: |
cd frontend
npm run build
test-frontend:
runs-on: ubuntu-latest
needs: [lint-frontend]

View File

@@ -18,6 +18,13 @@ repos:
files: ^frontend/.*\.(ts|tsx|js|jsx)$
pass_filenames: false
- id: frontend-build
name: Frontend Build
entry: bash -c "cd frontend && npm run build"
language: system
files: ^frontend/.*\.(ts|tsx|js|jsx|json|css)$
pass_filenames: false
# Backend checks
- repo: local
hooks:

View File

@@ -251,6 +251,18 @@ fi
# PostgreSQL 17 binary paths
PG_BIN="/usr/lib/postgresql/17/bin"
# Generate runtime configuration for frontend
echo "Generating runtime configuration..."
cat > /app/ui/build/runtime-config.js << 'JSEOF'
// Runtime configuration injected at container startup
// This file is generated dynamically and should not be edited manually
window.__RUNTIME_CONFIG__ = {
IS_CLOUD: '\${IS_CLOUD:-false}',
GITHUB_CLIENT_ID: '\${GITHUB_CLIENT_ID:-}',
GOOGLE_CLIENT_ID: '\${GOOGLE_CLIENT_ID:-}'
};
JSEOF
# Ensure proper ownership of data directory
echo "Setting up data directory permissions..."
mkdir -p /databasus-data/pgdata

View File

@@ -272,6 +272,10 @@ func runBackgroundTasks(log *slog.Logger) {
backuping.GetBackupsScheduler().Run(ctx)
})
go runWithPanicLogging(log, "backup cleaner background service", func() {
backuping.GetBackupCleaner().Run(ctx)
})
go runWithPanicLogging(log, "restore background service", func() {
restoring.GetRestoresScheduler().Run(ctx)
})

View File

@@ -29,6 +29,7 @@ type EnvVariables struct {
MariadbInstallDir string `env:"MARIADB_INSTALL_DIR"`
MongodbInstallDir string `env:"MONGODB_INSTALL_DIR"`
IsCloud bool `env:"IS_CLOUD"`
TestLocalhost string `env:"TEST_LOCALHOST"`
ShowDbInstallationVerificationLogs bool `env:"SHOW_DB_INSTALLATION_VERIFICATION_LOGS"`
@@ -182,6 +183,11 @@ func loadEnvVariables() {
env.IsSkipExternalResourcesTests = false
}
// Set default value for IsCloud if not defined
if os.Getenv("IS_CLOUD") == "" {
env.IsCloud = false
}
for _, arg := range os.Args {
if strings.Contains(arg, "test") {
env.IsTesting = true

View File

@@ -70,16 +70,18 @@ func (n *BackuperNode) Run(ctx context.Context) {
}
backupHandler := func(backupID uuid.UUID, isCallNotifier bool) {
n.MakeBackup(backupID, isCallNotifier)
if err := n.backupNodesRegistry.PublishBackupCompletion(n.nodeID, backupID); err != nil {
n.logger.Error(
"Failed to publish backup completion",
"error",
err,
"backupID",
backupID,
)
}
go func() {
n.MakeBackup(backupID, isCallNotifier)
if err := n.backupNodesRegistry.PublishBackupCompletion(n.nodeID, backupID); err != nil {
n.logger.Error(
"Failed to publish backup completion",
"error",
err,
"backupID",
backupID,
)
}
}()
}
err := n.backupNodesRegistry.SubscribeNodeForBackupsAssignment(n.nodeID, backupHandler)
@@ -157,21 +159,41 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
start := time.Now().UTC()
ctx, cancel := context.WithCancel(context.Background())
n.backupCancelManager.RegisterTask(backup.ID, cancel)
defer n.backupCancelManager.UnregisterTask(backup.ID)
backupProgressListener := func(
completedMBs float64,
) {
backup.BackupSizeMb = completedMBs
backup.BackupDurationMs = time.Since(start).Milliseconds()
// Check size limit (0 = unlimited)
if backupConfig.MaxBackupSizeMB > 0 &&
completedMBs > float64(backupConfig.MaxBackupSizeMB) {
errMsg := fmt.Sprintf(
"backup size (%.2f MB) exceeded maximum allowed size (%d MB)",
completedMBs,
backupConfig.MaxBackupSizeMB,
)
backup.Status = backups_core.BackupStatusFailed
backup.IsSkipRetry = true
backup.FailMessage = &errMsg
if err := n.backupRepository.Save(backup); err != nil {
n.logger.Error("Failed to save backup with size exceeded error", "error", err)
}
cancel() // Cancel the backup context
return
}
if err := n.backupRepository.Save(backup); err != nil {
n.logger.Error("Failed to update backup progress", "error", err)
}
}
ctx, cancel := context.WithCancel(context.Background())
n.backupCancelManager.RegisterTask(backup.ID, cancel)
defer n.backupCancelManager.UnregisterTask(backup.ID)
backupMetadata, err := n.createBackupUseCase.Execute(
ctx,
backup.ID,
@@ -181,6 +203,29 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
backupProgressListener,
)
if err != nil {
// Check if backup was already marked as failed by progress listener (e.g., size limit exceeded)
// If so, skip error handling to avoid overwriting the status
currentBackup, fetchErr := n.backupRepository.FindByID(backup.ID)
if fetchErr == nil && currentBackup.Status == backups_core.BackupStatusFailed {
n.logger.Warn(
"Backup already marked as failed by progress listener, skipping error handling",
"backupId",
backup.ID,
"failMessage",
*currentBackup.FailMessage,
)
// Still call notification for size limit failures
n.SendBackupNotification(
backupConfig,
currentBackup,
backups_config.NotificationBackupFailed,
currentBackup.FailMessage,
)
return
}
errMsg := err.Error()
// Log detailed error information for debugging

View File

@@ -1,13 +1,10 @@
package backuping
import (
"context"
"errors"
"strings"
"testing"
"time"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -18,7 +15,6 @@ import (
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
cache_utils "databasus-backend/internal/util/cache"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
@@ -158,35 +154,120 @@ func Test_BackupExecuted_NotificationSent(t *testing.T) {
})
}
type CreateFailedBackupUsecase struct {
}
func Test_BackupSizeLimits(t *testing.T) {
cache_utils.ClearAllCache()
user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
router := CreateTestRouter()
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
func (uc *CreateFailedBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
backupProgressListener(10)
return nil, errors.New("backup failed")
}
defer func() {
// cleanup backups first
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
type CreateSuccessBackupUsecase struct{}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond) // Wait for cascading deletes
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
func (uc *CreateSuccessBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
backupProgressListener(10)
return &common.BackupMetadata{
EncryptionSalt: nil,
EncryptionIV: nil,
Encryption: backups_config.BackupEncryptionNone,
}, nil
t.Run("UnlimitedSize_MaxBackupSizeMBIsZero_BackupCompletes", func(t *testing.T) {
// Enable backups with unlimited size (0)
backupConfig := backups_config.EnableBackupsForTestDatabase(database.ID, storage)
backupConfig.MaxBackupSizeMB = 0 // unlimited
backupConfig, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
backuperNode := CreateTestBackuperNode()
backuperNode.createBackupUseCase = &CreateLargeBackupUsecase{}
// Create a backup record
backup := &backups_core.Backup{
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusInProgress,
CreatedAt: time.Now().UTC(),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backuperNode.MakeBackup(backup.ID, false)
// Verify backup completed successfully even with large size
updatedBackup, err := backupRepository.FindByID(backup.ID)
assert.NoError(t, err)
assert.Equal(t, backups_core.BackupStatusCompleted, updatedBackup.Status)
assert.Equal(t, float64(10000), updatedBackup.BackupSizeMb)
assert.Nil(t, updatedBackup.FailMessage)
})
t.Run("SizeExceeded_BackupFailedWithIsSkipRetry", func(t *testing.T) {
// Enable backups with 5 MB limit
backupConfig := backups_config.EnableBackupsForTestDatabase(database.ID, storage)
backupConfig.MaxBackupSizeMB = 5
backupConfig, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
backuperNode := CreateTestBackuperNode()
backuperNode.createBackupUseCase = &CreateProgressiveBackupUsecase{}
// Create a backup record
backup := &backups_core.Backup{
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusInProgress,
CreatedAt: time.Now().UTC(),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backuperNode.MakeBackup(backup.ID, false)
// Verify backup was marked as failed with IsSkipRetry=true
updatedBackup, err := backupRepository.FindByID(backup.ID)
assert.NoError(t, err)
assert.Equal(t, backups_core.BackupStatusFailed, updatedBackup.Status)
assert.True(t, updatedBackup.IsSkipRetry)
assert.NotNil(t, updatedBackup.FailMessage)
assert.Contains(t, *updatedBackup.FailMessage, "exceeded maximum allowed size")
assert.Contains(t, *updatedBackup.FailMessage, "10.00 MB")
assert.Contains(t, *updatedBackup.FailMessage, "5 MB")
assert.Greater(t, updatedBackup.BackupSizeMb, float64(5))
})
t.Run("SizeWithinLimit_BackupCompletes", func(t *testing.T) {
// Enable backups with 100 MB limit
backupConfig := backups_config.EnableBackupsForTestDatabase(database.ID, storage)
backupConfig.MaxBackupSizeMB = 100
backupConfig, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
backuperNode := CreateTestBackuperNode()
backuperNode.createBackupUseCase = &CreateMediumBackupUsecase{}
// Create a backup record
backup := &backups_core.Backup{
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusInProgress,
CreatedAt: time.Now().UTC(),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backuperNode.MakeBackup(backup.ID, false)
// Verify backup completed successfully
updatedBackup, err := backupRepository.FindByID(backup.ID)
assert.NoError(t, err)
assert.Equal(t, backups_core.BackupStatusCompleted, updatedBackup.Status)
assert.Equal(t, float64(50), updatedBackup.BackupSizeMb)
assert.Nil(t, updatedBackup.FailMessage)
})
}

View File

@@ -0,0 +1,242 @@
package backuping
import (
"context"
"fmt"
"log/slog"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/storages"
util_encryption "databasus-backend/internal/util/encryption"
"databasus-backend/internal/util/period"
)
const (
	// cleanerTickerInterval is how often the cleaner wakes up to sweep for
	// expired and size-exceeding backups.
	cleanerTickerInterval = 1 * time.Minute
)
// BackupCleaner periodically removes backups that are past their configured
// store period or that push a database over its total backups size limit.
// Its loop is expected to be started at most once per instance (see Run).
type BackupCleaner struct {
	backupRepository    *backups_core.BackupRepository
	storageService      *storages.StorageService
	backupConfigService *backups_config.BackupConfigService
	fieldEncryptor      util_encryption.FieldEncryptor
	logger              *slog.Logger
	// backupRemoveListeners are notified before each backup removal and can
	// veto the removal by returning an error.
	backupRemoveListeners []backups_core.BackupRemoveListener
	// runOnce and hasRun together guard against Run being started twice.
	runOnce sync.Once
	hasRun  atomic.Bool
}
// Run starts the periodic cleanup loop and blocks until ctx is cancelled.
// Every cleanerTickerInterval it removes backups older than their configured
// store period and trims databases whose backups exceed the total-size limit;
// sweep errors are logged and do not stop the loop.
// Run must be called at most once per BackupCleaner; any later call panics.
func (c *BackupCleaner) Run(ctx context.Context) {
	// Swap marks the cleaner as started and detects reuse in one atomic step.
	// The previous load-then-Do sequence had a gap: while the first Run was
	// still looping, a second caller would block forever inside runOnce.Do
	// (the first callback never returns) and never reach the panic.
	if c.hasRun.Swap(true) {
		panic(fmt.Sprintf("%T.Run() called multiple times", c))
	}
	c.runOnce.Do(func() {
		// Bail out immediately if the context was cancelled before start.
		if ctx.Err() != nil {
			return
		}
		ticker := time.NewTicker(cleanerTickerInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				if err := c.cleanOldBackups(); err != nil {
					c.logger.Error("Failed to clean old backups", "error", err)
				}
				if err := c.cleanExceededBackups(); err != nil {
					c.logger.Error("Failed to clean exceeded backups", "error", err)
				}
			}
		}
	})
}
// DeleteBackup removes a single backup: registered listeners are consulted
// first (any error vetoes the removal), then the backup file is deleted from
// its storage on a best-effort basis, and finally the database record is
// removed.
func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
	for _, l := range c.backupRemoveListeners {
		if err := l.OnBeforeBackupRemove(backup); err != nil {
			return err
		}
	}
	st, err := c.storageService.GetStorageByID(backup.StorageID)
	if err != nil {
		return err
	}
	if delErr := st.DeleteFile(c.fieldEncryptor, backup.ID); delErr != nil {
		// Deliberately non-fatal: cleanup may run against a storage that is
		// temporarily unreachable or already removed/changed (e.g. an S3
		// bucket that is not available yet), and that must not block
		// deleting the database record.
		c.logger.Error("Failed to delete backup file", "error", delErr)
	}
	return c.backupRepository.DeleteByID(backup.ID)
}
// AddBackupRemoveListener registers l to be invoked before every backup
// removal performed by this cleaner.
func (c *BackupCleaner) AddBackupRemoveListener(l backups_core.BackupRemoveListener) {
	c.backupRemoveListeners = append(c.backupRemoveListeners, l)
}
// cleanOldBackups deletes every backup created before its database's
// configured store period. Databases set to keep backups forever are skipped.
// Per-database and per-backup failures are logged and do not stop the sweep;
// only a failure to list the enabled configs aborts it.
func (c *BackupCleaner) cleanOldBackups() error {
	configs, err := c.backupConfigService.GetBackupConfigsWithEnabledBackups()
	if err != nil {
		return err
	}
	for _, cfg := range configs {
		if cfg.StorePeriod == period.PeriodForever {
			continue
		}
		// Anything created before this cutoff is past its retention window.
		cutoff := time.Now().UTC().Add(-cfg.StorePeriod.ToDuration())
		expired, findErr := c.backupRepository.FindBackupsBeforeDate(
			cfg.DatabaseID,
			cutoff,
		)
		if findErr != nil {
			c.logger.Error(
				"Failed to find old backups for database",
				"databaseId",
				cfg.DatabaseID,
				"error",
				findErr,
			)
			continue
		}
		for _, b := range expired {
			if delErr := c.DeleteBackup(b); delErr != nil {
				c.logger.Error("Failed to delete old backup", "backupId", b.ID, "error", delErr)
				continue
			}
			c.logger.Info(
				"Deleted old backup",
				"backupId",
				b.ID,
				"databaseId",
				cfg.DatabaseID,
			)
		}
	}
	return nil
}
// cleanExceededBackups enforces the per-database total-size cap for every
// database with backups enabled. A cap of zero or less means unlimited and is
// skipped. Per-database failures are logged and do not stop the sweep.
func (c *BackupCleaner) cleanExceededBackups() error {
	configs, err := c.backupConfigService.GetBackupConfigsWithEnabledBackups()
	if err != nil {
		return err
	}
	for _, cfg := range configs {
		if cfg.MaxBackupsTotalSizeMB <= 0 {
			continue
		}
		trimErr := c.cleanExceededBackupsForDatabase(
			cfg.DatabaseID,
			cfg.MaxBackupsTotalSizeMB,
		)
		if trimErr == nil {
			continue
		}
		c.logger.Error(
			"Failed to clean exceeded backups for database",
			"databaseId",
			cfg.DatabaseID,
			"error",
			trimErr,
		)
	}
	return nil
}
// cleanExceededBackupsForDatabase deletes the oldest finished backups of the
// given database, one at a time, until the reported total size of its backups
// no longer exceeds limitPerDbMB (megabytes). In-progress backups are never
// deleted. It returns the first repository or deletion error encountered;
// reaching the limit, or running out of deletable backups, is not an error.
func (c *BackupCleaner) cleanExceededBackupsForDatabase(
	databaseID uuid.UUID,
	limitPerDbMB int64, // renamed from limitperDbMB for Go MixedCaps naming
) error {
	for {
		backupsTotalSizeMB, err := c.backupRepository.GetTotalSizeByDatabase(databaseID)
		if err != nil {
			return err
		}
		if backupsTotalSizeMB <= float64(limitPerDbMB) {
			break
		}
		// Delete a single oldest backup per iteration so the total size is
		// re-read from the repository after every deletion.
		oldestBackups, err := c.backupRepository.FindOldestByDatabaseExcludingInProgress(
			databaseID,
			1,
		)
		if err != nil {
			return err
		}
		if len(oldestBackups) == 0 {
			// Nothing left we are allowed to delete (e.g. only in-progress
			// backups remain) — bail out instead of spinning forever.
			c.logger.Warn(
				"No backups to delete but still over limit",
				"databaseId",
				databaseID,
				"totalSizeMB",
				backupsTotalSizeMB,
				"limitMB",
				limitPerDbMB,
			)
			break
		}
		backup := oldestBackups[0]
		if err := c.DeleteBackup(backup); err != nil {
			c.logger.Error(
				"Failed to delete exceeded backup",
				"backupId",
				backup.ID,
				"databaseId",
				databaseID,
				"error",
				err,
			)
			return err
		}
		c.logger.Info(
			"Deleted exceeded backup",
			"backupId",
			backup.ID,
			"databaseId",
			databaseID,
			"backupSizeMB",
			backup.BackupSizeMb,
			"totalSizeMB",
			backupsTotalSizeMB,
			"limitMB",
			limitPerDbMB,
		)
	}
	return nil
}

View File

@@ -0,0 +1,491 @@
package backuping
import (
"testing"
"time"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/intervals"
"databasus-backend/internal/features/notifiers"
"databasus-backend/internal/features/storages"
users_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"databasus-backend/internal/storage"
"databasus-backend/internal/util/period"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
// Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod verifies that
// cleanOldBackups removes backups created before the configured store period
// (one week here) while keeping newer ones.
// NOTE(review): fixtures created here are not torn down — presumably the test
// DB is reset between runs; confirm against the suite setup.
func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create backup interval
	interval := createTestInterval()
	// One-week retention: backups older than 7 days should be removed.
	backupConfig := &backups_config.BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek,
		StorageID:        &storage.ID,
		BackupIntervalID: interval.ID,
		BackupInterval:   interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create backups with different ages
	now := time.Now().UTC()
	oldBackup1 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    now.Add(-10 * 24 * time.Hour), // 10 days old
	}
	oldBackup2 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    now.Add(-8 * 24 * time.Hour), // 8 days old
	}
	recentBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    now.Add(-3 * 24 * time.Hour), // 3 days old
	}
	err = backupRepository.Save(oldBackup1)
	assert.NoError(t, err)
	err = backupRepository.Save(oldBackup2)
	assert.NoError(t, err)
	err = backupRepository.Save(recentBackup)
	assert.NoError(t, err)
	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanOldBackups()
	assert.NoError(t, err)
	// Verify old backups deleted, recent backup remains
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(remainingBackups))
	assert.Equal(t, recentBackup.ID, remainingBackups[0].ID)
}
// Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod verifies that
// cleanOldBackups never deletes backups for a database whose store period is
// PeriodForever, no matter how old they are.
func Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create backup interval
	interval := createTestInterval()
	// Retention disabled: backups are kept forever.
	backupConfig := &backups_config.BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodForever,
		StorageID:        &storage.ID,
		BackupIntervalID: interval.ID,
		BackupInterval:   interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create very old backup
	oldBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    time.Now().UTC().Add(-365 * 24 * time.Hour), // 1 year old
	}
	err = backupRepository.Save(oldBackup)
	assert.NoError(t, err)
	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanOldBackups()
	assert.NoError(t, err)
	// Verify backup still exists
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(remainingBackups))
	assert.Equal(t, oldBackup.ID, remainingBackups[0].ID)
}
// Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted verifies that
// cleanExceededBackups leaves all backups in place when their total size
// (3 × 16.67 ≈ 50 MB) is below the configured 100 MB cap.
func Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create backup interval
	interval := createTestInterval()
	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 100, // 100 MB limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create 3 backups totaling 50MB (under limit)
	for i := 0; i < 3; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 16.67,
			CreatedAt:    time.Now().UTC().Add(-time.Duration(i) * time.Hour),
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
	}
	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)
	// Verify all backups remain
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 3, len(remainingBackups))
}
// Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups verifies that
// when total backup size (5 × 10 = 50 MB) exceeds the 30 MB cap, the cleaner
// deletes backups oldest-first until the remaining total (3 × 10 = 30 MB) fits.
func Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create backup interval
	interval := createTestInterval()
	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 30, // 30 MB limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create 5 backups of 10MB each (total 50MB, over 30MB limit)
	now := time.Now().UTC()
	var backupIDs []uuid.UUID
	for i := 0; i < 5; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 10,
			CreatedAt:    now.Add(-time.Duration(4-i) * time.Hour), // Oldest first
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
		backupIDs = append(backupIDs, backup.ID)
	}
	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)
	// Verify 2 oldest backups deleted, 3 newest remain
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 3, len(remainingBackups))
	// Check that the newest 3 backups remain
	remainingIDs := make(map[uuid.UUID]bool)
	for _, backup := range remainingBackups {
		remainingIDs[backup.ID] = true
	}
	assert.False(t, remainingIDs[backupIDs[0]]) // Oldest deleted
	assert.False(t, remainingIDs[backupIDs[1]]) // 2nd oldest deleted
	assert.True(t, remainingIDs[backupIDs[2]])  // 3rd remains
	assert.True(t, remainingIDs[backupIDs[3]])  // 4th remains
	assert.True(t, remainingIDs[backupIDs[4]])  // Newest remains
}
// Test_CleanExceededBackups_SkipsInProgressBackups verifies that in-progress
// backups are neither counted toward the size total nor deleted when the
// cleaner trims a database over its cap.
func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create backup interval
	interval := createTestInterval()
	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 50, // 50 MB limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	now := time.Now().UTC()
	// Create 3 completed backups of 30MB each
	completedBackups := make([]*backups_core.Backup, 3)
	for i := 0; i < 3; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 30,
			CreatedAt:    now.Add(-time.Duration(3-i) * time.Hour),
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
		completedBackups[i] = backup
	}
	// Create 1 in-progress backup (should be excluded from size calculation and deletion)
	inProgressBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusInProgress,
		BackupSizeMb: 10,
		CreatedAt:    now,
	}
	err = backupRepository.Save(inProgressBackup)
	assert.NoError(t, err)
	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)
	// Verify: only completed backups deleted, in-progress remains
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	// Should have in-progress + 1 completed (total 40MB completed + 10MB in-progress)
	assert.GreaterOrEqual(t, len(remainingBackups), 2)
	// Verify in-progress backup still exists
	var inProgressFound bool
	for _, backup := range remainingBackups {
		if backup.ID == inProgressBackup.ID {
			inProgressFound = true
			assert.Equal(t, backups_core.BackupStatusInProgress, backup.Status)
		}
	}
	assert.True(t, inProgressFound, "In-progress backup should not be deleted")
}
// Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase verifies that a
// MaxBackupsTotalSizeMB of 0 means "unlimited": the cleaner must not delete
// anything for that database regardless of total size (10 × 100 MB here).
func Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create backup interval
	interval := createTestInterval()
	backupConfig := &backups_config.BackupConfig{
		DatabaseID:            database.ID,
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodForever,
		StorageID:             &storage.ID,
		MaxBackupsTotalSizeMB: 0, // No size limit
		BackupIntervalID:      interval.ID,
		BackupInterval:        interval,
	}
	_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create large backups
	for i := 0; i < 10; i++ {
		backup := &backups_core.Backup{
			ID:           uuid.New(),
			DatabaseID:   database.ID,
			StorageID:    storage.ID,
			Status:       backups_core.BackupStatusCompleted,
			BackupSizeMb: 100,
			CreatedAt:    time.Now().UTC().Add(-time.Duration(i) * time.Hour),
		}
		err = backupRepository.Save(backup)
		assert.NoError(t, err)
	}
	// Run cleanup
	cleaner := GetBackupCleaner()
	err = cleaner.cleanExceededBackups()
	assert.NoError(t, err)
	// Verify all backups remain
	remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Equal(t, 10, len(remainingBackups))
}
// Test_GetTotalSizeByDatabase_CalculatesCorrectly verifies that the
// repository's total-size query sums completed AND failed backups
// (10.5 + 20.3 + 5.2 = 36.0 MB) while excluding in-progress ones.
func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Create completed backups
	completedBackup1 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10.5,
		CreatedAt:    time.Now().UTC(),
	}
	completedBackup2 := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 20.3,
		CreatedAt:    time.Now().UTC(),
	}
	// Create failed backup (should be included)
	failedBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusFailed,
		BackupSizeMb: 5.2,
		CreatedAt:    time.Now().UTC(),
	}
	// Create in-progress backup (should be excluded)
	inProgressBackup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusInProgress,
		BackupSizeMb: 100,
		CreatedAt:    time.Now().UTC(),
	}
	err := backupRepository.Save(completedBackup1)
	assert.NoError(t, err)
	err = backupRepository.Save(completedBackup2)
	assert.NoError(t, err)
	err = backupRepository.Save(failedBackup)
	assert.NoError(t, err)
	err = backupRepository.Save(inProgressBackup)
	assert.NoError(t, err)
	// Calculate total size
	totalSize, err := backupRepository.GetTotalSizeByDatabase(database.ID)
	assert.NoError(t, err)
	// Should be 10.5 + 20.3 + 5.2 = 36.0 (excluding in-progress 100)
	// InDelta tolerates float accumulation error from the SQL SUM.
	assert.InDelta(t, 36.0, totalSize, 0.1)
}
// mockBackupRemoveListener is a test double for
// backups_core.BackupRemoveListener whose behavior is injected through the
// onBeforeBackupRemove callback.
type mockBackupRemoveListener struct {
	onBeforeBackupRemove func(*backups_core.Backup) error
}

// OnBeforeBackupRemove delegates to the injected callback, or succeeds when
// no callback was configured.
func (m *mockBackupRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error {
	if m.onBeforeBackupRemove == nil {
		return nil
	}
	return m.onBeforeBackupRemove(backup)
}
// Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase verifies resilience
// when storage becomes unavailable. Even if storage.DeleteFile fails (e.g., storage is offline,
// credentials changed, or storage was deleted), the backup record should still be removed from
// the database. This prevents orphaned backup records when storage is no longer accessible.
func Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase(t *testing.T) {
	router := CreateTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	testStorage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, testStorage, notifier)
	// The backup record points at a file that was never uploaded, so the
	// storage-side delete is expected to fail while the DB delete succeeds.
	backup := &backups_core.Backup{
		ID:           uuid.New(),
		DatabaseID:   database.ID,
		StorageID:    testStorage.ID,
		Status:       backups_core.BackupStatusCompleted,
		BackupSizeMb: 10,
		CreatedAt:    time.Now().UTC(),
	}
	err := backupRepository.Save(backup)
	assert.NoError(t, err)
	cleaner := GetBackupCleaner()
	err = cleaner.DeleteBackup(backup)
	assert.NoError(t, err, "DeleteBackup should succeed even when storage file doesn't exist")
	// FindByID erroring is how the repository signals "record gone".
	deletedBackup, err := backupRepository.FindByID(backup.ID)
	assert.Error(t, err, "Backup should not exist in database")
	assert.Nil(t, deletedBackup)
}
// createTestInterval persists and returns a daily-interval fixture scheduled
// at 04:00. It panics on persistence failure, as fixtures have no error path.
func createTestInterval() *intervals.Interval {
	at := "04:00"
	fixture := &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &at,
	}
	if err := storage.GetDb().Create(fixture).Error; err != nil {
		panic(err)
	}
	return fixture
}

View File

@@ -24,6 +24,17 @@ var backupRepository = &backups_core.BackupRepository{}
var taskCancelManager = tasks_cancellation.GetTaskCancelManager()
// backupCleaner is the package-level singleton that removes backup files from
// storage and their records from the database; exposed via GetBackupCleaner.
var backupCleaner = &BackupCleaner{
	backupRepository:      backupRepository,
	storageService:        storages.GetStorageService(),
	backupConfigService:   backups_config.GetBackupConfigService(),
	fieldEncryptor:        encryption.GetFieldEncryptor(),
	logger:                logger.GetLogger(),
	backupRemoveListeners: []backups_core.BackupRemoveListener{},
	runOnce:               sync.Once{},
	hasRun:                atomic.Bool{},
}
var backupNodesRegistry = &BackupNodesRegistry{
client: cache_utils.GetValkeyClient(),
logger: logger.GetLogger(),
@@ -59,7 +70,6 @@ var backuperNode = &BackuperNode{
var backupsScheduler = &BackupsScheduler{
backupRepository: backupRepository,
backupConfigService: backups_config.GetBackupConfigService(),
storageService: storages.GetStorageService(),
taskCancelManager: taskCancelManager,
backupNodesRegistry: backupNodesRegistry,
lastBackupTime: time.Now().UTC(),
@@ -81,3 +91,7 @@ func GetBackuperNode() *BackuperNode {
// GetBackupNodesRegistry returns the package-level BackupNodesRegistry singleton.
func GetBackupNodesRegistry() *BackupNodesRegistry {
	return backupNodesRegistry
}
// GetBackupCleaner returns the package-level BackupCleaner singleton.
func GetBackupCleaner() *BackupCleaner {
	return backupCleaner
}

View File

@@ -1,8 +1,18 @@
package backuping
import (
"databasus-backend/internal/features/notifiers"
"context"
"errors"
"sync/atomic"
"time"
common "databasus-backend/internal/features/backups/backups/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"
"databasus-backend/internal/features/storages"
"github.com/google/uuid"
"github.com/stretchr/testify/mock"
)
@@ -17,3 +27,168 @@ func (m *MockNotificationSender) SendNotification(
) {
m.Called(notifier, title, message)
}
// CreateFailedBackupUsecase is a stub backup use case that reports a small
// amount of progress and then always fails.
type CreateFailedBackupUsecase struct{}

// Execute reports 10 MB of progress and returns a synthetic failure.
func (uc *CreateFailedBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	const simulatedProgressMB = 10
	backupProgressListener(simulatedProgressMB)
	return nil, errors.New("backup failed")
}
// CreateSuccessBackupUsecase is a stub backup use case that reports 10 MB of
// progress and succeeds with unencrypted metadata.
type CreateSuccessBackupUsecase struct{}

// Execute reports progress and returns successful, unencrypted metadata.
func (uc *CreateSuccessBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	backupProgressListener(10)
	metadata := &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}
	return metadata, nil
}
// CreateLargeBackupUsecase simulates a large backup (10000 MB) that succeeds,
// used to exercise max-backup-size limit handling.
type CreateLargeBackupUsecase struct{}

// Execute reports 10000 MB of progress in one step and succeeds.
func (uc *CreateLargeBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	const largeBackupMB = 10000
	backupProgressListener(largeBackupMB)
	metadata := &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}
	return metadata, nil
}
// CreateProgressiveBackupUsecase simulates a backup whose size grows step by
// step (1, 3, 5, then 10 MB) past a 5 MB limit, checking for cancellation
// after every progress report so the size-limit watchdog can abort it.
type CreateProgressiveBackupUsecase struct{}

// Execute reports progressively larger sizes, returning ctx.Err() as soon as
// the context is cancelled; the final success path should never be reached
// when a 5 MB limit is enforced by the caller.
func (uc *CreateProgressiveBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	// The last step (10 MB) exceeds the 5 MB limit used by the tests.
	for _, completedMB := range []float64{1, 3, 5, 10} {
		backupProgressListener(completedMB)
		if err := ctx.Err(); err != nil {
			return nil, err
		}
	}
	// Should not reach here due to cancellation
	return &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}, nil
}
// CreateMediumBackupUsecase simulates a 50 MB backup that succeeds.
type CreateMediumBackupUsecase struct{}

// Execute reports 50 MB of progress and succeeds with unencrypted metadata.
func (uc *CreateMediumBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	const mediumBackupMB = 50
	backupProgressListener(mediumBackupMB)
	metadata := &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}
	return metadata, nil
}
// MockTrackingBackupUsecase records how many times Execute runs and which
// backup IDs it was invoked with, so tests can assert on parallel execution.
type MockTrackingBackupUsecase struct {
	callCount       atomic.Int32
	calledBackupIDs chan uuid.UUID
}

// NewMockTrackingBackupUsecase returns a tracker buffering up to 10 backup IDs.
func NewMockTrackingBackupUsecase() *MockTrackingBackupUsecase {
	return &MockTrackingBackupUsecase{
		calledBackupIDs: make(chan uuid.UUID, 10),
	}
}

// Execute records the invocation, simulates ~100ms of backup work, reports
// 10 MB of progress, and succeeds with unencrypted metadata.
func (m *MockTrackingBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	database *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
	m.callCount.Add(1)
	// Record the ID without ever blocking the backup path: if the buffer is
	// full the ID is simply dropped.
	select {
	case m.calledBackupIDs <- backupID:
	default:
	}
	time.Sleep(100 * time.Millisecond) // simulate backup work
	backupProgressListener(10)
	metadata := &common.BackupMetadata{
		EncryptionSalt: nil,
		EncryptionIV:   nil,
		Encryption:     backups_config.BackupEncryptionNone,
	}
	return metadata, nil
}

// GetCallCount reports how many times Execute has been invoked so far.
func (m *MockTrackingBackupUsecase) GetCallCount() int32 {
	return m.callCount.Load()
}

// GetCalledBackupIDs drains and returns every backup ID recorded so far.
func (m *MockTrackingBackupUsecase) GetCalledBackupIDs() []uuid.UUID {
	collected := []uuid.UUID{}
	for {
		select {
		case id := <-m.calledBackupIDs:
			collected = append(collected, id)
		default:
			return collected
		}
	}
}

View File

@@ -13,10 +13,7 @@ import (
"databasus-backend/internal/config"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/storages"
task_cancellation "databasus-backend/internal/features/tasks/cancellation"
"databasus-backend/internal/util/encryption"
"databasus-backend/internal/util/period"
)
const (
@@ -28,7 +25,6 @@ const (
type BackupsScheduler struct {
backupRepository *backups_core.BackupRepository
backupConfigService *backups_config.BackupConfigService
storageService *storages.StorageService
taskCancelManager *task_cancellation.TaskCancelManager
backupNodesRegistry *BackupNodesRegistry
@@ -84,10 +80,6 @@ func (s *BackupsScheduler) Run(ctx context.Context) {
case <-ctx.Done():
return
case <-ticker.C:
if err := s.cleanOldBackups(); err != nil {
s.logger.Error("Failed to clean old backups", "error", err)
}
if err := s.checkDeadNodesAndFailBackups(); err != nil {
s.logger.Error("Failed to check dead nodes and fail backups", "error", err)
}
@@ -166,6 +158,33 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
return
}
// Check for existing in-progress backups
inProgressBackups, err := s.backupRepository.FindByDatabaseIdAndStatus(
databaseID,
backups_core.BackupStatusInProgress,
)
if err != nil {
s.logger.Error(
"Failed to check for in-progress backups",
"databaseId",
databaseID,
"error",
err,
)
return
}
if len(inProgressBackups) > 0 {
s.logger.Warn(
"Backup already in progress for database, skipping new backup",
"databaseId",
databaseID,
"existingBackupId",
inProgressBackups[0].ID,
)
return
}
leastBusyNodeID, err := s.calculateLeastBusyNode()
if err != nil {
s.logger.Error(
@@ -266,6 +285,10 @@ func (s *BackupsScheduler) GetRemainedBackupTryCount(lastBackup *backups_core.Ba
return 0
}
if lastBackup.IsSkipRetry {
return 0
}
backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(lastBackup.DatabaseID)
if err != nil {
s.logger.Error("Failed to get backup config by database ID", "error", err)
@@ -298,74 +321,6 @@ func (s *BackupsScheduler) GetRemainedBackupTryCount(lastBackup *backups_core.Ba
return maxFailedTriesCount - len(lastFailedBackups)
}
// cleanOldBackups removes backups that have outlived their configured store
// period. For every backup config with backups enabled (except "forever"
// retention) it finds backups older than the retention window, deletes each
// backup file from its storage, and then removes the database record.
// Per-backup and per-config failures are logged and skipped so one bad entry
// cannot stall the whole cleanup cycle; only the initial config-listing
// error aborts the run.
func (s *BackupsScheduler) cleanOldBackups() error {
	enabledBackupConfigs, err := s.backupConfigService.GetBackupConfigsWithEnabledBackups()
	if err != nil {
		return err
	}
	// The field encryptor is loop-invariant; fetch it once instead of once
	// per deleted backup.
	encryptor := encryption.GetFieldEncryptor()
	for _, backupConfig := range enabledBackupConfigs {
		backupStorePeriod := backupConfig.StorePeriod
		if backupStorePeriod == period.PeriodForever {
			continue
		}
		storeDuration := backupStorePeriod.ToDuration()
		dateBeforeBackupsShouldBeDeleted := time.Now().UTC().Add(-storeDuration)
		oldBackups, err := s.backupRepository.FindBackupsBeforeDate(
			backupConfig.DatabaseID,
			dateBeforeBackupsShouldBeDeleted,
		)
		if err != nil {
			s.logger.Error(
				"Failed to find old backups for database",
				"databaseId",
				backupConfig.DatabaseID,
				"error",
				err,
			)
			continue
		}
		for _, backup := range oldBackups {
			storage, err := s.storageService.GetStorageByID(backup.StorageID)
			if err != nil {
				s.logger.Error(
					"Failed to get storage by ID",
					"storageId",
					backup.StorageID,
					"error",
					err,
				)
				continue
			}
			// A failed file delete is logged but not fatal: the DB record is
			// still removed so an unreachable storage cannot block cleanup.
			err = storage.DeleteFile(encryptor, backup.ID)
			if err != nil {
				s.logger.Error("Failed to delete backup file", "backupId", backup.ID, "error", err)
			}
			if err := s.backupRepository.DeleteByID(backup.ID); err != nil {
				s.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", err)
				continue
			}
			s.logger.Info(
				"Deleted old backup",
				"backupId",
				backup.ID,
				"databaseId",
				backupConfig.DatabaseID,
			)
		}
	}
	return nil
}
func (s *BackupsScheduler) runPendingBackups() error {
enabledBackupConfigs, err := s.backupConfigService.GetBackupConfigsWithEnabledBackups()
if err != nil {

View File

@@ -1033,3 +1033,289 @@ func Test_StartBackup_WhenBackupFails_DecrementsActiveTaskCount(t *testing.T) {
time.Sleep(200 * time.Millisecond)
}
// Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup verifies that
// StartBackup is a no-op when the database already has an in-progress backup:
// no second backup record may be created for the same database.
func Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup(t *testing.T) {
	cache_utils.ClearAllCache()
	backuperNode := CreateTestBackuperNode()
	cancel := StartBackuperNodeForTest(t, backuperNode)
	defer StopBackuperNodeForTest(t, cancel, backuperNode)
	user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	router := CreateTestRouter()
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Cleanup in reverse dependency order; the sleep lets async work settle
	// before the storage fixture is removed.
	defer func() {
		backups, _ := backupRepository.FindByDatabaseID(database.ID)
		for _, backup := range backups {
			backupRepository.DeleteByID(backup.ID)
		}
		databases.RemoveTestDatabase(database)
		time.Sleep(50 * time.Millisecond)
		storages.RemoveTestStorage(storage.ID)
		notifiers.RemoveTestNotifier(notifier)
		workspaces_testing.RemoveTestWorkspace(workspace, router)
	}()
	// Enable daily backups so StartBackup has a valid config to act on.
	backupConfig, err := backups_config.GetBackupConfigService().GetBackupConfigByDbId(database.ID)
	assert.NoError(t, err)
	timeOfDay := "04:00"
	backupConfig.BackupInterval = &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}
	backupConfig.IsBackupsEnabled = true
	backupConfig.StorePeriod = period.PeriodWeek
	backupConfig.Storage = storage
	backupConfig.StorageID = &storage.ID
	_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create an in-progress backup manually
	inProgressBackup := &backups_core.Backup{
		DatabaseID:   database.ID,
		StorageID:    storage.ID,
		Status:       backups_core.BackupStatusInProgress,
		BackupSizeMb: 0,
		CreatedAt:    time.Now().UTC(),
	}
	err = backupRepository.Save(inProgressBackup)
	assert.NoError(t, err)
	// Try to start a new backup - should be skipped
	GetBackupsScheduler().StartBackup(database.ID, false)
	time.Sleep(200 * time.Millisecond)
	// Verify only 1 backup exists (the original in-progress one)
	backups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Len(t, backups, 1)
	assert.Equal(t, backups_core.BackupStatusInProgress, backups[0].Status)
	assert.Equal(t, inProgressBackup.ID, backups[0].ID)
	time.Sleep(200 * time.Millisecond)
}
// Test_RunPendingBackups_WhenLastBackupFailedWithIsSkipRetry_SkipsBackupEvenWithRetriesEnabled
// verifies that a failed backup flagged IsSkipRetry is never retried, even
// when the backup config enables retries with a generous retry budget.
func Test_RunPendingBackups_WhenLastBackupFailedWithIsSkipRetry_SkipsBackupEvenWithRetriesEnabled(
	t *testing.T,
) {
	cache_utils.ClearAllCache()
	backuperNode := CreateTestBackuperNode()
	cancel := StartBackuperNodeForTest(t, backuperNode)
	defer StopBackuperNodeForTest(t, cancel, backuperNode)
	user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	router := CreateTestRouter()
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	// Cleanup in reverse dependency order; the sleep lets async work settle.
	defer func() {
		backups, _ := backupRepository.FindByDatabaseID(database.ID)
		for _, backup := range backups {
			backupRepository.DeleteByID(backup.ID)
		}
		databases.RemoveTestDatabase(database)
		time.Sleep(50 * time.Millisecond)
		storages.RemoveTestStorage(storage.ID)
		notifiers.RemoveTestNotifier(notifier)
		workspaces_testing.RemoveTestWorkspace(workspace, router)
	}()
	// Enable backups with retries enabled and high retry count
	backupConfig, err := backups_config.GetBackupConfigService().GetBackupConfigByDbId(database.ID)
	assert.NoError(t, err)
	timeOfDay := "04:00"
	backupConfig.BackupInterval = &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}
	backupConfig.IsBackupsEnabled = true
	backupConfig.StorePeriod = period.PeriodWeek
	backupConfig.Storage = storage
	backupConfig.StorageID = &storage.ID
	backupConfig.IsRetryIfFailed = true
	backupConfig.MaxFailedTriesCount = 5
	_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
	assert.NoError(t, err)
	// Create a failed backup with IsSkipRetry set to true
	failMessage := "backup failed due to size limit exceeded"
	backupRepository.Save(&backups_core.Backup{
		DatabaseID:  database.ID,
		StorageID:   storage.ID,
		Status:      backups_core.BackupStatusFailed,
		FailMessage: &failMessage,
		IsSkipRetry: true,
		CreatedAt:   time.Now().UTC().Add(-1 * time.Hour),
	})
	// Verify GetRemainedBackupTryCount returns 0 even though retries are enabled
	lastBackup, err := backupRepository.FindLastByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.NotNil(t, lastBackup)
	remainedTries := GetBackupsScheduler().GetRemainedBackupTryCount(lastBackup)
	assert.Equal(t, 0, remainedTries, "Should return 0 tries when IsSkipRetry is true")
	// Run the scheduler
	GetBackupsScheduler().runPendingBackups()
	time.Sleep(100 * time.Millisecond)
	// Verify no new backup was created (still only 1 backup exists)
	backups, err := backupRepository.FindByDatabaseID(database.ID)
	assert.NoError(t, err)
	assert.Len(t, backups, 1, "No retry should be attempted when IsSkipRetry is true")
	time.Sleep(200 * time.Millisecond)
}
// Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCalled
// verifies that backups for two different databases can run in parallel: both
// use-case invocations must happen and both backups must end up completed.
func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCalled(t *testing.T) {
	cache_utils.ClearAllCache()
	// Create mock tracking use case
	mockUseCase := NewMockTrackingBackupUsecase()
	// Create BackuperNode with mock use case
	backuperNode := CreateTestBackuperNodeWithUseCase(mockUseCase)
	cancel := StartBackuperNodeForTest(t, backuperNode)
	defer StopBackuperNodeForTest(t, cancel, backuperNode)
	// Create scheduler
	scheduler := CreateTestScheduler()
	schedulerCancel := StartSchedulerForTest(t, scheduler)
	defer schedulerCancel()
	// Setup test data
	user := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	router := CreateTestRouter()
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
	storage := storages.CreateTestStorage(workspace.ID)
	notifier := notifiers.CreateTestNotifier(workspace.ID)
	// Create 2 separate databases
	database1 := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	database2 := databases.CreateTestDatabase(workspace.ID, storage, notifier)
	defer func() {
		// Cleanup backups for database1
		backups1, _ := backupRepository.FindByDatabaseID(database1.ID)
		for _, backup := range backups1 {
			backupRepository.DeleteByID(backup.ID)
		}
		// Cleanup backups for database2
		backups2, _ := backupRepository.FindByDatabaseID(database2.ID)
		for _, backup := range backups2 {
			backupRepository.DeleteByID(backup.ID)
		}
		databases.RemoveTestDatabase(database1)
		databases.RemoveTestDatabase(database2)
		time.Sleep(50 * time.Millisecond)
		storages.RemoveTestStorage(storage.ID)
		notifiers.RemoveTestNotifier(notifier)
		workspaces_testing.RemoveTestWorkspace(workspace, router)
	}()
	// Enable backups for database1
	backupConfig1, err := backups_config.GetBackupConfigService().
		GetBackupConfigByDbId(database1.ID)
	assert.NoError(t, err)
	timeOfDay := "04:00"
	backupConfig1.BackupInterval = &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}
	backupConfig1.IsBackupsEnabled = true
	backupConfig1.StorePeriod = period.PeriodWeek
	backupConfig1.Storage = storage
	backupConfig1.StorageID = &storage.ID
	_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig1)
	assert.NoError(t, err)
	// Enable backups for database2
	backupConfig2, err := backups_config.GetBackupConfigService().
		GetBackupConfigByDbId(database2.ID)
	assert.NoError(t, err)
	backupConfig2.BackupInterval = &intervals.Interval{
		Interval:  intervals.IntervalDaily,
		TimeOfDay: &timeOfDay,
	}
	backupConfig2.IsBackupsEnabled = true
	backupConfig2.StorePeriod = period.PeriodWeek
	backupConfig2.Storage = storage
	backupConfig2.StorageID = &storage.ID
	_, err = backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig2)
	assert.NoError(t, err)
	// Start 2 backups simultaneously
	t.Log("Starting backup for database1")
	scheduler.StartBackup(database1.ID, false)
	t.Log("Starting backup for database2")
	scheduler.StartBackup(database2.ID, false)
	// Wait up to 10 seconds for both backups to complete
	t.Log("Waiting for both backups to complete...")
	success := assert.Eventually(t, func() bool {
		callCount := mockUseCase.GetCallCount()
		t.Logf("Current call count: %d/2", callCount)
		return callCount == 2
	}, 10*time.Second, 200*time.Millisecond, "Both use cases should be called within 10 seconds")
	if !success {
		t.Logf("Test failed: Only %d out of 2 use cases were called", mockUseCase.GetCallCount())
	}
	// Verify both backup IDs were received
	calledBackupIDs := mockUseCase.GetCalledBackupIDs()
	t.Logf("Called backup IDs: %v", calledBackupIDs)
	assert.Len(t, calledBackupIDs, 2, "Both backup IDs should be tracked")
	// Verify both backups exist in repository and are completed
	backups1, err := backupRepository.FindByDatabaseID(database1.ID)
	assert.NoError(t, err)
	assert.Len(t, backups1, 1, "Database1 should have 1 backup")
	if len(backups1) > 0 {
		t.Logf("Database1 backup status: %s", backups1[0].Status)
	}
	backups2, err := backupRepository.FindByDatabaseID(database2.ID)
	assert.NoError(t, err)
	assert.Len(t, backups2, 1, "Database2 should have 1 backup")
	if len(backups2) > 0 {
		t.Logf("Database2 backup status: %s", backups2[0].Status)
	}
	// Verify both backups completed successfully
	if len(backups1) > 0 {
		assert.Equal(t, backups_core.BackupStatusCompleted, backups1[0].Status,
			"Database1 backup should be completed")
	}
	if len(backups2) > 0 {
		assert.Equal(t, backups_core.BackupStatusCompleted, backups2[0].Status,
			"Database2 backup should be completed")
	}
	time.Sleep(200 * time.Millisecond)
}

View File

@@ -55,19 +55,38 @@ func CreateTestBackuperNode() *BackuperNode {
}
}
// CreateTestBackuperNodeWithUseCase builds a BackuperNode wired with the
// package-level test dependencies but with the given backup use case injected,
// letting tests substitute mock backup behavior (e.g. failing or tracking
// use cases).
func CreateTestBackuperNodeWithUseCase(useCase backups_core.CreateBackupUsecase) *BackuperNode {
	return &BackuperNode{
		databaseService:     databases.GetDatabaseService(),
		fieldEncryptor:      encryption.GetFieldEncryptor(),
		workspaceService:    workspaces_services.GetWorkspaceService(),
		backupRepository:    backupRepository,
		backupConfigService: backups_config.GetBackupConfigService(),
		storageService:      storages.GetStorageService(),
		notificationSender:  notifiers.GetNotifierService(),
		backupCancelManager: taskCancelManager,
		backupNodesRegistry: backupNodesRegistry,
		logger:              logger.GetLogger(),
		createBackupUseCase: useCase,
		nodeID:              uuid.New(),
		lastHeartbeat:       time.Time{},
		runOnce:             sync.Once{},
		hasRun:              atomic.Bool{},
	}
}
// CreateTestScheduler builds a BackupsScheduler wired with the package-level
// test dependencies and a fresh BackuperNode, suitable for scheduler tests.
//
// NOTE(review): the visible block mixed a positional composite literal with
// keyed fields (a diff-flattening artifact); Go forbids mixing keyed and
// non-keyed fields in one literal, so only the keyed initializer is kept.
func CreateTestScheduler() *BackupsScheduler {
	return &BackupsScheduler{
		backupRepository:      backupRepository,
		backupConfigService:   backups_config.GetBackupConfigService(),
		taskCancelManager:     taskCancelManager,
		backupNodesRegistry:   backupNodesRegistry,
		lastBackupTime:        time.Now().UTC(),
		logger:                logger.GetLogger(),
		backupToNodeRelations: make(map[uuid.UUID]BackupToNodeRelation),
		backuperNode:          CreateTestBackuperNode(),
		runOnce:               sync.Once{},
		hasRun:                atomic.Bool{},
	}
}

View File

@@ -15,6 +15,7 @@ type Backup struct {
Status BackupStatus `json:"status" gorm:"column:status;not null"`
FailMessage *string `json:"failMessage" gorm:"column:fail_message"`
IsSkipRetry bool `json:"isSkipRetry" gorm:"column:is_skip_retry;type:boolean;not null"`
BackupSizeMb float64 `json:"backupSizeMb" gorm:"column:backup_size_mb;default:0"`

View File

@@ -212,3 +212,36 @@ func (r *BackupRepository) CountByDatabaseID(databaseID uuid.UUID) (int64, error
return count, nil
}
// GetTotalSizeByDatabase returns the summed backup_size_mb of all backups for
// the given database, excluding backups that are still in progress.
// COALESCE guarantees 0 (not NULL) when the database has no matching backups.
func (r *BackupRepository) GetTotalSizeByDatabase(databaseID uuid.UUID) (float64, error) {
	var totalSize float64
	if err := storage.
		GetDb().
		Model(&Backup{}).
		Select("COALESCE(SUM(backup_size_mb), 0)").
		Where("database_id = ? AND status != ?", databaseID, BackupStatusInProgress).
		Scan(&totalSize).Error; err != nil {
		return 0, err
	}
	return totalSize, nil
}
// FindOldestByDatabaseExcludingInProgress returns up to `limit` of the oldest
// backups (ordered by created_at ascending) for the given database, skipping
// in-progress ones — e.g. to pick eviction candidates when a total-size limit
// is exceeded.
func (r *BackupRepository) FindOldestByDatabaseExcludingInProgress(
	databaseID uuid.UUID,
	limit int,
) ([]*Backup, error) {
	var backups []*Backup
	if err := storage.
		GetDb().
		Where("database_id = ? AND status != ?", databaseID, BackupStatusInProgress).
		Order("created_at ASC").
		Limit(limit).
		Find(&backups).Error; err != nil {
		return nil, err
	}
	return backups, nil
}

View File

@@ -25,22 +25,23 @@ var backupRepository = &backups_core.BackupRepository{}
var taskCancelManager = task_cancellation.GetTaskCancelManager()
// backupService is the package-level BackupService singleton with every
// collaborating service wired in, including the BackupCleaner used for
// backup deletion.
//
// NOTE(review): the visible block duplicated every dependency by merging a
// keyed initializer with a positional one (a diff-flattening artifact); Go
// forbids mixing keyed and non-keyed fields, so one keyed initializer —
// extended with backupCleaner — is kept.
var backupService = &BackupService{
	databaseService:        databases.GetDatabaseService(),
	storageService:         storages.GetStorageService(),
	backupRepository:       backupRepository,
	notifierService:        notifiers.GetNotifierService(),
	notificationSender:     notifiers.GetNotifierService(),
	backupConfigService:    backups_config.GetBackupConfigService(),
	secretKeyService:       encryption_secrets.GetSecretKeyService(),
	fieldEncryptor:         encryption.GetFieldEncryptor(),
	createBackupUseCase:    usecases.GetCreateBackupUsecase(),
	logger:                 logger.GetLogger(),
	backupRemoveListeners:  []backups_core.BackupRemoveListener{},
	workspaceService:       workspaces_services.GetWorkspaceService(),
	auditLogService:        audit_logs.GetAuditLogService(),
	taskCancelManager:      taskCancelManager,
	downloadTokenService:   backups_download.GetDownloadTokenService(),
	backupSchedulerService: backuping.GetBackupsScheduler(),
	backupCleaner:          backuping.GetBackupCleaner(),
}
var backupController = &BackupController{

View File

@@ -46,6 +46,7 @@ type BackupService struct {
taskCancelManager *task_cancellation.TaskCancelManager
downloadTokenService *backups_download.DownloadTokenService
backupSchedulerService *backuping.BackupsScheduler
backupCleaner *backuping.BackupCleaner
}
func (s *BackupService) AddBackupRemoveListener(listener backups_core.BackupRemoveListener) {
@@ -189,7 +190,7 @@ func (s *BackupService) DeleteBackup(
database.WorkspaceID,
)
return s.deleteBackup(backup)
return s.backupCleaner.DeleteBackup(backup)
}
func (s *BackupService) GetBackup(backupID uuid.UUID) (*backups_core.Backup, error) {
@@ -292,29 +293,6 @@ func (s *BackupService) GetBackupFile(
return reader, backup, database, nil
}
// deleteBackup removes a single backup: it first notifies all registered
// remove-listeners (any listener error aborts the deletion), then deletes the
// backup file from its storage, and finally deletes the database record.
// A failed file delete is logged but not fatal (see inline comment).
func (s *BackupService) deleteBackup(backup *backups_core.Backup) error {
	for _, listener := range s.backupRemoveListeners {
		if err := listener.OnBeforeBackupRemove(backup); err != nil {
			return err
		}
	}
	storage, err := s.storageService.GetStorageByID(backup.StorageID)
	if err != nil {
		return err
	}
	err = storage.DeleteFile(s.fieldEncryptor, backup.ID)
	if err != nil {
		// we do not return error here, because sometimes clean up performed
		// before unavailable storage removal or change - therefore we should
		// proceed even in case of error
		s.logger.Error("Failed to delete backup file", "error", err)
	}
	return s.backupRepository.DeleteByID(backup.ID)
}
func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error {
dbBackupsInProgress, err := s.backupRepository.FindByDatabaseIdAndStatus(
databaseID,
@@ -336,7 +314,7 @@ func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error {
}
for _, dbBackup := range dbBackups {
err := s.deleteBackup(dbBackup)
err := s.backupCleaner.DeleteBackup(dbBackup)
if err != nil {
return err
}

View File

@@ -16,6 +16,7 @@ type BackupConfigController struct {
func (c *BackupConfigController) RegisterRoutes(router *gin.RouterGroup) {
router.POST("/backup-configs/save", c.SaveBackupConfig)
router.GET("/backup-configs/database/:id/plan", c.GetDatabasePlan)
router.GET("/backup-configs/database/:id", c.GetBackupConfigByDbID)
router.GET("/backup-configs/storage/:id/is-using", c.IsStorageUsing)
router.GET("/backup-configs/storage/:id/databases-count", c.CountDatabasesForStorage)
@@ -92,6 +93,39 @@ func (c *BackupConfigController) GetBackupConfigByDbID(ctx *gin.Context) {
ctx.JSON(http.StatusOK, backupConfig)
}
// GetDatabasePlan
// @Summary Get database plan by database ID
// @Description Get the plan limits for a specific database (max backup size, max total size, max storage period)
// @Tags backup-configs
// @Produce json
// @Param id path string true "Database ID"
// @Success 200 {object} plans.DatabasePlan
// @Failure 400 {object} map[string]string "Invalid database ID"
// @Failure 401 {object} map[string]string "User not authenticated"
// @Failure 404 {object} map[string]string "Database not found or access denied"
// @Router /backup-configs/database/{id}/plan [get]
func (c *BackupConfigController) GetDatabasePlan(ctx *gin.Context) {
	// Guard clauses: authentication first, then path-parameter validation.
	user, ok := users_middleware.GetUserFromContext(ctx)
	if !ok {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"})
		return
	}
	databaseID, parseErr := uuid.Parse(ctx.Param("id"))
	if parseErr != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid database ID"})
		return
	}
	// Service-level failures (missing database or access denied) map to 404.
	plan, planErr := c.backupConfigService.GetDatabasePlan(user, databaseID)
	if planErr != nil {
		ctx.JSON(http.StatusNotFound, gin.H{"error": "database plan not found"})
		return
	}
	ctx.JSON(http.StatusOK, plan)
}
// IsStorageUsing
// @Summary Check if storage is being used
// @Description Check if a storage is currently being used by any backup configuration

View File

@@ -16,11 +16,14 @@ import (
"databasus-backend/internal/features/databases/databases/postgresql"
"databasus-backend/internal/features/intervals"
"databasus-backend/internal/features/notifiers"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/features/storages"
local_storage "databasus-backend/internal/features/storages/models/local"
users_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
workspaces_controllers "databasus-backend/internal/features/workspaces/controllers"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"databasus-backend/internal/storage"
"databasus-backend/internal/util/period"
test_utils "databasus-backend/internal/util/testing"
"databasus-backend/internal/util/tools"
@@ -300,14 +303,204 @@ func Test_GetBackupConfigByDbID_ReturnsDefaultConfigForNewDatabase(t *testing.T)
&response,
)
var plan plans.DatabasePlan
test_utils.MakeGetRequestAndUnmarshal(
t,
router,
"/api/v1/backup-configs/database/"+database.ID.String()+"/plan",
"Bearer "+owner.Token,
http.StatusOK,
&plan,
)
assert.Equal(t, database.ID, response.DatabaseID)
assert.False(t, response.IsBackupsEnabled)
assert.Equal(t, period.PeriodWeek, response.StorePeriod)
assert.Equal(t, plan.MaxStoragePeriod, response.StorePeriod)
assert.Equal(t, plan.MaxBackupSizeMB, response.MaxBackupSizeMB)
assert.Equal(t, plan.MaxBackupsTotalSizeMB, response.MaxBackupsTotalSizeMB)
assert.True(t, response.IsRetryIfFailed)
assert.Equal(t, 3, response.MaxFailedTriesCount)
assert.NotNil(t, response.BackupInterval)
}
// Test_GetDatabasePlan_ForNewDatabase_PlanAlwaysReturned checks that the plan
// endpoint returns a populated plan for a freshly created database.
func Test_GetDatabasePlan_ForNewDatabase_PlanAlwaysReturned(t *testing.T) {
	router := createTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	database := createTestDatabaseViaAPI("Test Database", workspace.ID, owner.Token, router)

	planURL := "/api/v1/backup-configs/database/" + database.ID.String() + "/plan"
	var plan plans.DatabasePlan
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		planURL,
		"Bearer "+owner.Token,
		http.StatusOK,
		&plan,
	)

	assert.Equal(t, database.ID, plan.DatabaseID)
	assert.NotNil(t, plan.MaxBackupSizeMB)
	assert.NotNil(t, plan.MaxBackupsTotalSizeMB)
	assert.NotEmpty(t, plan.MaxStoragePeriod)
}
// Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced verifies that
// saving a backup config is validated against the database's plan limits:
// per-backup size, total backups size, and maximum storage period. The plan is
// tightened directly in the DB, then four save attempts are made — three that
// exceed one limit each (expect 400) and one within all limits (expect 200).
func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testing.T) {
	router := createTestRouter()
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
	database := createTestDatabaseViaAPI("Test Database", workspace.ID, owner.Token, router)
	// Get plan via API (triggers auto-creation)
	var plan plans.DatabasePlan
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		"/api/v1/backup-configs/database/"+database.ID.String()+"/plan",
		"Bearer "+owner.Token,
		http.StatusOK,
		&plan,
	)
	assert.Equal(t, database.ID, plan.DatabaseID)
	// Adjust plan limits directly in database to fixed restrictive values
	// (bypasses the API on purpose — there is no endpoint for editing plans).
	err := storage.GetDb().Model(&plans.DatabasePlan{}).
		Where("database_id = ?", database.ID).
		Updates(map[string]any{
			"max_backup_size_mb":        100,
			"max_backups_total_size_mb": 1000,
			"max_storage_period":        period.PeriodMonth,
		}).Error
	assert.NoError(t, err)
	// Test 1: Try to save backup config with exceeded backup size limit
	timeOfDay := "04:00"
	backupConfigExceededSize := BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek,
		BackupInterval: &intervals.Interval{
			Interval:  intervals.IntervalDaily,
			TimeOfDay: &timeOfDay,
		},
		SendNotificationsOn: []BackupNotificationType{
			NotificationBackupFailed,
		},
		IsRetryIfFailed:       true,
		MaxFailedTriesCount:   3,
		Encryption:            BackupEncryptionNone,
		MaxBackupSizeMB:       200, // Exceeds limit of 100
		MaxBackupsTotalSizeMB: 800,
	}
	respExceededSize := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/backup-configs/save",
		"Bearer "+owner.Token,
		backupConfigExceededSize,
		http.StatusBadRequest,
	)
	assert.Contains(t, string(respExceededSize.Body), "max backup size exceeds plan limit")
	// Test 2: Try to save backup config with exceeded total size limit
	backupConfigExceededTotal := BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek,
		BackupInterval: &intervals.Interval{
			Interval:  intervals.IntervalDaily,
			TimeOfDay: &timeOfDay,
		},
		SendNotificationsOn: []BackupNotificationType{
			NotificationBackupFailed,
		},
		IsRetryIfFailed:       true,
		MaxFailedTriesCount:   3,
		Encryption:            BackupEncryptionNone,
		MaxBackupSizeMB:       50,
		MaxBackupsTotalSizeMB: 2000, // Exceeds limit of 1000
	}
	respExceededTotal := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/backup-configs/save",
		"Bearer "+owner.Token,
		backupConfigExceededTotal,
		http.StatusBadRequest,
	)
	assert.Contains(t, string(respExceededTotal.Body), "max total backups size exceeds plan limit")
	// Test 3: Try to save backup config with exceeded storage period limit
	backupConfigExceededPeriod := BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodYear, // Exceeds limit of Month
		BackupInterval: &intervals.Interval{
			Interval:  intervals.IntervalDaily,
			TimeOfDay: &timeOfDay,
		},
		SendNotificationsOn: []BackupNotificationType{
			NotificationBackupFailed,
		},
		IsRetryIfFailed:       true,
		MaxFailedTriesCount:   3,
		Encryption:            BackupEncryptionNone,
		MaxBackupSizeMB:       80,
		MaxBackupsTotalSizeMB: 800,
	}
	respExceededPeriod := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/backup-configs/save",
		"Bearer "+owner.Token,
		backupConfigExceededPeriod,
		http.StatusBadRequest,
	)
	assert.Contains(t, string(respExceededPeriod.Body), "storage period exceeds plan limit")
	// Test 4: Save backup config within all limits - should succeed
	backupConfigValid := BackupConfig{
		DatabaseID:       database.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek, // Within Month limit
		BackupInterval: &intervals.Interval{
			Interval:  intervals.IntervalDaily,
			TimeOfDay: &timeOfDay,
		},
		SendNotificationsOn: []BackupNotificationType{
			NotificationBackupFailed,
		},
		IsRetryIfFailed:       true,
		MaxFailedTriesCount:   3,
		Encryption:            BackupEncryptionNone,
		MaxBackupSizeMB:       80,  // Within 100 limit
		MaxBackupsTotalSizeMB: 800, // Within 1000 limit
	}
	var responseValid BackupConfig
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/backup-configs/save",
		"Bearer "+owner.Token,
		backupConfigValid,
		http.StatusOK,
		&responseValid,
	)
	// The saved config echoes back the accepted values unchanged.
	assert.Equal(t, database.ID, responseValid.DatabaseID)
	assert.Equal(t, int64(80), responseValid.MaxBackupSizeMB)
	assert.Equal(t, int64(800), responseValid.MaxBackupsTotalSizeMB)
	assert.Equal(t, period.PeriodWeek, responseValid.StorePeriod)
}
func Test_IsStorageUsing_PermissionsEnforced(t *testing.T) {
tests := []struct {
name string
@@ -1443,6 +1636,110 @@ func Test_TransferDatabase_TargetStorageFromDifferentWorkspace_ReturnsBadRequest
assert.Contains(t, string(testResp.Body), "target storage does not belong to target workspace")
}
// Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase verifies that
// a regular storage is bound to its workspace (cross-workspace use is rejected)
// while a system storage (IsSystem=true, admin-created) can be attached to a
// database from any workspace.
func Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase(t *testing.T) {
	router := createTestRouterWithStorageForTransfer()
	owner1 := users_testing.CreateTestUser(users_enums.UserRoleMember)
	owner2 := users_testing.CreateTestUser(users_enums.UserRoleMember)
	admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	workspaceA := workspaces_testing.CreateTestWorkspace("Workspace A", owner1, router)
	workspaceB := workspaces_testing.CreateTestWorkspace("Workspace B", owner2, router)
	databaseA := createTestDatabaseViaAPI("Database A", workspaceA.ID, owner1.Token, router)
	// Test 1: Regular storage from workspace B cannot be used by database in workspace A
	regularStorageB := createTestStorage(workspaceB.ID)
	timeOfDay := "04:00"
	backupConfigWithRegularStorage := BackupConfig{
		DatabaseID:       databaseA.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek,
		BackupInterval: &intervals.Interval{
			Interval:  intervals.IntervalDaily,
			TimeOfDay: &timeOfDay,
		},
		StorageID: &regularStorageB.ID,
		Storage:   regularStorageB,
		SendNotificationsOn: []BackupNotificationType{
			NotificationBackupFailed,
		},
		IsRetryIfFailed:     true,
		MaxFailedTriesCount: 3,
		Encryption:          BackupEncryptionNone,
	}
	respRegular := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/backup-configs/save",
		"Bearer "+owner1.Token,
		backupConfigWithRegularStorage,
		http.StatusBadRequest,
	)
	assert.Contains(t, string(respRegular.Body), "storage does not belong to the same workspace")
	// Test 2: System storage from workspace B CAN be used by database in workspace A
	systemStorageB := &storages.Storage{
		WorkspaceID:  workspaceB.ID,
		Type:         storages.StorageTypeLocal,
		Name:         "Test System Storage " + uuid.New().String(),
		IsSystem:     true,
		LocalStorage: &local_storage.LocalStorage{},
	}
	var savedSystemStorage storages.Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*systemStorageB,
		http.StatusOK,
		&savedSystemStorage,
	)
	assert.True(t, savedSystemStorage.IsSystem)
	backupConfigWithSystemStorage := BackupConfig{
		DatabaseID:       databaseA.ID,
		IsBackupsEnabled: true,
		StorePeriod:      period.PeriodWeek,
		BackupInterval: &intervals.Interval{
			Interval:  intervals.IntervalDaily,
			TimeOfDay: &timeOfDay,
		},
		StorageID: &savedSystemStorage.ID,
		Storage:   &savedSystemStorage,
		SendNotificationsOn: []BackupNotificationType{
			NotificationBackupFailed,
		},
		IsRetryIfFailed:     true,
		MaxFailedTriesCount: 3,
		Encryption:          BackupEncryptionNone,
	}
	var savedConfig BackupConfig
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/backup-configs/save",
		"Bearer "+owner1.Token,
		backupConfigWithSystemStorage,
		http.StatusOK,
		&savedConfig,
	)
	assert.Equal(t, databaseA.ID, savedConfig.DatabaseID)
	assert.NotNil(t, savedConfig.StorageID)
	assert.Equal(t, savedSystemStorage.ID, *savedConfig.StorageID)
	assert.True(t, savedConfig.IsBackupsEnabled)
	// Cleanup: the original removed only the regular storage; also remove the
	// system storage created above so it does not leak into other tests.
	storages.RemoveTestStorage(regularStorageB.ID)
	storages.RemoveTestStorage(savedSystemStorage.ID)
}
func createTestDatabaseViaAPI(
name string,
workspaceID uuid.UUID,

View File

@@ -6,6 +6,7 @@ import (
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/features/storages"
workspaces_services "databasus-backend/internal/features/workspaces/services"
"databasus-backend/internal/util/logger"
@@ -18,6 +19,7 @@ var backupConfigService = &BackupConfigService{
storages.GetStorageService(),
notifiers.GetNotifierService(),
workspaces_services.GetWorkspaceService(),
plans.GetDatabasePlanService(),
nil,
}
var backupConfigController = &BackupConfigController{

View File

@@ -1,7 +1,9 @@
package backups_config
import (
"databasus-backend/internal/config"
"databasus-backend/internal/features/intervals"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/features/storages"
"databasus-backend/internal/util/period"
"errors"
@@ -31,6 +33,11 @@ type BackupConfig struct {
MaxFailedTriesCount int `json:"maxFailedTriesCount" gorm:"column:max_failed_tries_count;type:int;not null"`
Encryption BackupEncryption `json:"encryption" gorm:"column:encryption;type:text;not null;default:'NONE'"`
// MaxBackupSizeMB limits individual backup size. 0 = unlimited.
MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"`
// MaxBackupsTotalSizeMB limits total size of all backups. 0 = unlimited.
MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"`
}
func (h *BackupConfig) TableName() string {
@@ -70,7 +77,7 @@ func (b *BackupConfig) AfterFind(tx *gorm.DB) error {
return nil
}
func (b *BackupConfig) Validate() error {
func (b *BackupConfig) Validate(plan *plans.DatabasePlan) error {
// Backup interval is required either as ID or as object
if b.BackupIntervalID == uuid.Nil && b.BackupInterval == nil {
return errors.New("backup interval is required")
@@ -89,20 +96,59 @@ func (b *BackupConfig) Validate() error {
return errors.New("encryption must be NONE or ENCRYPTED")
}
if config.GetEnv().IsCloud {
if b.Encryption != BackupEncryptionEncrypted {
return errors.New("encryption is mandatory for cloud storage")
}
}
if b.MaxBackupSizeMB < 0 {
return errors.New("max backup size must be non-negative")
}
if b.MaxBackupsTotalSizeMB < 0 {
return errors.New("max backups total size must be non-negative")
}
// Validate against plan limits
// Check storage period limit
if plan.MaxStoragePeriod != period.PeriodForever {
if b.StorePeriod.CompareTo(plan.MaxStoragePeriod) > 0 {
return errors.New("storage period exceeds plan limit")
}
}
// Check max backup size limit (0 in plan means unlimited)
if plan.MaxBackupSizeMB > 0 {
if b.MaxBackupSizeMB == 0 || b.MaxBackupSizeMB > plan.MaxBackupSizeMB {
return errors.New("max backup size exceeds plan limit")
}
}
// Check max total backups size limit (0 in plan means unlimited)
if plan.MaxBackupsTotalSizeMB > 0 {
if b.MaxBackupsTotalSizeMB == 0 ||
b.MaxBackupsTotalSizeMB > plan.MaxBackupsTotalSizeMB {
return errors.New("max total backups size exceeds plan limit")
}
}
return nil
}
func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig {
return &BackupConfig{
DatabaseID: newDatabaseID,
IsBackupsEnabled: b.IsBackupsEnabled,
StorePeriod: b.StorePeriod,
BackupIntervalID: uuid.Nil,
BackupInterval: b.BackupInterval.Copy(),
StorageID: b.StorageID,
SendNotificationsOn: b.SendNotificationsOn,
IsRetryIfFailed: b.IsRetryIfFailed,
MaxFailedTriesCount: b.MaxFailedTriesCount,
Encryption: b.Encryption,
DatabaseID: newDatabaseID,
IsBackupsEnabled: b.IsBackupsEnabled,
StorePeriod: b.StorePeriod,
BackupIntervalID: uuid.Nil,
BackupInterval: b.BackupInterval.Copy(),
StorageID: b.StorageID,
SendNotificationsOn: b.SendNotificationsOn,
IsRetryIfFailed: b.IsRetryIfFailed,
MaxFailedTriesCount: b.MaxFailedTriesCount,
Encryption: b.Encryption,
MaxBackupSizeMB: b.MaxBackupSizeMB,
MaxBackupsTotalSizeMB: b.MaxBackupsTotalSizeMB,
}
}

View File

@@ -0,0 +1,391 @@
package backups_config
import (
"testing"
"databasus-backend/internal/features/intervals"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/util/period"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func Test_Validate_WhenStoragePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodWeek
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenStoragePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodYear
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
err := config.Validate(plan)
assert.EqualError(t, err, "storage period exceeds plan limit")
}
func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsForever_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodForever
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodForever
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsYear_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodForever
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodYear
err := config.Validate(plan)
assert.EqualError(t, err, "storage period exceeds plan limit")
}
func Test_Validate_WhenStoragePeriodEqualsExactPlanLimit_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodMonth
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenBackupSize100MBAndPlanAllows500MB_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupSizeMB = 100
plan := createUnlimitedPlan()
plan.MaxBackupSizeMB = 500
err := config.Validate(plan)
assert.NoError(t, err)
}
// A config requesting a 500 MB backup cap must be rejected by a plan that
// allows at most 100 MB per backup.
func Test_Validate_WhenBackupSize500MBAndPlanAllows100MB_ValidationFails(t *testing.T) {
	cfg := createValidBackupConfig()
	cfg.MaxBackupSizeMB = 500

	restrictedPlan := createUnlimitedPlan()
	restrictedPlan.MaxBackupSizeMB = 100

	assert.EqualError(t, cfg.Validate(restrictedPlan), "max backup size exceeds plan limit")
}
func Test_Validate_WhenBackupSizeIsUnlimitedAndPlanAllowsUnlimited_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupSizeMB = 0
plan := createUnlimitedPlan()
plan.MaxBackupSizeMB = 0
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenBackupSizeIsUnlimitedAndPlanHas500MBLimit_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupSizeMB = 0
plan := createUnlimitedPlan()
plan.MaxBackupSizeMB = 500
err := config.Validate(plan)
assert.EqualError(t, err, "max backup size exceeds plan limit")
}
func Test_Validate_WhenBackupSizeEqualsExactPlanLimit_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupSizeMB = 500
plan := createUnlimitedPlan()
plan.MaxBackupSizeMB = 500
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenTotalSize1GBAndPlanAllows5GB_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupsTotalSizeMB = 1000
plan := createUnlimitedPlan()
plan.MaxBackupsTotalSizeMB = 5000
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenTotalSize5GBAndPlanAllows1GB_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupsTotalSizeMB = 5000
plan := createUnlimitedPlan()
plan.MaxBackupsTotalSizeMB = 1000
err := config.Validate(plan)
assert.EqualError(t, err, "max total backups size exceeds plan limit")
}
func Test_Validate_WhenTotalSizeIsUnlimitedAndPlanAllowsUnlimited_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupsTotalSizeMB = 0
plan := createUnlimitedPlan()
plan.MaxBackupsTotalSizeMB = 0
err := config.Validate(plan)
assert.NoError(t, err)
}
// An "unlimited" (0) total-size request must be rejected when the plan caps
// total backup size at 1000 MB — unlimited always exceeds a finite limit.
func Test_Validate_WhenTotalSizeIsUnlimitedAndPlanHas1GBLimit_ValidationFails(t *testing.T) {
	cfg := createValidBackupConfig()
	cfg.MaxBackupsTotalSizeMB = 0 // 0 means "unlimited" on the config side

	limitedPlan := createUnlimitedPlan()
	limitedPlan.MaxBackupsTotalSizeMB = 1000

	assert.EqualError(t, cfg.Validate(limitedPlan), "max total backups size exceeds plan limit")
}
func Test_Validate_WhenTotalSizeEqualsExactPlanLimit_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupsTotalSizeMB = 5000
plan := createUnlimitedPlan()
plan.MaxBackupsTotalSizeMB = 5000
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenAllLimitsAreUnlimitedInPlan_AnyConfigurationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodForever
config.MaxBackupSizeMB = 0
config.MaxBackupsTotalSizeMB = 0
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenMultipleLimitsExceeded_ValidationFailsWithFirstError(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodYear
config.MaxBackupSizeMB = 500
config.MaxBackupsTotalSizeMB = 5000
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
plan.MaxBackupSizeMB = 100
plan.MaxBackupsTotalSizeMB = 1000
err := config.Validate(plan)
assert.Error(t, err)
assert.EqualError(t, err, "storage period exceeds plan limit")
}
func Test_Validate_WhenConfigHasInvalidIntervalButPlanIsValid_ValidationFailsOnInterval(
t *testing.T,
) {
config := createValidBackupConfig()
config.BackupIntervalID = uuid.Nil
config.BackupInterval = nil
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "backup interval is required")
}
// NOTE(review): this test is an exact duplicate of
// Test_Validate_WhenConfigHasInvalidIntervalButPlanIsValid_ValidationFailsOnInterval
// above (same setup, same assertion) — consider removing one of the two.
func Test_Validate_WhenIntervalIsMissing_ValidationFailsRegardlessOfPlan(t *testing.T) {
	// A config with neither an interval ID nor an inline interval object must
	// fail basic validation before any plan limits are considered.
	config := createValidBackupConfig()
	config.BackupIntervalID = uuid.Nil
	config.BackupInterval = nil
	plan := createUnlimitedPlan()
	err := config.Validate(plan)
	assert.EqualError(t, err, "backup interval is required")
}
func Test_Validate_WhenRetryEnabledButMaxTriesIsZero_ValidationFailsRegardlessOfPlan(t *testing.T) {
config := createValidBackupConfig()
config.IsRetryIfFailed = true
config.MaxFailedTriesCount = 0
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "max failed tries count must be greater than 0")
}
func Test_Validate_WhenEncryptionIsInvalid_ValidationFailsRegardlessOfPlan(t *testing.T) {
config := createValidBackupConfig()
config.Encryption = "INVALID"
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "encryption must be NONE or ENCRYPTED")
}
func Test_Validate_WhenStoragePeriodIsEmpty_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = ""
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "store period is required")
}
func Test_Validate_WhenMaxBackupSizeIsNegative_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupSizeMB = -100
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "max backup size must be non-negative")
}
func Test_Validate_WhenMaxTotalSizeIsNegative_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.MaxBackupsTotalSizeMB = -1000
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "max backups total size must be non-negative")
}
func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) {
tests := []struct {
name string
configPeriod period.Period
planPeriod period.Period
configSize int64
planSize int64
configTotal int64
planTotal int64
shouldSucceed bool
}{
{
name: "all values just under limit",
configPeriod: period.PeriodWeek,
planPeriod: period.PeriodMonth,
configSize: 99,
planSize: 100,
configTotal: 999,
planTotal: 1000,
shouldSucceed: true,
},
{
name: "all values equal to limit",
configPeriod: period.PeriodMonth,
planPeriod: period.PeriodMonth,
configSize: 100,
planSize: 100,
configTotal: 1000,
planTotal: 1000,
shouldSucceed: true,
},
{
name: "period just over limit",
configPeriod: period.Period3Month,
planPeriod: period.PeriodMonth,
configSize: 100,
planSize: 100,
configTotal: 1000,
planTotal: 1000,
shouldSucceed: false,
},
{
name: "size just over limit",
configPeriod: period.PeriodMonth,
planPeriod: period.PeriodMonth,
configSize: 101,
planSize: 100,
configTotal: 1000,
planTotal: 1000,
shouldSucceed: false,
},
{
name: "total size just over limit",
configPeriod: period.PeriodMonth,
planPeriod: period.PeriodMonth,
configSize: 100,
planSize: 100,
configTotal: 1001,
planTotal: 1000,
shouldSucceed: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = tt.configPeriod
config.MaxBackupSizeMB = tt.configSize
config.MaxBackupsTotalSizeMB = tt.configTotal
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = tt.planPeriod
plan.MaxBackupSizeMB = tt.planSize
plan.MaxBackupsTotalSizeMB = tt.planTotal
err := config.Validate(plan)
if tt.shouldSucceed {
assert.NoError(t, err)
} else {
assert.Error(t, err)
}
})
}
}
// createValidBackupConfig returns a BackupConfig that passes Validate against
// an unlimited plan: an interval present both as ID and inline object, a
// one-month storage period, no encryption, retries disabled, and modest size
// limits (100 MB per backup / 1000 MB total). Tests mutate individual fields
// to probe specific validation rules.
func createValidBackupConfig() *BackupConfig {
	intervalID := uuid.New()
	return &BackupConfig{
		DatabaseID:            uuid.New(),
		IsBackupsEnabled:      true,
		StorePeriod:           period.PeriodMonth,
		BackupIntervalID:      intervalID,
		BackupInterval:        &intervals.Interval{ID: intervalID},
		SendNotificationsOn:   []BackupNotificationType{},
		IsRetryIfFailed:       false,
		MaxFailedTriesCount:   3,
		Encryption:            BackupEncryptionNone,
		MaxBackupSizeMB:       100,
		MaxBackupsTotalSizeMB: 1000,
	}
}
// createUnlimitedPlan returns a plan with no effective limits: size values of
// 0 mean "unlimited" and PeriodForever permits any storage period.
func createUnlimitedPlan() *plans.DatabasePlan {
	return &plans.DatabasePlan{
		DatabaseID:            uuid.New(),
		MaxBackupSizeMB:       0,
		MaxBackupsTotalSizeMB: 0,
		MaxStoragePeriod:      period.PeriodForever,
	}
}

View File

@@ -6,10 +6,10 @@ import (
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/intervals"
"databasus-backend/internal/features/notifiers"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/features/storages"
users_models "databasus-backend/internal/features/users/models"
workspaces_services "databasus-backend/internal/features/workspaces/services"
"databasus-backend/internal/util/period"
"github.com/google/uuid"
)
@@ -20,6 +20,7 @@ type BackupConfigService struct {
storageService *storages.StorageService
notifierService *notifiers.NotifierService
workspaceService *workspaces_services.WorkspaceService
databasePlanService *plans.DatabasePlanService
dbStorageChangeListener BackupConfigStorageChangeListener
}
@@ -45,7 +46,12 @@ func (s *BackupConfigService) SaveBackupConfigWithAuth(
user *users_models.User,
backupConfig *BackupConfig,
) (*BackupConfig, error) {
if err := backupConfig.Validate(); err != nil {
plan, err := s.databasePlanService.GetDatabasePlan(backupConfig.DatabaseID)
if err != nil {
return nil, err
}
if err := backupConfig.Validate(plan); err != nil {
return nil, err
}
@@ -71,7 +77,7 @@ func (s *BackupConfigService) SaveBackupConfigWithAuth(
if err != nil {
return nil, err
}
if storage.WorkspaceID != *database.WorkspaceID {
if storage.WorkspaceID != *database.WorkspaceID && !storage.IsSystem {
return nil, errors.New("storage does not belong to the same workspace as the database")
}
}
@@ -82,7 +88,12 @@ func (s *BackupConfigService) SaveBackupConfigWithAuth(
func (s *BackupConfigService) SaveBackupConfig(
backupConfig *BackupConfig,
) (*BackupConfig, error) {
if err := backupConfig.Validate(); err != nil {
plan, err := s.databasePlanService.GetDatabasePlan(backupConfig.DatabaseID)
if err != nil {
return nil, err
}
if err := backupConfig.Validate(plan); err != nil {
return nil, err
}
@@ -120,6 +131,18 @@ func (s *BackupConfigService) GetBackupConfigByDbIdWithAuth(
return s.GetBackupConfigByDbId(databaseID)
}
// GetDatabasePlan returns the plan for databaseID on behalf of user. The call
// to GetDatabase doubles as the authorization check: if the user may not
// access the database, its error is returned and the plan is never fetched.
func (s *BackupConfigService) GetDatabasePlan(
	user *users_models.User,
	databaseID uuid.UUID,
) (*plans.DatabasePlan, error) {
	_, err := s.databaseService.GetDatabase(user, databaseID)
	if err != nil {
		return nil, err
	}
	return s.databasePlanService.GetDatabasePlan(databaseID)
}
func (s *BackupConfigService) GetBackupConfigByDbId(
databaseID uuid.UUID,
) (*BackupConfig, error) {
@@ -194,12 +217,19 @@ func (s *BackupConfigService) CreateDisabledBackupConfig(databaseID uuid.UUID) e
func (s *BackupConfigService) initializeDefaultConfig(
databaseID uuid.UUID,
) error {
plan, err := s.databasePlanService.GetDatabasePlan(databaseID)
if err != nil {
return err
}
timeOfDay := "04:00"
_, err := s.backupConfigRepository.Save(&BackupConfig{
DatabaseID: databaseID,
IsBackupsEnabled: false,
StorePeriod: period.PeriodWeek,
_, err = s.backupConfigRepository.Save(&BackupConfig{
DatabaseID: databaseID,
IsBackupsEnabled: false,
StorePeriod: plan.MaxStoragePeriod,
MaxBackupSizeMB: plan.MaxBackupSizeMB,
MaxBackupsTotalSizeMB: plan.MaxBackupsTotalSizeMB,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,

View File

@@ -1,6 +1,7 @@
package databases
import (
"context"
"databasus-backend/internal/features/databases/databases/mariadb"
"databasus-backend/internal/features/databases/databases/mongodb"
"databasus-backend/internal/features/databases/databases/mysql"
@@ -84,6 +85,25 @@ func (d *Database) TestConnection(
return d.getSpecificDatabase().TestConnection(logger, encryptor, d.ID)
}
// IsUserReadOnly reports whether the credentials configured for this database
// belong to a read-only user, dispatching to the engine-specific
// implementation. It returns the read-only flag, a list of detected
// permissions (semantics defined by each engine's implementation), and an
// error for unsupported database types.
func (d *Database) IsUserReadOnly(
	ctx context.Context,
	logger *slog.Logger,
	encryptor encryption.FieldEncryptor,
) (bool, []string, error) {
	switch d.Type {
	case DatabaseTypePostgres:
		return d.Postgresql.IsUserReadOnly(ctx, logger, encryptor, d.ID)
	case DatabaseTypeMysql:
		return d.Mysql.IsUserReadOnly(ctx, logger, encryptor, d.ID)
	case DatabaseTypeMariadb:
		return d.Mariadb.IsUserReadOnly(ctx, logger, encryptor, d.ID)
	case DatabaseTypeMongodb:
		return d.Mongodb.IsUserReadOnly(ctx, logger, encryptor, d.ID)
	default:
		return false, nil, errors.New("read-only check not supported for this database type")
	}
}
func (d *Database) HideSensitiveData() {
d.getSpecificDatabase().HideSensitiveData()
}

View File

@@ -7,6 +7,7 @@ import (
"log/slog"
"time"
"databasus-backend/internal/config"
audit_logs "databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/databases/databases/mariadb"
"databasus-backend/internal/features/databases/databases/mongodb"
@@ -86,6 +87,23 @@ func (s *DatabaseService) CreateDatabase(
return nil, fmt.Errorf("failed to auto-detect database data: %w", err)
}
if config.GetEnv().IsCloud {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
isReadOnly, permissions, err := database.IsUserReadOnly(ctx, s.logger, s.fieldEncryptor)
if err != nil {
return nil, fmt.Errorf("failed to verify user permissions: %w", err)
}
if !isReadOnly {
return nil, fmt.Errorf(
"in cloud mode, only read-only database users are allowed (user has permissions: %v)",
permissions,
)
}
}
if err := database.EncryptSensitiveFields(s.fieldEncryptor); err != nil {
return nil, fmt.Errorf("failed to encrypt sensitive fields: %w", err)
}
@@ -153,6 +171,27 @@ func (s *DatabaseService) UpdateDatabase(
return fmt.Errorf("failed to auto-detect database data: %w", err)
}
if config.GetEnv().IsCloud {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
isReadOnly, permissions, err := existingDatabase.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
)
if err != nil {
return fmt.Errorf("failed to verify user permissions: %w", err)
}
if !isReadOnly {
return fmt.Errorf(
"in cloud mode, only read-only database users are allowed (user has permissions: %v)",
permissions,
)
}
}
if err := existingDatabase.EncryptSensitiveFields(s.fieldEncryptor); err != nil {
return fmt.Errorf("failed to encrypt sensitive fields: %w", err)
}
@@ -649,38 +688,7 @@ func (s *DatabaseService) IsUserReadOnly(
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
switch usingDatabase.Type {
case DatabaseTypePostgres:
return usingDatabase.Postgresql.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
case DatabaseTypeMysql:
return usingDatabase.Mysql.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
case DatabaseTypeMariadb:
return usingDatabase.Mariadb.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
case DatabaseTypeMongodb:
return usingDatabase.Mongodb.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
default:
return false, nil, errors.New("read-only check not supported for this database type")
}
return usingDatabase.IsUserReadOnly(ctx, s.logger, s.fieldEncryptor)
}
func (s *DatabaseService) CreateReadOnlyUser(

View File

@@ -12,6 +12,15 @@ import (
type DiskService struct{}
func (s *DiskService) GetDiskUsage() (*DiskUsage, error) {
if config.GetEnv().IsCloud {
return &DiskUsage{
Platform: PlatformLinux,
TotalSpaceBytes: 100,
UsedSpaceBytes: 0,
FreeSpaceBytes: 100,
}, nil
}
platform := s.detectPlatform()
var path string

View File

@@ -0,0 +1,20 @@
package plans
import (
"databasus-backend/internal/util/logger"
)
// Package-level singletons wiring the plans feature together.
var databasePlanRepository = &DatabasePlanRepository{}
var databasePlanService = &DatabasePlanService{
	databasePlanRepository,
	logger.GetLogger(),
}

// GetDatabasePlanService returns the shared DatabasePlanService singleton.
func GetDatabasePlanService() *DatabasePlanService {
	return databasePlanService
}

// GetDatabasePlanRepository returns the shared DatabasePlanRepository singleton.
func GetDatabasePlanRepository() *DatabasePlanRepository {
	return databasePlanRepository
}

View File

@@ -0,0 +1,19 @@
package plans
import (
"databasus-backend/internal/util/period"
"github.com/google/uuid"
)
// DatabasePlan holds per-database backup limits enforced when saving a backup
// config. Size values of 0 mean "unlimited"; MaxStoragePeriod caps how long
// backups may be retained.
type DatabasePlan struct {
	DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;type:uuid;primaryKey;not null"`
	// Maximum size of a single backup in MB. 0 = unlimited.
	MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"`
	// Maximum combined size of all backups in MB. 0 = unlimited.
	MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"`
	// Longest allowed retention period for backups.
	MaxStoragePeriod period.Period `json:"maxStoragePeriod" gorm:"column:max_storage_period;type:text;not null"`
}

// TableName tells gorm which table backs DatabasePlan.
func (p *DatabasePlan) TableName() string {
	return "database_plans"
}

View File

@@ -0,0 +1,27 @@
package plans
import (
"databasus-backend/internal/storage"
"github.com/google/uuid"
)
// DatabasePlanRepository persists DatabasePlan rows via the shared gorm handle.
type DatabasePlanRepository struct{}

// GetDatabasePlan loads the plan for databaseID.
// Returns (nil, nil) — no plan, no error — when the row does not exist yet;
// callers use that to trigger default-plan creation.
func (r *DatabasePlanRepository) GetDatabasePlan(databaseID uuid.UUID) (*DatabasePlan, error) {
	var databasePlan DatabasePlan
	if err := storage.GetDb().Where("database_id = ?", databaseID).First(&databasePlan).Error; err != nil {
		// NOTE(review): matching on the error message text is fragile — prefer
		// errors.Is(err, gorm.ErrRecordNotFound) if gorm can be imported here.
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &databasePlan, nil
}

// CreateDatabasePlan inserts a new plan row.
func (r *DatabasePlanRepository) CreateDatabasePlan(databasePlan *DatabasePlan) error {
	return storage.GetDb().Create(&databasePlan).Error
}

View File

@@ -0,0 +1,67 @@
package plans
import (
"databasus-backend/internal/config"
"databasus-backend/internal/util/period"
"log/slog"
"github.com/google/uuid"
)
// DatabasePlanService resolves per-database plans, lazily creating and
// persisting a default plan the first time a database's plan is requested.
type DatabasePlanService struct {
	databasePlanRepository *DatabasePlanRepository
	logger                 *slog.Logger
}

// GetDatabasePlan returns the stored plan for databaseID. When no plan exists
// yet, a default plan (cloud-limited or self-hosted-unlimited, see
// createDefaultDatabasePlan) is created, persisted, and returned.
// NOTE(review): two concurrent first calls can both attempt the insert; the
// loser would surface the primary-key violation instead of the existing row —
// confirm whether callers can race here.
func (s *DatabasePlanService) GetDatabasePlan(databaseID uuid.UUID) (*DatabasePlan, error) {
	plan, err := s.databasePlanRepository.GetDatabasePlan(databaseID)
	if err != nil {
		return nil, err
	}
	if plan == nil {
		s.logger.Info("no database plan found, creating default plan", "databaseID", databaseID)
		defaultPlan := s.createDefaultDatabasePlan(databaseID)
		err := s.databasePlanRepository.CreateDatabasePlan(defaultPlan)
		if err != nil {
			s.logger.Error("failed to create default database plan", "error", err)
			return nil, err
		}
		return defaultPlan, nil
	}
	return plan, nil
}

// createDefaultDatabasePlan builds (without persisting) the default plan:
// restrictive limits in cloud mode, everything unlimited in self-hosted mode.
func (s *DatabasePlanService) createDefaultDatabasePlan(databaseID uuid.UUID) *DatabasePlan {
	var plan DatabasePlan
	isCloud := config.GetEnv().IsCloud
	if isCloud {
		s.logger.Info("creating default database plan for cloud", "databaseID", databaseID)
		// for playground we set limited storages enough to test,
		// but not too expensive to provide it for Databasus
		plan = DatabasePlan{
			DatabaseID:            databaseID,
			MaxBackupSizeMB:       100,  // ~ 1.5GB database
			MaxBackupsTotalSizeMB: 4000, // ~ 30 daily backups + 10 manual backups
			MaxStoragePeriod:      period.PeriodWeek,
		}
	} else {
		s.logger.Info("creating default database plan for self hosted", "databaseID", databaseID)
		// by default - everything is unlimited in self hosted mode
		plan = DatabasePlan{
			DatabaseID:            databaseID,
			MaxBackupSizeMB:       0,
			MaxBackupsTotalSizeMB: 0,
			MaxStoragePeriod:      period.PeriodForever,
		}
	}
	return &plan
}

View File

@@ -6,6 +6,7 @@ import (
audit_logs "databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/backups/backups"
"databasus-backend/internal/features/backups/backups/backuping"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/disk"
@@ -51,6 +52,7 @@ func SetupDependencies() {
setupOnce.Do(func() {
backups.GetBackupService().AddBackupRemoveListener(restoreService)
backuping.GetBackupCleaner().AddBackupRemoveListener(restoreService)
isSetup.Store(true)
})

View File

@@ -1,6 +1,7 @@
package restores
import (
"databasus-backend/internal/config"
audit_logs "databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/backups/backups"
backups_core "databasus-backend/internal/features/backups/backups/core"
@@ -127,6 +128,13 @@ func (s *RestoreService) RestoreBackupWithAuth(
return err
}
if config.GetEnv().IsCloud {
// in cloud mode we use only single thread mode,
// because otherwise we will exhaust local storage
// space (instead of streaming from S3 directly to DB)
requestDTO.PostgresqlDatabase.CpuCount = 1
}
if err := s.validateVersionCompatibility(backupDatabase, requestDTO); err != nil {
return err
}

View File

@@ -65,6 +65,13 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
return fmt.Errorf("target database name is required for pg_restore")
}
// Validate CPU count constraint for cloud environments
if config.GetEnv().IsCloud && pg.CpuCount > 1 {
return fmt.Errorf(
"parallel restore (CPU count > 1) is not supported in cloud mode due to storage constraints. Please use CPU count = 1",
)
}
pgBin := tools.GetPostgresqlExecutable(
pg.Version,
"pg_restore",

View File

@@ -84,7 +84,7 @@ func Test_SaveNewStorage_StorageReturnedViaGet(t *testing.T) {
assert.Contains(t, storages, savedStorage)
deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token)
deleteStorage(t, router, savedStorage.ID, owner.Token)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
@@ -122,7 +122,169 @@ func Test_UpdateExistingStorage_UpdatedStorageReturnedViaGet(t *testing.T) {
assert.Equal(t, updatedName, updatedStorage.Name)
assert.Equal(t, savedStorage.ID, updatedStorage.ID)
deleteStorage(t, router, updatedStorage.ID, workspace.ID, owner.Token)
deleteStorage(t, router, updatedStorage.ID, owner.Token)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// Test_CreateSystemStorage_OnlyAdminCanCreate_MemberGetsForbidden verifies that
// creating a storage flagged as system is an admin-only operation: the admin's
// request succeeds, while a regular member receives 403 with a permissions error.
func Test_CreateSystemStorage_OnlyAdminCanCreate_MemberGetsForbidden(t *testing.T) {
	router := createRouter()
	adminUser := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	memberUser := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", adminUser, router)

	// Admin creates a system storage successfully.
	adminStorage := createNewStorage(workspace.ID)
	adminStorage.IsSystem = true
	var created Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+adminUser.Token,
		*adminStorage,
		http.StatusOK,
		&created,
	)
	assert.True(t, created.IsSystem)
	assert.Equal(t, adminStorage.Name, created.Name)

	// The same request from a plain member must be rejected.
	memberStorage := createNewStorage(workspace.ID)
	memberStorage.IsSystem = true
	forbiddenResp := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+memberUser.Token,
		*memberStorage,
		http.StatusForbidden,
	)
	assert.Contains(t, string(forbiddenResp.Body), "insufficient permissions")

	deleteStorage(t, router, created.ID, adminUser.Token)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// Test_UpdateStorageIsSystem_OnlyAdminCanUpdate_MemberGetsForbidden checks that
// promoting a storage to system and editing a system storage are admin-only:
// an admin can flip IsSystem to true, while a member's subsequent update attempt
// on the now-system storage returns 403 "insufficient permissions".
func Test_UpdateStorageIsSystem_OnlyAdminCanUpdate_MemberGetsForbidden(t *testing.T) {
	admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)
	router := createRouter()
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", admin, router)
	// Create a regular storage
	storage := createNewStorage(workspace.ID)
	storage.IsSystem = false
	var savedStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*storage,
		http.StatusOK,
		&savedStorage,
	)
	assert.False(t, savedStorage.IsSystem)
	// Admin can update to system
	savedStorage.IsSystem = true
	var updatedStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		savedStorage,
		http.StatusOK,
		&updatedStorage,
	)
	assert.True(t, updatedStorage.IsSystem)
	// Member cannot update system storage
	updatedStorage.Name = "Updated by member"
	resp := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+member.Token,
		updatedStorage,
		http.StatusForbidden,
	)
	assert.Contains(t, string(resp.Body), "insufficient permissions")
	deleteStorage(t, router, updatedStorage.ID, admin.Token)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// Test_UpdateSystemStorage_CannotChangeToPrivate_ReturnsBadRequest verifies the
// one-way nature of the system flag: once a storage is system it cannot be
// turned back into a private one (400 with a descriptive message), while other
// fields remain editable as long as IsSystem stays true.
func Test_UpdateSystemStorage_CannotChangeToPrivate_ReturnsBadRequest(t *testing.T) {
	admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	router := createRouter()
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", admin, router)
	// Create system storage
	storage := createNewStorage(workspace.ID)
	storage.IsSystem = true
	var savedStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*storage,
		http.StatusOK,
		&savedStorage,
	)
	assert.True(t, savedStorage.IsSystem)
	// Attempt to change system storage to non-system (should fail)
	savedStorage.IsSystem = false
	resp := test_utils.MakePostRequest(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		savedStorage,
		http.StatusBadRequest,
	)
	assert.Contains(t, string(resp.Body), "system storage cannot be changed to non-system")
	// Verify storage is still system
	var retrievedStorage Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()),
		"Bearer "+admin.Token,
		http.StatusOK,
		&retrievedStorage,
	)
	assert.True(t, retrievedStorage.IsSystem)
	// Admin can update other fields while keeping IsSystem=true
	savedStorage.IsSystem = true
	savedStorage.Name = "Updated System Storage"
	var updatedStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		savedStorage,
		http.StatusOK,
		&updatedStorage,
	)
	assert.True(t, updatedStorage.IsSystem)
	assert.Equal(t, "Updated System Storage", updatedStorage.Name)
	deleteStorage(t, router, updatedStorage.ID, admin.Token)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
@@ -205,7 +367,7 @@ func Test_TestExistingStorageConnection_ConnectionEstablished(t *testing.T) {
assert.Contains(t, string(response.Body), "successful")
deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token)
deleteStorage(t, router, savedStorage.ID, owner.Token)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
@@ -301,7 +463,14 @@ func Test_WorkspaceRolePermissions(t *testing.T) {
fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspace.ID.String()),
"Bearer "+testUserToken, http.StatusOK, &storages,
)
assert.Len(t, storages, 1)
// Count only non-system storages for this workspace
nonSystemStorages := 0
for _, s := range storages {
if !s.IsSystem {
nonSystemStorages++
}
}
assert.Equal(t, 1, nonSystemStorages)
// Test CREATE storage
createStatusCode := http.StatusOK
@@ -356,16 +525,514 @@ func Test_WorkspaceRolePermissions(t *testing.T) {
// Cleanup
if tt.canCreate {
deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token)
deleteStorage(t, router, savedStorage.ID, owner.Token)
}
if !tt.canDelete {
deleteStorage(t, router, ownerStorage.ID, workspace.ID, owner.Token)
deleteStorage(t, router, ownerStorage.ID, owner.Token)
}
workspaces_testing.RemoveTestWorkspace(workspace, router)
})
}
}
// Test_SystemStorage_AdminOnlyOperations is a table-driven check that every
// mutating operation on system storages (create, promote to system, update,
// delete) succeeds for an instance admin and returns 403 "insufficient
// permissions" for a plain workspace member. Each sub-test builds a fresh
// router, workspace, and acting user, then runs exactly one operation.
func Test_SystemStorage_AdminOnlyOperations(t *testing.T) {
	tests := []struct {
		name           string
		operation      string
		isAdmin        bool
		expectSuccess  bool
		expectedStatus int
	}{
		{
			name:           "admin can create system storage",
			operation:      "create",
			isAdmin:        true,
			expectSuccess:  true,
			expectedStatus: http.StatusOK,
		},
		{
			name:           "member cannot create system storage",
			operation:      "create",
			isAdmin:        false,
			expectSuccess:  false,
			expectedStatus: http.StatusForbidden,
		},
		{
			name:           "admin can update storage to make it system",
			operation:      "update_to_system",
			isAdmin:        true,
			expectSuccess:  true,
			expectedStatus: http.StatusOK,
		},
		{
			name:           "member cannot update storage to make it system",
			operation:      "update_to_system",
			isAdmin:        false,
			expectSuccess:  false,
			expectedStatus: http.StatusForbidden,
		},
		{
			name:           "admin can update system storage",
			operation:      "update_system",
			isAdmin:        true,
			expectSuccess:  true,
			expectedStatus: http.StatusOK,
		},
		{
			name:           "member cannot update system storage",
			operation:      "update_system",
			isAdmin:        false,
			expectSuccess:  false,
			expectedStatus: http.StatusForbidden,
		},
		{
			name:           "admin can delete system storage",
			operation:      "delete",
			isAdmin:        true,
			expectSuccess:  true,
			expectedStatus: http.StatusOK,
		},
		{
			name:           "member cannot delete system storage",
			operation:      "delete",
			isAdmin:        false,
			expectSuccess:  false,
			expectedStatus: http.StatusForbidden,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			router := createRouter()
			GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{})
			owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
			workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
			// Acting user is either a fresh instance admin, or a plain member
			// explicitly added to the workspace (so a 403 can only come from
			// the system-storage check, not from missing workspace access).
			var testUserToken string
			if tt.isAdmin {
				admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
				testUserToken = admin.Token
			} else {
				member := users_testing.CreateTestUser(users_enums.UserRoleMember)
				workspaces_testing.AddMemberToWorkspace(
					workspace,
					member,
					users_enums.WorkspaceRoleMember,
					owner.Token,
					router,
				)
				testUserToken = member.Token
			}
			switch tt.operation {
			case "create":
				systemStorage := &Storage{
					WorkspaceID:  workspace.ID,
					Type:         StorageTypeLocal,
					Name:         "Test System Storage " + uuid.New().String(),
					IsSystem:     true,
					LocalStorage: &local_storage.LocalStorage{},
				}
				if tt.expectSuccess {
					var savedStorage Storage
					test_utils.MakePostRequestAndUnmarshal(
						t,
						router,
						"/api/v1/storages",
						"Bearer "+testUserToken,
						*systemStorage,
						tt.expectedStatus,
						&savedStorage,
					)
					assert.NotEmpty(t, savedStorage.ID)
					assert.True(t, savedStorage.IsSystem)
					deleteStorage(t, router, savedStorage.ID, testUserToken)
				} else {
					resp := test_utils.MakePostRequest(
						t,
						router,
						"/api/v1/storages",
						"Bearer "+testUserToken,
						*systemStorage,
						tt.expectedStatus,
					)
					assert.Contains(t, string(resp.Body), "insufficient permissions")
				}
			case "update_to_system":
				// Owner creates private storage first
				privateStorage := createNewStorage(workspace.ID)
				var savedStorage Storage
				test_utils.MakePostRequestAndUnmarshal(
					t,
					router,
					"/api/v1/storages",
					"Bearer "+owner.Token,
					*privateStorage,
					http.StatusOK,
					&savedStorage,
				)
				// Test user attempts to make it system
				savedStorage.IsSystem = true
				if tt.expectSuccess {
					var updatedStorage Storage
					test_utils.MakePostRequestAndUnmarshal(
						t,
						router,
						"/api/v1/storages",
						"Bearer "+testUserToken,
						savedStorage,
						tt.expectedStatus,
						&updatedStorage,
					)
					assert.True(t, updatedStorage.IsSystem)
					deleteStorage(t, router, savedStorage.ID, testUserToken)
				} else {
					resp := test_utils.MakePostRequest(
						t,
						router,
						"/api/v1/storages",
						"Bearer "+testUserToken,
						savedStorage,
						tt.expectedStatus,
					)
					assert.Contains(t, string(resp.Body), "insufficient permissions")
					deleteStorage(t, router, savedStorage.ID, owner.Token)
				}
			case "update_system":
				// Admin creates system storage first
				admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
				systemStorage := &Storage{
					WorkspaceID:  workspace.ID,
					Type:         StorageTypeLocal,
					Name:         "Test System Storage " + uuid.New().String(),
					IsSystem:     true,
					LocalStorage: &local_storage.LocalStorage{},
				}
				var savedStorage Storage
				test_utils.MakePostRequestAndUnmarshal(
					t,
					router,
					"/api/v1/storages",
					"Bearer "+admin.Token,
					*systemStorage,
					http.StatusOK,
					&savedStorage,
				)
				// Test user attempts to update system storage
				savedStorage.Name = "Updated System Storage " + uuid.New().String()
				if tt.expectSuccess {
					var updatedStorage Storage
					test_utils.MakePostRequestAndUnmarshal(
						t,
						router,
						"/api/v1/storages",
						"Bearer "+testUserToken,
						savedStorage,
						tt.expectedStatus,
						&updatedStorage,
					)
					assert.Equal(t, savedStorage.Name, updatedStorage.Name)
					assert.True(t, updatedStorage.IsSystem)
					deleteStorage(t, router, savedStorage.ID, testUserToken)
				} else {
					resp := test_utils.MakePostRequest(
						t,
						router,
						"/api/v1/storages",
						"Bearer "+testUserToken,
						savedStorage,
						tt.expectedStatus,
					)
					assert.Contains(t, string(resp.Body), "insufficient permissions")
					deleteStorage(t, router, savedStorage.ID, admin.Token)
				}
			case "delete":
				// Admin creates system storage first
				admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
				systemStorage := &Storage{
					WorkspaceID:  workspace.ID,
					Type:         StorageTypeLocal,
					Name:         "Test System Storage " + uuid.New().String(),
					IsSystem:     true,
					LocalStorage: &local_storage.LocalStorage{},
				}
				var savedStorage Storage
				test_utils.MakePostRequestAndUnmarshal(
					t,
					router,
					"/api/v1/storages",
					"Bearer "+admin.Token,
					*systemStorage,
					http.StatusOK,
					&savedStorage,
				)
				// Test user attempts to delete system storage
				if tt.expectSuccess {
					test_utils.MakeDeleteRequest(
						t,
						router,
						fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()),
						"Bearer "+testUserToken,
						tt.expectedStatus,
					)
				} else {
					resp := test_utils.MakeDeleteRequest(
						t,
						router,
						fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()),
						"Bearer "+testUserToken,
						tt.expectedStatus,
					)
					assert.Contains(t, string(resp.Body), "insufficient permissions")
					deleteStorage(t, router, savedStorage.ID, admin.Token)
				}
			}
			workspaces_testing.RemoveTestWorkspace(workspace, router)
		})
	}
}
// Test_GetStorages_SystemStorageIncludedForAllUsers verifies system-storage
// visibility: a system storage created in one workspace appears in the storage
// list of every workspace, private storages stay scoped to their own workspace,
// and a user outside the workspace cannot list its storages at all.
func Test_GetStorages_SystemStorageIncludedForAllUsers(t *testing.T) {
	router := createRouter()
	GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{})
	// Create two workspaces with different owners
	ownerA := users_testing.CreateTestUser(users_enums.UserRoleMember)
	ownerB := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspaceA := workspaces_testing.CreateTestWorkspace("Workspace A", ownerA, router)
	workspaceB := workspaces_testing.CreateTestWorkspace("Workspace B", ownerB, router)
	// Create private storage in workspace A
	privateStorageA := createNewStorage(workspaceA.ID)
	var savedPrivateStorageA Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+ownerA.Token,
		*privateStorageA,
		http.StatusOK,
		&savedPrivateStorageA,
	)
	// Admin creates system storage in workspace B
	admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	systemStorageB := &Storage{
		WorkspaceID:  workspaceB.ID,
		Type:         StorageTypeLocal,
		Name:         "Test System Storage B " + uuid.New().String(),
		IsSystem:     true,
		LocalStorage: &local_storage.LocalStorage{},
	}
	var savedSystemStorageB Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*systemStorageB,
		http.StatusOK,
		&savedSystemStorageB,
	)
	// Test: User from workspace A should see both private storage A and system storage B
	var storagesForWorkspaceA []Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspaceA.ID.String()),
		"Bearer "+ownerA.Token,
		http.StatusOK,
		&storagesForWorkspaceA,
	)
	assert.GreaterOrEqual(t, len(storagesForWorkspaceA), 2)
	foundPrivateA := false
	foundSystemB := false
	for _, s := range storagesForWorkspaceA {
		if s.ID == savedPrivateStorageA.ID {
			foundPrivateA = true
		}
		if s.ID == savedSystemStorageB.ID {
			foundSystemB = true
		}
	}
	assert.True(t, foundPrivateA, "User from workspace A should see private storage A")
	assert.True(t, foundSystemB, "User from workspace A should see system storage B")
	// Test: User from workspace B should see system storage B
	var storagesForWorkspaceB []Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspaceB.ID.String()),
		"Bearer "+ownerB.Token,
		http.StatusOK,
		&storagesForWorkspaceB,
	)
	assert.GreaterOrEqual(t, len(storagesForWorkspaceB), 1)
	foundSystemBInWorkspaceB := false
	for _, s := range storagesForWorkspaceB {
		if s.ID == savedSystemStorageB.ID {
			foundSystemBInWorkspaceB = true
		}
		// Should NOT see private storage from workspace A
		assert.NotEqual(
			t,
			savedPrivateStorageA.ID,
			s.ID,
			"User from workspace B should not see private storage from workspace A",
		)
	}
	assert.True(t, foundSystemBInWorkspaceB, "User from workspace B should see system storage B")
	// Test: Outsider (not in any workspace) cannot access storages
	outsider := users_testing.CreateTestUser(users_enums.UserRoleMember)
	test_utils.MakeGetRequest(
		t,
		router,
		fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspaceA.ID.String()),
		"Bearer "+outsider.Token,
		http.StatusForbidden,
	)
	// Cleanup
	deleteStorage(t, router, savedPrivateStorageA.ID, ownerA.Token)
	deleteStorage(t, router, savedSystemStorageB.ID, admin.Token)
	workspaces_testing.RemoveTestWorkspace(workspaceA, router)
	workspaces_testing.RemoveTestWorkspace(workspaceB, router)
}
// Test_GetSystemStorage_SensitiveDataHiddenForNonAdmin verifies data hiding on
// system storages: an admin sees the storage-specific config with credentials
// blanked out (HideSensitiveData), while a non-admin receives only the generic
// fields — every storage-type sub-object is nil (HideAllData) — both on
// GET-by-id and in the list endpoint.
func Test_GetSystemStorage_SensitiveDataHiddenForNonAdmin(t *testing.T) {
	router := createRouter()
	GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{})
	admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, router)
	// Admin creates system S3 storage with credentials
	systemS3Storage := &Storage{
		WorkspaceID: workspace.ID,
		Type:        StorageTypeS3,
		Name:        "Test System S3 Storage " + uuid.New().String(),
		IsSystem:    true,
		S3Storage: &s3_storage.S3Storage{
			S3Bucket:    "test-system-bucket",
			S3Region:    "us-east-1",
			S3AccessKey: "test-access-key-123",
			S3SecretKey: "test-secret-key-456",
			S3Endpoint:  "https://s3.amazonaws.com",
		},
	}
	var savedStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*systemS3Storage,
		http.StatusOK,
		&savedStorage,
	)
	assert.NotEmpty(t, savedStorage.ID)
	assert.True(t, savedStorage.IsSystem)
	// Test: Admin retrieves system storage - should see S3Storage object with hidden sensitive fields
	var adminView Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()),
		"Bearer "+admin.Token,
		http.StatusOK,
		&adminView,
	)
	assert.NotNil(t, adminView.S3Storage, "Admin should see S3Storage object")
	assert.Equal(t, "test-system-bucket", adminView.S3Storage.S3Bucket)
	assert.Equal(t, "us-east-1", adminView.S3Storage.S3Region)
	// Sensitive fields should be hidden (empty strings)
	assert.Equal(
		t,
		"",
		adminView.S3Storage.S3AccessKey,
		"Admin should see hidden (empty) access key",
	)
	assert.Equal(
		t,
		"",
		adminView.S3Storage.S3SecretKey,
		"Admin should see hidden (empty) secret key",
	)
	// Test: Member retrieves system storage - should see storage but all specific data hidden
	var memberView Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()),
		"Bearer "+member.Token,
		http.StatusOK,
		&memberView,
	)
	assert.Equal(t, savedStorage.ID, memberView.ID)
	assert.Equal(t, savedStorage.Name, memberView.Name)
	assert.True(t, memberView.IsSystem)
	// All storage type objects should be nil for non-admin viewing system storage
	assert.Nil(t, memberView.S3Storage, "Non-admin should not see S3Storage object")
	assert.Nil(t, memberView.LocalStorage, "Non-admin should not see LocalStorage object")
	assert.Nil(
		t,
		memberView.GoogleDriveStorage,
		"Non-admin should not see GoogleDriveStorage object",
	)
	assert.Nil(t, memberView.NASStorage, "Non-admin should not see NASStorage object")
	assert.Nil(t, memberView.AzureBlobStorage, "Non-admin should not see AzureBlobStorage object")
	assert.Nil(t, memberView.FTPStorage, "Non-admin should not see FTPStorage object")
	assert.Nil(t, memberView.SFTPStorage, "Non-admin should not see SFTPStorage object")
	assert.Nil(t, memberView.RcloneStorage, "Non-admin should not see RcloneStorage object")
	// Test: Member can also see system storage in GetStorages list
	var storages []Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspace.ID.String()),
		"Bearer "+member.Token,
		http.StatusOK,
		&storages,
	)
	foundSystemStorage := false
	for _, s := range storages {
		if s.ID == savedStorage.ID {
			foundSystemStorage = true
			assert.True(t, s.IsSystem)
			assert.Nil(t, s.S3Storage, "Non-admin should not see S3Storage in list")
		}
	}
	assert.True(t, foundSystemStorage, "System storage should be in list")
	// Cleanup
	deleteStorage(t, router, savedStorage.ID, admin.Token)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
func Test_UserNotInWorkspace_CannotAccessStorages(t *testing.T) {
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
outsider := users_testing.CreateTestUser(users_enums.UserRoleMember)
@@ -417,7 +1084,7 @@ func Test_UserNotInWorkspace_CannotAccessStorages(t *testing.T) {
http.StatusForbidden,
)
deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token)
deleteStorage(t, router, savedStorage.ID, owner.Token)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
@@ -450,7 +1117,7 @@ func Test_CrossWorkspaceSecurity_CannotAccessStorageFromAnotherWorkspace(t *test
)
assert.Contains(t, string(response.Body), "insufficient permissions")
deleteStorage(t, router, savedStorage.ID, workspace1.ID, owner1.Token)
deleteStorage(t, router, savedStorage.ID, owner1.Token)
workspaces_testing.RemoveTestWorkspace(workspace1, router)
workspaces_testing.RemoveTestWorkspace(workspace2, router)
}
@@ -1122,10 +1789,10 @@ func Test_TransferStorage_PermissionsEnforced(t *testing.T) {
)
assert.Equal(t, targetWorkspace.ID, retrievedStorage.WorkspaceID)
deleteStorage(t, router, savedStorage.ID, targetWorkspace.ID, targetOwner.Token)
deleteStorage(t, router, savedStorage.ID, targetOwner.Token)
} else {
assert.Contains(t, string(testResp.Body), "insufficient permissions")
deleteStorage(t, router, savedStorage.ID, sourceWorkspace.ID, sourceOwner.Token)
deleteStorage(t, router, savedStorage.ID, sourceOwner.Token)
}
workspaces_testing.RemoveTestWorkspace(sourceWorkspace, router)
@@ -1175,11 +1842,129 @@ func Test_TransferStorageNotManagableWorkspace_TransferFailed(t *testing.T) {
"insufficient permissions to manage storage in target workspace",
)
deleteStorage(t, router, savedStorage.ID, workspace1.ID, userA.Token)
deleteStorage(t, router, savedStorage.ID, userA.Token)
workspaces_testing.RemoveTestWorkspace(workspace1, router)
workspaces_testing.RemoveTestWorkspace(workspace2, router)
}
// Test_TransferSystemStorage_TransferBlocked verifies that system storages are
// pinned to their workspace: a transfer request returns 400 with
// "system storage cannot be transferred" and leaves the storage where it was,
// while an ordinary private storage can still be transferred (regression guard).
func Test_TransferSystemStorage_TransferBlocked(t *testing.T) {
	router := createRouter()
	GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{})
	admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin)
	workspaceA := workspaces_testing.CreateTestWorkspace("Workspace A", admin, router)
	workspaceB := workspaces_testing.CreateTestWorkspace("Workspace B", admin, router)
	// Admin creates system storage in workspace A
	systemStorage := &Storage{
		WorkspaceID:  workspaceA.ID,
		Type:         StorageTypeLocal,
		Name:         "Test System Storage " + uuid.New().String(),
		IsSystem:     true,
		LocalStorage: &local_storage.LocalStorage{},
	}
	var savedSystemStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*systemStorage,
		http.StatusOK,
		&savedSystemStorage,
	)
	// Admin attempts to transfer system storage to workspace B - should be blocked
	transferRequest := TransferStorageRequest{
		TargetWorkspaceID: workspaceB.ID,
	}
	testResp := test_utils.MakePostRequest(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s/transfer", savedSystemStorage.ID.String()),
		"Bearer "+admin.Token,
		transferRequest,
		http.StatusBadRequest,
	)
	assert.Contains(
		t,
		string(testResp.Body),
		"system storage cannot be transferred",
		"Transfer should fail with appropriate error message",
	)
	// Verify storage is still in workspace A
	var retrievedStorage Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s", savedSystemStorage.ID.String()),
		"Bearer "+admin.Token,
		http.StatusOK,
		&retrievedStorage,
	)
	assert.Equal(
		t,
		workspaceA.ID,
		retrievedStorage.WorkspaceID,
		"Storage should remain in workspace A",
	)
	// Test regression: Non-system storage can still be transferred
	privateStorage := createNewStorage(workspaceA.ID)
	var savedPrivateStorage Storage
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/storages",
		"Bearer "+admin.Token,
		*privateStorage,
		http.StatusOK,
		&savedPrivateStorage,
	)
	privateTransferResp := test_utils.MakePostRequest(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s/transfer", savedPrivateStorage.ID.String()),
		"Bearer "+admin.Token,
		transferRequest,
		http.StatusOK,
	)
	assert.Contains(
		t,
		string(privateTransferResp.Body),
		"transferred successfully",
		"Private storage should be transferable",
	)
	// Verify private storage was transferred to workspace B
	var transferredStorage Storage
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/storages/%s", savedPrivateStorage.ID.String()),
		"Bearer "+admin.Token,
		http.StatusOK,
		&transferredStorage,
	)
	assert.Equal(
		t,
		workspaceB.ID,
		transferredStorage.WorkspaceID,
		"Private storage should be in workspace B",
	)
	// Cleanup
	deleteStorage(t, router, savedSystemStorage.ID, admin.Token)
	deleteStorage(t, router, savedPrivateStorage.ID, admin.Token)
	workspaces_testing.RemoveTestWorkspace(workspaceA, router)
	workspaces_testing.RemoveTestWorkspace(workspaceB, router)
}
func createRouter() *gin.Engine {
gin.SetMode(gin.TestMode)
router := gin.New()
@@ -1212,12 +1997,13 @@ func verifyStorageData(t *testing.T, expected *Storage, actual *Storage) {
assert.Equal(t, expected.Name, actual.Name)
assert.Equal(t, expected.Type, actual.Type)
assert.Equal(t, expected.WorkspaceID, actual.WorkspaceID)
assert.Equal(t, expected.IsSystem, actual.IsSystem)
}
func deleteStorage(
t *testing.T,
router *gin.Engine,
storageID, workspaceID uuid.UUID,
storageID uuid.UUID,
token string,
) {
test_utils.MakeDeleteRequest(

View File

@@ -33,4 +33,10 @@ var (
ErrStorageHasOtherAttachedDatabasesCannotTransfer = errors.New(
"storage has other attached databases and cannot be transferred",
)
ErrSystemStorageCannotBeTransferred = errors.New(
"system storage cannot be transferred between workspaces",
)
ErrSystemStorageCannotBeMadePrivate = errors.New(
"system storage cannot be changed to non-system",
)
)

View File

@@ -24,6 +24,7 @@ type Storage struct {
Type StorageType `json:"type" gorm:"column:type;not null;type:text"`
Name string `json:"name" gorm:"column:name;not null;type:text"`
LastSaveError *string `json:"lastSaveError" gorm:"column:last_save_error;type:text"`
IsSystem bool `json:"isSystem" gorm:"column:is_system;not null;default:false"`
// specific storage
LocalStorage *local_storage.LocalStorage `json:"localStorage" gorm:"foreignKey:StorageID"`
@@ -86,6 +87,17 @@ func (s *Storage) HideSensitiveData() {
s.getSpecificStorage().HideSensitiveData()
}
func (s *Storage) HideAllData() {
s.LocalStorage = nil
s.S3Storage = nil
s.GoogleDriveStorage = nil
s.NASStorage = nil
s.AzureBlobStorage = nil
s.FTPStorage = nil
s.SFTPStorage = nil
s.RcloneStorage = nil
}
func (s *Storage) EncryptSensitiveData(encryptor encryption.FieldEncryptor) error {
return s.getSpecificStorage().EncryptSensitiveData(encryptor)
}
@@ -93,6 +105,7 @@ func (s *Storage) EncryptSensitiveData(encryptor encryption.FieldEncryptor) erro
func (s *Storage) Update(incoming *Storage) {
s.Name = incoming.Name
s.Type = incoming.Type
s.IsSystem = incoming.IsSystem
switch s.Type {
case StorageTypeLocal:

View File

@@ -165,7 +165,7 @@ func (r *StorageRepository) FindByWorkspaceID(workspaceID uuid.UUID) ([]*Storage
Preload("FTPStorage").
Preload("SFTPStorage").
Preload("RcloneStorage").
Where("workspace_id = ?", workspaceID).
Where("workspace_id = ? OR is_system = TRUE", workspaceID).
Order("name ASC").
Find(&storages).Error; err != nil {
return nil, err

View File

@@ -4,6 +4,7 @@ import (
"fmt"
audit_logs "databasus-backend/internal/features/audit_logs"
users_enums "databasus-backend/internal/features/users/enums"
users_models "databasus-backend/internal/features/users/models"
workspaces_services "databasus-backend/internal/features/workspaces/services"
"databasus-backend/internal/util/encryption"
@@ -38,6 +39,11 @@ func (s *StorageService) SaveStorage(
isUpdate := storage.ID != uuid.Nil
if storage.IsSystem && user.Role != users_enums.UserRoleAdmin {
// only admin can manage system storage
return ErrInsufficientPermissionsToManageStorage
}
if isUpdate {
existingStorage, err := s.storageRepository.FindByID(storage.ID)
if err != nil {
@@ -48,6 +54,10 @@ func (s *StorageService) SaveStorage(
return ErrStorageDoesNotBelongToWorkspace
}
if existingStorage.IsSystem && !storage.IsSystem {
return ErrSystemStorageCannotBeMadePrivate
}
existingStorage.Update(storage)
if err := existingStorage.EncryptSensitiveData(s.fieldEncryptor); err != nil {
@@ -111,6 +121,11 @@ func (s *StorageService) DeleteStorage(
return ErrInsufficientPermissionsToManageStorage
}
if storage.IsSystem && user.Role != users_enums.UserRoleAdmin {
// only admin can manage system storage
return ErrInsufficientPermissionsToManageStorage
}
attachedDatabasesIDs, err := s.storageDatabaseCounter.GetStorageAttachedDatabasesIDs(storage.ID)
if err != nil {
return err
@@ -142,16 +157,22 @@ func (s *StorageService) GetStorage(
return nil, err
}
canView, _, err := s.workspaceService.CanUserAccessWorkspace(storage.WorkspaceID, user)
if err != nil {
return nil, err
}
if !canView {
return nil, ErrInsufficientPermissionsToViewStorage
if !storage.IsSystem {
canView, _, err := s.workspaceService.CanUserAccessWorkspace(storage.WorkspaceID, user)
if err != nil {
return nil, err
}
if !canView {
return nil, ErrInsufficientPermissionsToViewStorage
}
}
storage.HideSensitiveData()
if storage.IsSystem && user.Role != users_enums.UserRoleAdmin {
storage.HideAllData()
}
return storage, nil
}
@@ -174,6 +195,10 @@ func (s *StorageService) GetStorages(
for _, storage := range storages {
storage.HideSensitiveData()
if storage.IsSystem && user.Role != users_enums.UserRoleAdmin {
storage.HideAllData()
}
}
return storages, nil
@@ -258,6 +283,10 @@ func (s *StorageService) TransferStorageToWorkspace(
return err
}
if existingStorage.IsSystem {
return ErrSystemStorageCannotBeTransferred
}
canManageSource, err := s.workspaceService.CanUserManageDBs(existingStorage.WorkspaceID, user)
if err != nil {
return err

View File

@@ -23,6 +23,18 @@ func (c *HealthcheckController) RegisterRoutes(router *gin.RouterGroup) {
// @Failure 503 {object} HealthcheckResponse
// @Router /system/health [get]
func (c *HealthcheckController) CheckHealth(ctx *gin.Context) {
// Allow unrestricted CORS for health check endpoint
// This enables monitoring tools from any origin to check system health
ctx.Header("Access-Control-Allow-Origin", "*")
ctx.Header("Access-Control-Allow-Methods", "GET, OPTIONS")
ctx.Header("Access-Control-Allow-Headers", "Content-Type")
// Handle preflight OPTIONS request
if ctx.Request.Method == "OPTIONS" {
ctx.AbortWithStatus(http.StatusNoContent)
return
}
err := c.healthcheckService.IsHealthy()
if err == nil {

View File

@@ -47,3 +47,36 @@ func (p Period) ToDuration() time.Duration {
panic("unknown period: " + string(p))
}
}
// CompareTo compares this period with another and returns:
//
//	-1 if p < other
//	 0 if p == other
//	 1 if p > other
//
// PeriodForever is treated as the longest period even though its
// ToDuration value is 0.
func (p Period) CompareTo(other Period) int {
	if p == other {
		return 0
	}
	// Handle FOREVER before converting to durations: its duration is 0,
	// which would otherwise make it compare as the shortest period. This
	// also avoids computing durations that would never be used.
	if p == PeriodForever {
		return 1
	}
	if other == PeriodForever {
		return -1
	}
	d1 := p.ToDuration()
	d2 := other.ToDuration()
	if d1 < d2 {
		return -1
	}
	if d1 > d2 {
		return 1
	}
	return 0
}

View File

@@ -0,0 +1,23 @@
-- +goose Up
-- +goose StatementBegin
-- Per-config backup size limits; 0 means "no limit" so existing
-- (self-hosted) configs keep unlimited behavior after the migration.
ALTER TABLE backup_configs
ADD COLUMN max_backup_size_mb BIGINT NOT NULL DEFAULT 0,
ADD COLUMN max_backups_total_size_mb BIGINT NOT NULL DEFAULT 0;
-- NOTE(review): presumably marks backups that must not be retried after a
-- failure (e.g. a size-limit abort) — confirm with the backup retry logic.
ALTER TABLE backups
ADD COLUMN is_skip_retry BOOLEAN NOT NULL DEFAULT FALSE;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE backup_configs
DROP COLUMN IF EXISTS max_backups_total_size_mb,
DROP COLUMN IF EXISTS max_backup_size_mb;
ALTER TABLE backups
DROP COLUMN IF EXISTS is_skip_retry;
-- +goose StatementEnd

View File

@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
-- One plan row per database (database_id is the primary key).
-- max_storage_period holds a Period enum value as text (e.g. week/forever).
CREATE TABLE database_plans (
    database_id UUID PRIMARY KEY,
    max_backup_size_mb BIGINT NOT NULL,
    max_backups_total_size_mb BIGINT NOT NULL,
    max_storage_period TEXT NOT NULL
);
-- Plans live and die with their database.
ALTER TABLE database_plans
ADD CONSTRAINT fk_database_plans_database_id
FOREIGN KEY (database_id)
REFERENCES databases (id)
ON DELETE CASCADE;
-- NOTE(review): this index is redundant — the PRIMARY KEY on database_id
-- already creates a unique index; kept as written to match the shipped
-- migration.
CREATE INDEX idx_database_plans_database_id ON database_plans (database_id);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP INDEX IF EXISTS idx_database_plans_database_id;
ALTER TABLE database_plans DROP CONSTRAINT IF EXISTS fk_database_plans_database_id;
DROP TABLE IF EXISTS database_plans;
-- +goose StatementEnd

View File

@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
-- NOTE(review): presumably flags storages created/managed by the system
-- rather than by users — confirm against the storage service.
ALTER TABLE storages
ADD COLUMN is_system BOOLEAN NOT NULL DEFAULT FALSE;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
-- IF EXISTS keeps the rollback idempotent, consistent with the other
-- migrations in this changeset.
ALTER TABLE storages
DROP COLUMN IF EXISTS is_system;
-- +goose StatementEnd

View File

@@ -20,6 +20,7 @@
<body>
<div id="root"></div>
<script src="/runtime-config.js"></script>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

View File

@@ -14,7 +14,6 @@
"dayjs": "^1.11.13",
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-github-btn": "^1.4.0",
"react-router": "^7.6.0",
"recharts": "^3.2.0",
"tailwindcss": "^4.1.7"
@@ -4272,12 +4271,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/github-buttons": {
"version": "2.29.1",
"resolved": "https://registry.npmjs.org/github-buttons/-/github-buttons-2.29.1.tgz",
"integrity": "sha512-TV3YgAKda5hPz75n7QXmGCsSzgVya1vvmBieebg3EB5ScmashTZ0FldViG1aU2d4V5rcAGrtQ7k5uAaCo0A4PA==",
"license": "BSD-2-Clause"
},
"node_modules/glob": {
"version": "10.5.0",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
@@ -6759,18 +6752,6 @@
"react": "^19.1.0"
}
},
"node_modules/react-github-btn": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/react-github-btn/-/react-github-btn-1.4.0.tgz",
"integrity": "sha512-lV4FYClAfjWnBfv0iNlJUGhamDgIq6TayD0kPZED6VzHWdpcHmPfsYOZ/CFwLfPv4Zp+F4m8QKTj0oy2HjiGXg==",
"license": "BSD-2-Clause",
"dependencies": {
"github-buttons": "^2.22.0"
},
"peerDependencies": {
"react": ">=16.3.0"
}
},
"node_modules/react-is": {
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",

View File

@@ -19,7 +19,6 @@
"dayjs": "^1.11.13",
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-github-btn": "^1.4.0",
"react-router": "^7.6.0",
"recharts": "^3.2.0",
"tailwindcss": "^4.1.7"

View File

@@ -1,6 +1,7 @@
import { getApplicationServer } from '../../../constants';
import RequestOptions from '../../../shared/api/RequestOptions';
import { apiHelper } from '../../../shared/api/apiHelper';
import type { DatabasePlan } from '../../plan';
import type { BackupConfig } from '../model/BackupConfig';
import type { TransferDatabaseRequest } from '../model/TransferDatabaseRequest';
@@ -54,4 +55,12 @@ export const backupConfigApi = {
requestOptions,
);
},
// Fetches the plan limits (max backup sizes, max storage period) that
// apply to the given database.
// NOTE(review): the trailing `undefined, true` arguments mirror the
// other calls in this api object — confirm their meaning against
// apiHelper.fetchGetJson's signature.
async getDatabasePlan(databaseId: string) {
return apiHelper.fetchGetJson<DatabasePlan>(
`${getApplicationServer()}/api/v1/backup-configs/database/${databaseId}/plan`,
undefined,
true,
);
},
};

View File

@@ -6,3 +6,4 @@ export type { BackupConfig } from './model/BackupConfig';
export { BackupNotificationType } from './model/BackupNotificationType';
export { BackupEncryption } from './model/BackupEncryption';
export type { TransferDatabaseRequest } from './model/TransferDatabaseRequest';
export type { DatabasePlan } from '../plan';

View File

@@ -15,4 +15,7 @@ export interface BackupConfig {
isRetryIfFailed: boolean;
maxFailedTriesCount: number;
encryption: BackupEncryption;
maxBackupSizeMb: number;
maxBackupsTotalSizeMb: number;
}

View File

@@ -0,0 +1 @@
export type { DatabasePlan } from './model/DatabasePlan';

View File

@@ -0,0 +1,8 @@
import type { Period } from '../../databases/model/Period';
/**
 * Plan-level limits applied to a database's backups.
 * Consumers treat 0 in the size fields as "no limit" (limits are only
 * enforced when the value is > 0 — confirm against backend contract).
 */
export interface DatabasePlan {
// ID of the database these limits apply to.
databaseId: string;
// Maximum size of a single backup, in megabytes.
maxBackupSizeMb: number;
// Maximum combined size of all stored backups, in megabytes.
maxBackupsTotalSizeMb: number;
// Longest retention period the plan allows.
maxStoragePeriod: Period;
}

View File

@@ -14,6 +14,7 @@ export interface Storage {
name: string;
lastSaveError?: string;
workspaceId: string;
isSystem: boolean;
// specific storage types
localStorage?: LocalStorage;

View File

@@ -1,4 +1,4 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import {
Button,
Checkbox,
@@ -15,12 +15,19 @@ import { CronExpressionParser } from 'cron-parser';
import dayjs, { Dayjs } from 'dayjs';
import { useEffect, useMemo, useState } from 'react';
import { type BackupConfig, BackupEncryption, backupConfigApi } from '../../../entity/backups';
import { IS_CLOUD } from '../../../constants';
import {
type BackupConfig,
BackupEncryption,
type DatabasePlan,
backupConfigApi,
} from '../../../entity/backups';
import { BackupNotificationType } from '../../../entity/backups/model/BackupNotificationType';
import type { Database } from '../../../entity/databases';
import { Period } from '../../../entity/databases/model/Period';
import { type Interval, IntervalType } from '../../../entity/intervals';
import { type Storage, getStorageLogoFromType, storageApi } from '../../../entity/storages';
import type { UserProfile } from '../../../entity/users';
import { getUserTimeFormat } from '../../../shared/time';
import {
getUserTimeFormat as getIs12Hour,
@@ -33,6 +40,7 @@ import { ConfirmationComponent } from '../../../shared/ui';
import { EditStorageComponent } from '../../storages/ui/edit/EditStorageComponent';
interface Props {
user: UserProfile;
database: Database;
isShowBackButton: boolean;
@@ -57,6 +65,7 @@ const weekdayOptions = [
];
export const EditBackupConfigComponent = ({
user,
database,
isShowBackButton,
@@ -73,12 +82,20 @@ export const EditBackupConfigComponent = ({
const [isSaving, setIsSaving] = useState(false);
const [storages, setStorages] = useState<Storage[]>([]);
const [isStoragesLoading, setIsStoragesLoading] = useState(false);
const [isShowCreateStorage, setShowCreateStorage] = useState(false);
const [storageSelectKey, setStorageSelectKey] = useState(0);
const [isShowWarn, setIsShowWarn] = useState(false);
const [databasePlan, setDatabasePlan] = useState<DatabasePlan>();
const [isLoading, setIsLoading] = useState(true);
const hasAdvancedValues =
!!backupConfig?.isRetryIfFailed ||
(backupConfig?.maxBackupSizeMb ?? 0) > 0 ||
(backupConfig?.maxBackupsTotalSizeMb ?? 0) > 0;
const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
const timeFormat = useMemo(() => {
const is12 = getIs12Hour();
return { use12Hours: is12, format: is12 ? 'h:mm A' : 'HH:mm' };
@@ -86,6 +103,65 @@ export const EditBackupConfigComponent = ({
const dateTimeFormat = useMemo(() => getUserTimeFormat(), []);
// Fallback plan used when no server-side plan exists (e.g. database not
// yet created): cloud installs get the default cloud limits, self-hosted
// installs get unlimited sizes and unlimited retention.
const createDefaultPlan = (databaseId: string, isCloud: boolean): DatabasePlan =>
  isCloud
    ? {
        databaseId,
        maxBackupSizeMb: 100,
        maxBackupsTotalSizeMb: 4000,
        maxStoragePeriod: Period.WEEK,
      }
    : {
        databaseId,
        maxBackupSizeMb: 0,
        maxBackupsTotalSizeMb: 0,
        maxStoragePeriod: Period.FOREVER,
      };
// True when `period` is no longer than `maxPeriod`. Periods are ranked
// from shortest (DAY) to longest (FOREVER); a period not present in the
// ranking gets index -1 and is therefore always allowed, matching the
// original behavior.
const isPeriodAllowed = (period: Period, maxPeriod: Period): boolean => {
  const ranking: Period[] = [
    Period.DAY,
    Period.WEEK,
    Period.MONTH,
    Period.THREE_MONTH,
    Period.SIX_MONTH,
    Period.YEAR,
    Period.TWO_YEARS,
    Period.THREE_YEARS,
    Period.FOUR_YEARS,
    Period.FIVE_YEARS,
    Period.FOREVER,
  ];
  return ranking.indexOf(period) <= ranking.indexOf(maxPeriod);
};
// Store-period options shown in the select, restricted to the plan's
// maximum storage period once the plan has loaded (all options are
// offered while the plan is still undefined).
const availablePeriods = useMemo(() => {
  const entries: Array<[string, Period]> = [
    ['1 day', Period.DAY],
    ['1 week', Period.WEEK],
    ['1 month', Period.MONTH],
    ['3 months', Period.THREE_MONTH],
    ['6 months', Period.SIX_MONTH],
    ['1 year', Period.YEAR],
    ['2 years', Period.TWO_YEARS],
    ['3 years', Period.THREE_YEARS],
    ['4 years', Period.FOUR_YEARS],
    ['5 years', Period.FIVE_YEARS],
    ['Forever', Period.FOREVER],
  ];
  const options = entries.map(([label, value]) => ({ label, value }));
  return databasePlan
    ? options.filter((o) => isPeriodAllowed(o.value, databasePlan.maxStoragePeriod))
    : options;
}, [databasePlan]);
const updateBackupConfig = (patch: Partial<BackupConfig>) => {
setBackupConfig((prev) => (prev ? { ...prev, ...patch } : prev));
setIsUnsaved(true);
@@ -125,48 +201,63 @@ export const EditBackupConfigComponent = ({
};
const loadStorages = async () => {
setIsStoragesLoading(true);
try {
const storages = await storageApi.getStorages(database.workspaceId);
setStorages(storages);
} catch (e) {
alert((e as Error).message);
}
setIsStoragesLoading(false);
};
useEffect(() => {
if (database.id) {
backupConfigApi.getBackupConfigByDbID(database.id).then((res) => {
setBackupConfig(res);
setIsUnsaved(false);
setIsSaving(false);
});
} else {
setBackupConfig({
databaseId: database.id,
isBackupsEnabled: true,
backupInterval: {
id: undefined as unknown as string,
interval: IntervalType.DAILY,
timeOfDay: '00:00',
},
storage: undefined,
storePeriod: Period.THREE_MONTH,
sendNotificationsOn: [],
isRetryIfFailed: true,
maxFailedTriesCount: 3,
encryption: BackupEncryption.ENCRYPTED,
});
}
loadStorages();
const run = async () => {
setIsLoading(true);
try {
if (database.id) {
const config = await backupConfigApi.getBackupConfigByDbID(database.id);
setBackupConfig(config);
setIsUnsaved(false);
setIsSaving(false);
const plan = await backupConfigApi.getDatabasePlan(database.id);
setDatabasePlan(plan);
} else {
const plan = createDefaultPlan('', IS_CLOUD);
setDatabasePlan(plan);
setBackupConfig({
databaseId: database.id,
isBackupsEnabled: true,
backupInterval: {
id: undefined as unknown as string,
interval: IntervalType.DAILY,
timeOfDay: '00:00',
},
storage: undefined,
storePeriod:
plan.maxStoragePeriod === Period.FOREVER ? Period.THREE_MONTH : plan.maxStoragePeriod,
sendNotificationsOn: [BackupNotificationType.BackupFailed],
isRetryIfFailed: true,
maxFailedTriesCount: 3,
encryption: BackupEncryption.ENCRYPTED,
maxBackupSizeMb: plan.maxBackupSizeMb,
maxBackupsTotalSizeMb: plan.maxBackupsTotalSizeMb,
});
}
await loadStorages();
} catch (e) {
alert((e as Error).message);
} finally {
setIsLoading(false);
}
};
run();
}, [database]);
if (!backupConfig) return <div />;
if (isStoragesLoading) {
if (isLoading) {
return (
<div className="mb-5 flex items-center">
<Spin />
@@ -174,6 +265,8 @@ export const EditBackupConfigComponent = ({
);
}
if (!backupConfig) return <div />;
const { backupInterval } = backupConfig;
// UTC → local conversions for display
@@ -363,79 +456,6 @@ export const EditBackupConfigComponent = ({
</div>
)}
<div className="mt-4 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Retry backup if failed</div>
<div className="flex items-center">
<Switch
size="small"
checked={backupConfig.isRetryIfFailed}
onChange={(checked) => updateBackupConfig({ isRetryIfFailed: checked })}
/>
<Tooltip
className="cursor-pointer"
title="Automatically retry failed backups. Backups can fail due to network failures, storage issues or temporary database unavailability."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
{backupConfig.isRetryIfFailed && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Max failed tries count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={10}
value={backupConfig.maxFailedTriesCount}
onChange={(value) => updateBackupConfig({ maxFailedTriesCount: value || 1 })}
size="small"
className="w-full max-w-[200px] grow"
/>
<Tooltip
className="cursor-pointer"
title="Maximum number of retry attempts for failed backups. You will receive a notification when all tries have failed."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
)}
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Store period</div>
<div className="flex items-center">
<Select
value={backupConfig.storePeriod}
onChange={(v) => updateBackupConfig({ storePeriod: v })}
size="small"
className="w-full max-w-[200px] grow"
options={[
{ label: '1 day', value: Period.DAY },
{ label: '1 week', value: Period.WEEK },
{ label: '1 month', value: Period.MONTH },
{ label: '3 months', value: Period.THREE_MONTH },
{ label: '6 months', value: Period.SIX_MONTH },
{ label: '1 year', value: Period.YEAR },
{ label: '2 years', value: Period.TWO_YEARS },
{ label: '3 years', value: Period.THREE_YEARS },
{ label: '4 years', value: Period.FOUR_YEARS },
{ label: '5 years', value: Period.FIVE_YEARS },
{ label: 'Forever', value: Period.FOREVER },
]}
/>
<Tooltip
className="cursor-pointer"
title="How long to keep the backups? Make sure you have enough storage space."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
<div className="mb-3" />
</>
)}
@@ -478,23 +498,45 @@ export const EditBackupConfigComponent = ({
</div>
</div>
{!IS_CLOUD && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Encryption</div>
<div className="flex items-center">
<Select
value={backupConfig.encryption}
onChange={(v) => updateBackupConfig({ encryption: v })}
size="small"
className="w-[200px]"
options={[
{ label: 'None', value: BackupEncryption.NONE },
{ label: 'Encrypt backup files', value: BackupEncryption.ENCRYPTED },
]}
/>
<Tooltip
className="cursor-pointer"
title="If backup is encrypted, backup files in your storage (S3, local, etc.) cannot be used directly. You can restore backups through Databasus or download them unencrypted via the 'Download' button."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
)}
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Encryption</div>
<div className="mb-1 min-w-[150px] sm:mb-0">Store period</div>
<div className="flex items-center">
<Select
value={backupConfig.encryption}
onChange={(v) => updateBackupConfig({ encryption: v })}
value={backupConfig.storePeriod}
onChange={(v) => updateBackupConfig({ storePeriod: v })}
size="small"
className="w-full max-w-[200px] grow"
options={[
{ label: 'None', value: BackupEncryption.NONE },
{ label: 'Encrypt backup files', value: BackupEncryption.ENCRYPTED },
]}
className="w-[200px]"
options={availablePeriods}
/>
<Tooltip
className="cursor-pointer"
title="If backup is encrypted, backup files in your storage (S3, local, etc.) cannot be used directly. You can restore backups through Databasus or download them unencrypted via the 'Download' button."
title="How long to keep the backups? Make sure you have enough storage space."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
@@ -546,9 +588,184 @@ export const EditBackupConfigComponent = ({
</>
)}
<div className="mt-4 mb-1 flex items-center">
<div
className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
onClick={() => setShowAdvanced(!isShowAdvanced)}
>
<span className="mr-2">Advanced settings</span>
{isShowAdvanced ? (
<UpOutlined style={{ fontSize: '12px' }} />
) : (
<DownOutlined style={{ fontSize: '12px' }} />
)}
</div>
</div>
{isShowAdvanced && backupConfig.isBackupsEnabled && (
<>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Retry backup if failed</div>
<div className="flex items-center">
<Switch
size="small"
checked={backupConfig.isRetryIfFailed}
onChange={(checked) => updateBackupConfig({ isRetryIfFailed: checked })}
/>
<Tooltip
className="cursor-pointer"
title="Automatically retry failed backups. Backups can fail due to network failures, storage issues or temporary database unavailability."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
{backupConfig.isRetryIfFailed && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Max failed tries count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={10}
value={backupConfig.maxFailedTriesCount}
onChange={(value) => updateBackupConfig({ maxFailedTriesCount: value || 1 })}
size="small"
className="w-full max-w-[75px] grow"
/>
<Tooltip
className="cursor-pointer"
title="Maximum number of retry attempts for failed backups. You will receive a notification when all tries have failed."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
)}
<div className="mt-5 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Max backup size limit</div>
<div className="flex items-center">
<Switch
size="small"
checked={backupConfig.maxBackupSizeMb > 0}
disabled={IS_CLOUD}
onChange={(checked) => {
updateBackupConfig({
maxBackupSizeMb: checked ? backupConfig.maxBackupSizeMb || 1000 : 0,
});
}}
/>
<Tooltip
className="cursor-pointer"
title="Limits the size of each individual backup. Note that backups are typically 15× smaller than the database size. For example, a 100 MB backup represents approximately 1.5 GB database."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
{backupConfig.maxBackupSizeMb > 0 && (
<div className="mb-5 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Max file size (MB)</div>
<InputNumber
min={1}
max={
databasePlan?.maxBackupSizeMb && databasePlan.maxBackupSizeMb > 0
? databasePlan.maxBackupSizeMb
: undefined
}
value={backupConfig.maxBackupSizeMb}
onChange={(value) => {
const newValue = value || 1;
if (databasePlan?.maxBackupSizeMb && databasePlan.maxBackupSizeMb > 0) {
updateBackupConfig({
maxBackupSizeMb: Math.min(newValue, databasePlan.maxBackupSizeMb),
});
} else {
updateBackupConfig({ maxBackupSizeMb: newValue });
}
}}
size="small"
className="w-full max-w-[75px] grow"
/>
<div className="ml-2 text-xs text-gray-600 dark:text-gray-400">
~{((backupConfig.maxBackupSizeMb / 1024) * 15).toFixed(2)} GB DB size
</div>
</div>
)}
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Limit total backups size</div>
<div className="flex items-center">
<Switch
size="small"
checked={backupConfig.maxBackupsTotalSizeMb > 0}
disabled={IS_CLOUD}
onChange={(checked) => {
updateBackupConfig({
maxBackupsTotalSizeMb: checked
? backupConfig.maxBackupsTotalSizeMb || 1_000_000
: 0,
});
}}
/>
<Tooltip
className="cursor-pointer"
title="Limits the total size of all backups in storage (like S3, local, etc.). Once this limit is exceeded, the oldest backups are automatically removed until the total size is within the limit again."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
{backupConfig.maxBackupsTotalSizeMb > 0 && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Backups files size (MB)</div>
<InputNumber
min={1}
max={
databasePlan?.maxBackupsTotalSizeMb && databasePlan.maxBackupsTotalSizeMb > 0
? databasePlan.maxBackupsTotalSizeMb
: undefined
}
value={backupConfig.maxBackupsTotalSizeMb}
onChange={(value) => {
const newValue = value || 1;
if (
databasePlan?.maxBackupsTotalSizeMb &&
databasePlan.maxBackupsTotalSizeMb > 0
) {
updateBackupConfig({
maxBackupsTotalSizeMb: Math.min(newValue, databasePlan.maxBackupsTotalSizeMb),
});
} else {
updateBackupConfig({ maxBackupsTotalSizeMb: newValue });
}
}}
size="small"
className="w-full max-w-[75px] grow"
/>
<div className="ml-2 text-xs text-gray-600 dark:text-gray-400">
{(backupConfig.maxBackupsTotalSizeMb / 1024).toFixed(2)} GB (~
{backupConfig.maxBackupsTotalSizeMb / backupConfig.maxBackupSizeMb} backups)
</div>
</div>
)}
</>
)}
<div className="mt-5 flex">
{isShowBackButton && (
<Button className="mr-1" onClick={onBack}>
<Button className="mr-1" type="primary" ghost onClick={onBack}>
Back
</Button>
)}
@@ -586,6 +803,7 @@ export const EditBackupConfigComponent = ({
</div>
<EditStorageComponent
user={user}
workspaceId={database.workspaceId}
isShowName
isShowClose={false}

View File

@@ -5,6 +5,7 @@ import dayjs from 'dayjs';
import { useMemo } from 'react';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../constants';
import { type BackupConfig, BackupEncryption, backupConfigApi } from '../../../entity/backups';
import { BackupNotificationType } from '../../../entity/backups/model/BackupNotificationType';
import type { Database } from '../../../entity/databases';
@@ -210,17 +211,21 @@ export const ShowBackupConfigComponent = ({ database }: Props) => {
</div>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Encryption</div>
<div>{backupConfig.encryption === BackupEncryption.ENCRYPTED ? 'Enabled' : 'None'}</div>
{!IS_CLOUD && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Encryption</div>
<div>
{backupConfig.encryption === BackupEncryption.ENCRYPTED ? 'Enabled' : 'None'}
</div>
<Tooltip
className="cursor-pointer"
title="If backup is encrypted, backup files in your storage (S3, local, etc.) cannot be used directly. You can restore backups through Databasus or download them unencrypted via the 'Download' button."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
<Tooltip
className="cursor-pointer"
title="If backup is encrypted, backup files in your storage (S3, local, etc.) cannot be used directly. You can restore backups through Databasus or download them unencrypted via the 'Download' button."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Notifications</div>

View File

@@ -11,6 +11,7 @@ import {
type PostgresqlDatabase,
databaseApi,
} from '../../../entity/databases';
import type { UserProfile } from '../../../entity/users';
import { EditBackupConfigComponent } from '../../backups';
import { CreateReadOnlyComponent } from './edit/CreateReadOnlyComponent';
import { EditDatabaseBaseInfoComponent } from './edit/EditDatabaseBaseInfoComponent';
@@ -18,8 +19,8 @@ import { EditDatabaseNotifiersComponent } from './edit/EditDatabaseNotifiersComp
import { EditDatabaseSpecificDataComponent } from './edit/EditDatabaseSpecificDataComponent';
interface Props {
user: UserProfile;
workspaceId: string;
onCreated: (databaseId: string) => void;
onClose: () => void;
}
@@ -62,7 +63,7 @@ const initializeDatabaseTypeData = (db: Database): Database => {
}
};
export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Props) => {
export const CreateDatabaseComponent = ({ user, workspaceId, onCreated, onClose }: Props) => {
const [isCreating, setIsCreating] = useState(false);
const [backupConfig, setBackupConfig] = useState<BackupConfig | undefined>();
const [database, setDatabase] = useState<Database>(createInitialDatabase(workspaceId));
@@ -137,9 +138,11 @@ export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Pro
database={database}
onReadOnlyUserUpdated={(database) => {
setDatabase({ ...database });
setStep('backup-config');
}}
onGoBack={() => setStep('db-settings')}
onContinue={() => setStep('backup-config')}
onSkipped={() => setStep('backup-config')}
onAlreadyExists={() => setStep('backup-config')}
/>
);
}
@@ -147,6 +150,7 @@ export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Pro
if (step === 'backup-config') {
return (
<EditBackupConfigComponent
user={user}
database={database}
isShowCancelButton={false}
onCancel={() => onClose()}

View File

@@ -3,6 +3,7 @@ import { useRef, useState } from 'react';
import { useEffect } from 'react';
import { type Database, databaseApi } from '../../../entity/databases';
import type { UserProfile } from '../../../entity/users';
import { BackupsComponent } from '../../backups';
import { HealthckeckAttemptsComponent } from '../../healthcheck';
import { DatabaseConfigComponent } from './DatabaseConfigComponent';
@@ -10,6 +11,7 @@ import { DatabaseConfigComponent } from './DatabaseConfigComponent';
interface Props {
contentHeight: number;
databaseId: string;
user: UserProfile;
onDatabaseChanged: (database: Database) => void;
onDatabaseDeleted: () => void;
isCanManageDBs: boolean;
@@ -18,6 +20,7 @@ interface Props {
export const DatabaseComponent = ({
contentHeight,
databaseId,
user,
onDatabaseChanged,
onDatabaseDeleted,
isCanManageDBs,
@@ -68,6 +71,7 @@ export const DatabaseComponent = ({
{currentTab === 'config' && (
<DatabaseConfigComponent
database={database}
user={user}
setDatabase={setDatabase}
onDatabaseChanged={onDatabaseChanged}
onDatabaseDeleted={onDatabaseDeleted}

View File

@@ -10,6 +10,7 @@ import { useEffect, useState } from 'react';
import { backupConfigApi } from '../../../entity/backups';
import { type Database, databaseApi } from '../../../entity/databases';
import type { UserProfile } from '../../../entity/users';
import { ToastHelper } from '../../../shared/toast';
import { ConfirmationComponent } from '../../../shared/ui';
import { EditBackupConfigComponent, ShowBackupConfigComponent } from '../../backups';
@@ -22,6 +23,7 @@ import { ShowDatabaseSpecificDataComponent } from './show/ShowDatabaseSpecificDa
interface Props {
database: Database;
user: UserProfile;
setDatabase: (database?: Database | undefined) => void;
onDatabaseChanged: (database: Database) => void;
onDatabaseDeleted: () => void;
@@ -33,6 +35,7 @@ interface Props {
export const DatabaseConfigComponent = ({
database,
user,
setDatabase,
onDatabaseChanged,
onDatabaseDeleted,
@@ -311,6 +314,7 @@ export const DatabaseConfigComponent = ({
{isEditBackupConfig ? (
<EditBackupConfigComponent
database={database}
user={user}
isShowCancelButton
onCancel={() => {
setIsEditBackupConfig(false);
@@ -464,6 +468,7 @@ export const DatabaseConfigComponent = ({
{isShowTransferDialog && (
<DatabaseTransferDialogComponent
database={database}
user={user}
currentStorageId={currentStorageId}
onClose={() => setIsShowTransferDialog(false)}
onTransferred={() => {

View File

@@ -8,6 +8,7 @@ import { type Database, databaseApi } from '../../../entity/databases';
import type { Notifier } from '../../../entity/notifiers';
import { notifierApi } from '../../../entity/notifiers';
import { type Storage, getStorageLogoFromType, storageApi } from '../../../entity/storages';
import type { UserProfile } from '../../../entity/users';
import { type WorkspaceResponse, workspaceApi } from '../../../entity/workspaces';
import { ToastHelper } from '../../../shared/toast';
import { EditNotifierComponent } from '../../notifiers/ui/edit/EditNotifierComponent';
@@ -15,6 +16,7 @@ import { EditStorageComponent } from '../../storages/ui/edit/EditStorageComponen
interface Props {
database: Database;
user: UserProfile;
currentStorageId?: string;
onClose: () => void;
onTransferred: () => void;
@@ -28,6 +30,7 @@ interface NotifierUsageInfo {
export const DatabaseTransferDialogComponent = ({
database,
user,
currentStorageId,
onClose,
onTransferred,
@@ -419,6 +422,7 @@ export const DatabaseTransferDialogComponent = ({
<EditStorageComponent
workspaceId={selectedWorkspaceId}
user={user}
isShowName
isShowClose={false}
onClose={() => setIsShowCreateStorage(false)}

View File

@@ -3,6 +3,7 @@ import { useEffect, useState } from 'react';
import { databaseApi } from '../../../entity/databases';
import type { Database } from '../../../entity/databases';
import type { UserProfile } from '../../../entity/users';
import type { WorkspaceResponse } from '../../../entity/workspaces';
import { useIsMobile } from '../../../shared/hooks';
import { CreateDatabaseComponent } from './CreateDatabaseComponent';
@@ -12,12 +13,13 @@ import { DatabaseComponent } from './DatabaseComponent';
interface Props {
contentHeight: number;
workspace: WorkspaceResponse;
user: UserProfile;
isCanManageDBs: boolean;
}
const SELECTED_DATABASE_STORAGE_KEY = 'selectedDatabaseId';
export const DatabasesComponent = ({ contentHeight, workspace, isCanManageDBs }: Props) => {
export const DatabasesComponent = ({ contentHeight, workspace, user, isCanManageDBs }: Props) => {
const isMobile = useIsMobile();
const [isLoading, setIsLoading] = useState(true);
const [databases, setDatabases] = useState<Database[]>([]);
@@ -157,6 +159,7 @@ export const DatabasesComponent = ({ contentHeight, workspace, isCanManageDBs }:
<DatabaseComponent
contentHeight={isMobile ? contentHeight - 50 : contentHeight}
databaseId={selectedDatabaseId}
user={user}
onDatabaseChanged={() => {
loadDatabases();
}}
@@ -185,6 +188,7 @@ export const DatabasesComponent = ({ contentHeight, workspace, isCanManageDBs }:
<div className="mt-5" />
<CreateDatabaseComponent
user={user}
workspaceId={workspace.id}
onCreated={(databaseId) => {
loadDatabases(false, databaseId);

View File

@@ -1,6 +1,7 @@
import { Button, Modal, Spin } from 'antd';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases';
interface Props {
@@ -8,7 +9,8 @@ interface Props {
onReadOnlyUserUpdated: (database: Database) => void;
onGoBack: () => void;
onContinue: () => void;
onSkipped: () => void;
onAlreadyExists: () => void;
}
const PRIVILEGES_TRUNCATE_LENGTH = 50;
@@ -17,7 +19,8 @@ export const CreateReadOnlyComponent = ({
database,
onReadOnlyUserUpdated,
onGoBack,
onContinue,
onSkipped,
onAlreadyExists,
}: Props) => {
const [isCheckingReadOnlyUser, setIsCheckingReadOnlyUser] = useState(false);
const [isCreatingReadOnlyUser, setIsCreatingReadOnlyUser] = useState(false);
@@ -87,7 +90,6 @@ export const CreateReadOnlyComponent = ({
}
onReadOnlyUserUpdated(database);
onContinue();
} catch (e) {
alert((e as Error).message);
}
@@ -101,7 +103,7 @@ export const CreateReadOnlyComponent = ({
const handleSkipConfirmed = () => {
setShowSkipConfirmation(false);
onContinue();
onSkipped();
};
useEffect(() => {
@@ -110,7 +112,7 @@ export const CreateReadOnlyComponent = ({
const isReadOnly = await checkReadOnlyUser();
if (isReadOnly) {
onContinue();
onAlreadyExists();
}
setIsCheckingReadOnlyUser(false);
@@ -192,9 +194,11 @@ export const CreateReadOnlyComponent = ({
Back
</Button>
<Button className="mr-2 ml-auto" danger ghost onClick={handleSkip}>
Skip
</Button>
{!IS_CLOUD && (
<Button className="mr-2 ml-auto" danger ghost onClick={handleSkip}>
Skip
</Button>
)}
<Button
type="primary"
@@ -228,7 +232,7 @@ export const CreateReadOnlyComponent = ({
</div>
<div className="flex justify-end">
<Button className="mr-2" danger onClick={handleSkipConfirmed}>
<Button className="mr-2" danger ghost onClick={handleSkipConfirmed}>
Yes, I accept risks
</Button>

View File

@@ -115,20 +115,20 @@ export const EditDatabaseBaseInfoComponent = ({
<div>
{isShowName && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Name</div>
<div className="min-w-[100px] md:min-w-[150px]">Name</div>
<Input
value={editingDatabase.name || ''}
onChange={(e) => updateDatabase({ name: e.target.value })}
size="small"
placeholder="My favourite DB"
className="max-w-[200px] grow"
className="max-w-[150px] grow md:max-w-[200px]"
/>
</div>
)}
{isShowType && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Database type</div>
<div className="min-w-[100px] md:min-w-[150px]">Database type</div>
<div className="flex items-center">
<Select
@@ -136,7 +136,7 @@ export const EditDatabaseBaseInfoComponent = ({
onChange={handleTypeChange}
options={databaseTypeOptions}
size="small"
className="w-[200px] grow"
className="w-[150px] grow md:w-[200px]"
/>
<img

View File

@@ -1,4 +1,8 @@
import { type Database, DatabaseType } from '../../../../entity/databases';
import { Modal } from 'antd';
import { useState } from 'react';
import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases';
import { CreateReadOnlyComponent } from './CreateReadOnlyComponent';
import { EditMariaDbSpecificDataComponent } from './EditMariaDbSpecificDataComponent';
import { EditMongoDbSpecificDataComponent } from './EditMongoDbSpecificDataComponent';
import { EditMySqlSpecificDataComponent } from './EditMySqlSpecificDataComponent';
@@ -36,19 +40,84 @@ export const EditDatabaseSpecificDataComponent = ({
isShowDbName = true,
isRestoreMode = false,
}: Props) => {
const [isShowReadOnlyDialog, setIsShowReadOnlyDialog] = useState(false);
const [editingDatabase, setEditingDatabase] = useState<Database>(database);
const saveDb = async (databaseToSave: Database) => {
setEditingDatabase(databaseToSave);
if (!isSaveToApi) {
onSaved(databaseToSave);
return;
}
try {
const result = await databaseApi.isUserReadOnly(databaseToSave);
if (result.isReadOnly) {
onSaved(databaseToSave);
} else {
setIsShowReadOnlyDialog(true);
}
} catch (e) {
alert((e as Error).message);
}
};
const onReadOnlyUserCreated = (updatedDatabase: Database) => {
setEditingDatabase(updatedDatabase);
setIsShowReadOnlyDialog(false);
};
const skipReadOnlyUser = () => {
setIsShowReadOnlyDialog(false);
onSaved(editingDatabase);
};
if (isShowReadOnlyDialog) {
return (
<Modal
title="Create read-only user"
footer={<div />}
open={isShowReadOnlyDialog}
onCancel={() => setIsShowReadOnlyDialog(false)}
maskClosable={false}
width={450}
>
<CreateReadOnlyComponent
database={editingDatabase}
onReadOnlyUserUpdated={(db) => {
console.log('onReadOnlyUserUpdated', db);
onReadOnlyUserCreated(db);
}}
onGoBack={() => {
setIsShowReadOnlyDialog(false);
}}
onSkipped={() => {
skipReadOnlyUser();
}}
onAlreadyExists={() => {
console.log('onAlreadyExists');
onSaved(editingDatabase);
}}
/>
</Modal>
);
}
const commonProps = {
database,
database: editingDatabase,
isShowCancelButton,
onCancel,
isShowBackButton,
onBack,
saveButtonText,
isSaveToApi,
onSaved,
onSaved: saveDb,
isShowDbName,
};
switch (database.type) {
switch (editingDatabase.type) {
case DatabaseType.POSTGRES:
return <EditPostgreSqlSpecificDataComponent {...commonProps} isRestoreMode={isRestoreMode} />;
case DatabaseType.MYSQL:

View File

@@ -2,6 +2,7 @@ import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant
import { App, Button, Checkbox, Input, InputNumber, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MariadbConnectionStringParser } from '../../../../entity/databases/model/mariadb/MariadbConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
@@ -199,7 +200,7 @@ export const EditMariaDbSpecificDataComponent = ({
/>
</div>
{isLocalhostDb && (
{isLocalhostDb && !IS_CLOUD && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">

View File

@@ -2,6 +2,7 @@ import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant
import { App, Button, Input, InputNumber, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MongodbConnectionStringParser } from '../../../../entity/databases/model/mongodb/MongodbConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
@@ -201,7 +202,7 @@ export const EditMongoDbSpecificDataComponent = ({
/>
</div>
{isLocalhostDb && (
{isLocalhostDb && !IS_CLOUD && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">

View File

@@ -2,6 +2,7 @@ import { CopyOutlined } from '@ant-design/icons';
import { App, Button, Input, InputNumber, Switch } from 'antd';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MySqlConnectionStringParser } from '../../../../entity/databases/model/mysql/MySqlConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
@@ -196,7 +197,7 @@ export const EditMySqlSpecificDataComponent = ({
/>
</div>
{isLocalhostDb && (
{isLocalhostDb && !IS_CLOUD && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">

View File

@@ -2,6 +2,7 @@ import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant
import { App, Button, Checkbox, Input, InputNumber, Select, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { ConnectionStringParser } from '../../../../entity/databases/model/postgresql/ConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
@@ -235,7 +236,7 @@ export const EditPostgreSqlSpecificDataComponent = ({
/>
</div>
{isLocalhostDb && (
{isLocalhostDb && !IS_CLOUD && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
@@ -372,7 +373,7 @@ export const EditPostgreSqlSpecificDataComponent = ({
/>
</div>
{isRestoreMode && (
{isRestoreMode && !IS_CLOUD && (
<div className="mb-5 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div className="flex items-center">

View File

@@ -0,0 +1 @@
export { PlaygroundWarningComponent } from './ui/PlaygroundWarningComponent';

View File

@@ -0,0 +1,146 @@
import { Modal } from 'antd';
import type { JSX } from 'react';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../constants';

// localStorage key remembering that the user already dismissed the modal.
const STORAGE_KEY = 'databasus_playground_info_dismissed';
// Seconds the confirm button stays disabled, nudging users to read the text.
const TIMEOUT_SECONDS = 30;

/**
 * One-time informational modal for the cloud playground environment.
 *
 * Shown only when IS_CLOUD is true and the user has not dismissed it before
 * (tracked via localStorage). The confirm button is disabled for
 * TIMEOUT_SECONDS and shows a countdown; once it reaches zero the user can
 * dismiss the modal, which persists the dismissal flag.
 */
export const PlaygroundWarningComponent = (): JSX.Element => {
  const [isVisible, setIsVisible] = useState(false);
  const [remainingSeconds, setRemainingSeconds] = useState(TIMEOUT_SECONDS);
  const [isButtonEnabled, setIsButtonEnabled] = useState(false);

  // Persist the dismissal so the modal is not shown on subsequent visits.
  const handleClose = () => {
    try {
      localStorage.setItem(STORAGE_KEY, 'true');
    } catch (e) {
      // localStorage can throw (private mode, quota) — still close the modal.
      console.warn('Failed to save playground modal state to localStorage:', e);
    }
    setIsVisible(false);
  };

  // Decide on mount whether the modal should be shown at all.
  useEffect(() => {
    if (!IS_CLOUD) {
      return;
    }
    try {
      const isDismissed = localStorage.getItem(STORAGE_KEY) === 'true';
      if (!isDismissed) {
        setIsVisible(true);
      }
    } catch (e) {
      // If localStorage is unreadable, err on the side of showing the warning.
      console.warn('Failed to read playground modal state from localStorage:', e);
      setIsVisible(true);
    }
  }, []);

  // Countdown that enables the confirm button after TIMEOUT_SECONDS.
  useEffect(() => {
    if (!isVisible) {
      return;
    }
    const interval = setInterval(() => {
      setRemainingSeconds((prev) => {
        if (prev <= 1) {
          // NOTE(review): side effects inside a state updater run per
          // invocation (twice in StrictMode); both are idempotent here.
          setIsButtonEnabled(true);
          clearInterval(interval);
          return 0;
        }
        return prev - 1;
      });
    }, 1000);
    return () => clearInterval(interval);
  }, [isVisible]);

  return (
    <Modal
      title="Welcome to Databasus Playground"
      open={isVisible}
      onOk={handleClose}
      okText={
        <div className="min-w-[100px]">
          {isButtonEnabled ? 'Understood' : `${remainingSeconds}`}
        </div>
      }
      okButtonProps={{ disabled: !isButtonEnabled }}
      closable={false}
      cancelButtonProps={{ style: { display: 'none' } }}
      width={500}
      centered
      maskClosable={false}
    >
      <div className="space-y-6 py-4">
        <div>
          <h3 className="mb-2 text-lg font-semibold">What is Playground?</h3>
          <p className="text-gray-700 dark:text-gray-300">
            Playground is a dev environment where you can test small databases backup and see
            Databasus in action. Databasus dev team can test new features and see issues which hard
            to detect when using self hosted (without logs or reports)
          </p>
        </div>

        <div>
          <h3 className="mb-2 text-lg font-semibold">What is limit?</h3>
          <ul className="list-disc space-y-1 pl-5 text-gray-700 dark:text-gray-300">
            <li>Single backup size - 100 MB (~1.5 GB database)</li>
            <li>Store period - 7 days</li>
          </ul>
        </div>

        <div>
          <h3 className="mb-2 text-lg font-semibold">Is it secure?</h3>
          <p className="text-gray-700 dark:text-gray-300">
            Yes, it&apos;s regular Databasus installation, secured and maintained by Databasus team.
            More about security{' '}
            <a
              href="https://databasus.com/security"
              target="_blank"
              rel="noopener noreferrer"
              className="text-blue-600 hover:underline dark:text-blue-400"
            >
              you can read here
            </a>
          </p>
        </div>

        <div>
          {/* Fixed user-facing typo: "currepted" -> "corrupted" */}
          <h3 className="mb-2 text-lg font-semibold">Can my data be corrupted?</h3>
          <p className="text-gray-700 dark:text-gray-300">
            No, because playground uses only read-only users and cannot affect your DB. Only issue
            you can face is instability: playground background workers frequently reloaded so backup
            can be slower or be restarted due to app restart
          </p>
        </div>

        <div>
          <h3 className="mb-2 text-lg font-semibold">What if I see an issue?</h3>
          <p className="text-gray-700 dark:text-gray-300">
            Create{' '}
            <a
              href="https://github.com/databasus/databasus/issues"
              target="_blank"
              rel="noopener noreferrer"
              className="text-blue-600 hover:underline dark:text-blue-400"
            >
              GitHub issue
            </a>{' '}
            or write{' '}
            <a
              href="https://t.me/databasus_community"
              target="_blank"
              rel="noopener noreferrer"
              className="text-blue-600 hover:underline dark:text-blue-400"
            >
              to the community
            </a>
          </p>
        </div>
      </div>
    </Modal>
  );
};

View File

@@ -91,7 +91,7 @@ export function SettingsComponent({ contentHeight }: Props) {
console.log(`isCloud = ${IS_CLOUD}`);
return (
<div className="flex grow sm:pl-5">
<div className="flex grow">
<div className="w-full">
<div
ref={scrollContainerRef}

View File

@@ -40,6 +40,12 @@ export const StorageCardComponent = ({
Has save error
</div>
)}
{storage.isSystem && (
<div className="mt-2 inline-block rounded-lg bg-[#ffffff10] px-2 py-1 text-xs text-gray-700 dark:text-gray-300">
System storage
</div>
)}
</div>
);
};

View File

@@ -11,6 +11,7 @@ import { useEffect } from 'react';
import { backupConfigApi } from '../../../entity/backups';
import { storageApi } from '../../../entity/storages';
import type { Storage } from '../../../entity/storages';
import { type UserProfile, UserRole } from '../../../entity/users';
import { ToastHelper } from '../../../shared/toast';
import { ConfirmationComponent } from '../../../shared/ui';
import { StorageTransferDialogComponent } from './StorageTransferDialogComponent';
@@ -23,6 +24,7 @@ interface Props {
onStorageDeleted: () => void;
onStorageTransferred: () => void;
isCanManageStorages: boolean;
user: UserProfile;
}
export const StorageComponent = ({
@@ -31,6 +33,7 @@ export const StorageComponent = ({
onStorageDeleted,
onStorageTransferred,
isCanManageStorages,
user,
}: Props) => {
const [storage, setStorage] = useState<Storage | undefined>();
@@ -142,11 +145,12 @@ export const StorageComponent = ({
{!isEditName ? (
<div className="mb-5 flex items-center text-2xl font-bold">
{storage.name}
{isCanManageStorages && (
<div className="ml-2 cursor-pointer" onClick={() => startEdit('name')}>
<img src="/icons/pen-gray.svg" />
</div>
)}
{(storage.isSystem && user.role === UserRole.ADMIN) ||
(isCanManageStorages && (
<div className="ml-2 cursor-pointer" onClick={() => startEdit('name')}>
<img src="/icons/pen-gray.svg" />
</div>
))}
</div>
) : (
<div>
@@ -219,7 +223,9 @@ export const StorageComponent = ({
<div className="mt-5 flex items-center font-bold">
<div>Storage settings</div>
{!isEditSettings && isCanManageStorages ? (
{!isEditSettings &&
isCanManageStorages &&
!(storage.isSystem && user.role !== UserRole.ADMIN) ? (
<div className="ml-2 h-4 w-4 cursor-pointer" onClick={() => startEdit('settings')}>
<img src="/icons/pen-gray.svg" />
</div>
@@ -241,9 +247,10 @@ export const StorageComponent = ({
isShowName={false}
editingStorage={storage}
onChanged={onStorageChanged}
user={user}
/>
) : (
<ShowStorageComponent storage={storage} />
<ShowStorageComponent storage={storage} user={user} />
)}
</div>
@@ -261,23 +268,27 @@ export const StorageComponent = ({
{isCanManageStorages && (
<>
<Button
type="primary"
ghost
icon={<ArrowRightOutlined />}
onClick={() => setIsShowTransferDialog(true)}
className="mr-1"
/>
{!storage.isSystem && (
<Button
type="primary"
ghost
icon={<ArrowRightOutlined />}
onClick={() => setIsShowTransferDialog(true)}
className="mr-1"
/>
)}
<Button
type="primary"
ghost
danger
icon={<DeleteOutlined />}
onClick={() => setIsShowRemoveConfirm(true)}
loading={isRemoving}
disabled={isRemoving}
/>
{!(storage.isSystem && user.role !== UserRole.ADMIN) && (
<Button
type="primary"
ghost
danger
icon={<DeleteOutlined />}
onClick={() => setIsShowRemoveConfirm(true)}
loading={isRemoving}
disabled={isRemoving}
/>
)}
</>
)}
</div>

View File

@@ -3,6 +3,7 @@ import { useEffect, useState } from 'react';
import { storageApi } from '../../../entity/storages';
import type { Storage } from '../../../entity/storages';
import type { UserProfile } from '../../../entity/users';
import type { WorkspaceResponse } from '../../../entity/workspaces';
import { useIsMobile } from '../../../shared/hooks';
import { StorageCardComponent } from './StorageCardComponent';
@@ -10,6 +11,7 @@ import { StorageComponent } from './StorageComponent';
import { EditStorageComponent } from './edit/EditStorageComponent';
interface Props {
user: UserProfile;
contentHeight: number;
workspace: WorkspaceResponse;
isCanManageStorages: boolean;
@@ -17,7 +19,12 @@ interface Props {
const SELECTED_STORAGE_STORAGE_KEY = 'selectedStorageId';
export const StoragesComponent = ({ contentHeight, workspace, isCanManageStorages }: Props) => {
export const StoragesComponent = ({
user,
contentHeight,
workspace,
isCanManageStorages,
}: Props) => {
const isMobile = useIsMobile();
const [isLoading, setIsLoading] = useState(true);
const [storages, setStorages] = useState<Storage[]>([]);
@@ -144,6 +151,7 @@ export const StoragesComponent = ({ contentHeight, workspace, isCanManageStorage
loadStorages();
}}
isCanManageStorages={isCanManageStorages}
user={user}
/>
</div>
)}
@@ -170,6 +178,7 @@ export const StoragesComponent = ({ contentHeight, workspace, isCanManageStorage
loadStorages(false, storage.id);
setIsShowAddStorage(false);
}}
user={user}
/>
</Modal>
)}

View File

@@ -1,12 +1,15 @@
import { Button, Input, Select } from 'antd';
import { InfoCircleOutlined } from '@ant-design/icons';
import { Button, Input, Select, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import {
type Storage,
StorageType,
getStorageLogoFromType,
storageApi,
} from '../../../../entity/storages';
import { type UserProfile, UserRole } from '../../../../entity/users';
import { ToastHelper } from '../../../../shared/toast';
import { EditAzureBlobStorageComponent } from './storages/EditAzureBlobStorageComponent';
import { EditFTPStorageComponent } from './storages/EditFTPStorageComponent';
@@ -26,6 +29,8 @@ interface Props {
editingStorage?: Storage;
onChanged: (storage: Storage) => void;
user: UserProfile;
}
export function EditStorageComponent({
@@ -35,6 +40,7 @@ export function EditStorageComponent({
isShowName,
editingStorage,
onChanged,
user,
}: Props) {
const [storage, setStorage] = useState<Storage | undefined>();
const [isUnsaved, setIsUnsaved] = useState(false);
@@ -188,6 +194,7 @@ export function EditStorageComponent({
workspaceId,
name: '',
type: StorageType.LOCAL,
isSystem: false,
localStorage: {},
},
);
@@ -357,6 +364,31 @@ export function EditStorageComponent({
</div>
</div>
{IS_CLOUD && user.role === UserRole.ADMIN && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[110px] sm:mb-0">Is system?</div>
<div className="flex items-center">
<Switch
checked={storage?.isSystem || false}
disabled={!!storage?.id && storage?.isSystem}
onChange={(checked) => {
setStorage({ ...storage, isSystem: checked });
setIsUnsaved(true);
}}
size="small"
/>
<Tooltip
className="cursor-pointer"
title="System storage is accessible by all workspaces in this instance. Regular storage is only accessible by the current workspace."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
)}
<div className="mt-5" />
<div>

View File

@@ -1,6 +1,7 @@
import { type Storage, StorageType } from '../../../../entity/storages';
import { getStorageLogoFromType } from '../../../../entity/storages/models/getStorageLogoFromType';
import { getStorageNameFromType } from '../../../../entity/storages/models/getStorageNameFromType';
import { type UserProfile, UserRole } from '../../../../entity/users';
import { ShowAzureBlobStorageComponent } from './storages/ShowAzureBlobStorageComponent';
import { ShowFTPStorageComponent } from './storages/ShowFTPStorageComponent';
import { ShowGoogleDriveStorageComponent } from './storages/ShowGoogleDriveStorageComponent';
@@ -11,9 +12,10 @@ import { ShowSFTPStorageComponent } from './storages/ShowSFTPStorageComponent';
interface Props {
storage?: Storage;
user: UserProfile;
}
export function ShowStorageComponent({ storage }: Props) {
export function ShowStorageComponent({ storage, user }: Props) {
if (!storage) return null;
return (
@@ -30,6 +32,13 @@ export function ShowStorageComponent({ storage }: Props) {
/>
</div>
{storage.isSystem && user.role === UserRole.ADMIN && (
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">System storage</div>
<div>Yes</div>
</div>
)}
<div>{storage?.type === StorageType.S3 && <ShowS3StorageComponent storage={storage} />}</div>
<div>

View File

@@ -1,6 +1,5 @@
import GitHubButton from 'react-github-btn';
import { ThemeToggleComponent } from '../../../widgets/main/ThemeToggleComponent';
import { StarButtonComponent } from '../../../shared/ui/StarButtonComponent';
import { ThemeToggleComponent } from '../../../shared/ui/ThemeToggleComponent';
export function AuthNavbarComponent() {
return (
@@ -32,19 +31,11 @@ export function AuthNavbarComponent() {
Community
</a>
<div className="mt-[7px]">
<GitHubButton
href="https://github.com/databasus/databasus"
data-icon="octicon-star"
data-size="large"
data-show-count="true"
aria-label="Star Databasus on GitHub"
>
&nbsp;Star on GitHub
</GitHubButton>
</div>
<div className="flex items-center gap-2">
<StarButtonComponent />
<ThemeToggleComponent />
<ThemeToggleComponent />
</div>
</div>
</div>
);

View File

@@ -197,7 +197,7 @@ export function ProfileComponent({ contentHeight }: Props) {
};
return (
<div className="flex grow sm:pl-5">
<div className="flex grow">
<div className="w-full">
<div
className="grow overflow-y-auto rounded bg-white p-5 shadow dark:bg-gray-800"

View File

@@ -366,7 +366,7 @@ export function UsersComponent({ contentHeight }: Props) {
};
return (
<div className="flex grow sm:pl-5">
<div className="flex grow">
<div className="w-full">
<div
ref={scrollContainerRef}

View File

@@ -165,7 +165,7 @@ export function WorkspaceSettingsComponent({ workspaceResponse, user, contentHei
};
return (
<div className="flex grow sm:pl-2">
<div className="flex grow">
<div className="w-full">
<div
ref={scrollContainerRef}

View File

@@ -3,6 +3,7 @@ import { Spin } from 'antd';
import { useEffect, useState } from 'react';
import { userApi } from '../entity/users';
import { PlaygroundWarningComponent } from '../features/playground';
import {
AdminPasswordComponent,
AuthNavbarComponent,
@@ -60,6 +61,8 @@ export function AuthPageComponent() {
</div>
</div>
)}
<PlaygroundWarningComponent />
</div>
);
}

View File

@@ -4,10 +4,13 @@ import { useEffect, useState } from 'react';
import { GOOGLE_DRIVE_OAUTH_REDIRECT_URL } from '../constants';
import { type Storage, StorageType } from '../entity/storages';
import type { StorageOauthDto } from '../entity/storages/models/StorageOauthDto';
import type { UserProfile } from '../entity/users';
import { userApi } from '../entity/users';
import { EditStorageComponent } from '../features/storages/ui/edit/EditStorageComponent';
export function OauthStorageComponent() {
const [storage, setStorage] = useState<Storage | undefined>();
const [user, setUser] = useState<UserProfile | undefined>();
const exchangeGoogleOauthCode = async (oauthDto: StorageOauthDto) => {
if (!oauthDto.storage.googleDriveStorage) {
@@ -73,6 +76,13 @@ export function OauthStorageComponent() {
};
useEffect(() => {
userApi
.getCurrentUser()
.then(setUser)
.catch(() => {
window.location.href = '/';
});
const urlParams = new URLSearchParams(window.location.search);
// Attempt 1: Check for the 'oauthDto' param (Third-party/Legacy way)
@@ -116,7 +126,7 @@ export function OauthStorageComponent() {
alert('OAuth param not found. Ensure the redirect URL is configured correctly.');
}, []);
if (!storage) {
if (!storage || !user) {
return (
<div className="mt-20 flex justify-center">
<Spin />
@@ -140,6 +150,7 @@ export function OauthStorageComponent() {
<EditStorageComponent
workspaceId={storage.workspaceId}
user={user}
isShowClose={false}
onClose={() => {}}
isShowName={false}

View File

@@ -0,0 +1,36 @@
export class RateLimiter {
private tokens: number;
private readonly queue: Array<() => void>;
constructor(
private readonly capacity: number,
private readonly refillMs: number,
) {
this.tokens = capacity;
this.queue = [];
setInterval(() => {
this.tokens = this.capacity;
this.releaseQueued();
}, this.refillMs);
}
private releaseQueued() {
while (this.tokens > 0 && this.queue.length > 0) {
this.tokens -= 1;
const resolve = this.queue.shift();
if (resolve) resolve();
}
}
async acquire(): Promise<void> {
if (this.tokens > 0) {
this.tokens -= 1;
return;
}
return new Promise<void>((resolve) => {
this.queue.push(resolve);
});
}
}

View File

@@ -1,8 +1,11 @@
import { accessTokenHelper } from '.';
import { IS_CLOUD } from '../../constants';
import { RateLimiter } from './RateLimiter';
import RequestOptions from './RequestOptions';
const REPEAT_TRIES_COUNT = 10;
const REPEAT_TRIES_COUNT = 30;
const REPEAT_INTERVAL_MS = 3_000;
const rateLimiter = new RateLimiter(IS_CLOUD ? 5 : 30, 1_000);
const handleOrThrowMessageIfResponseError = async (
url: string,
@@ -41,6 +44,8 @@ const makeRequest = async (
optionsWrapper: RequestOptions,
currentTry = 0,
): Promise<Response> => {
await rateLimiter.acquire();
try {
const response = await fetch(url, optionsWrapper.toRequestInit());
await handleOrThrowMessageIfResponseError(url, response);

View File

@@ -0,0 +1,61 @@
import { useEffect, useState } from 'react';

// Inline outline star icon (avoids an extra icon-library dependency).
const StarIcon = () => (
  <svg
    xmlns="http://www.w3.org/2000/svg"
    width="16"
    height="16"
    viewBox="0 0 24 24"
    fill="none"
    stroke="currentColor"
    strokeWidth="2"
    strokeLinecap="round"
    strokeLinejoin="round"
  >
    <polygon points="12 2 15.09 8.26 22 9.27 17 14.14 18.18 21.02 12 17.77 5.82 21.02 7 14.14 2 9.27 8.91 8.26 12 2" />
  </svg>
);

/**
 * "Star on GitHub" link button that shows the repository's live star count.
 *
 * The count is fetched once on mount from the public GitHub API; the counter
 * segment is hidden while loading or when the fetch fails. The in-flight
 * request is aborted on unmount so no state update lands on an unmounted
 * component and no stale response is processed.
 */
export function StarButtonComponent() {
  const [starCount, setStarCount] = useState<number | null>(null);
  const [isLoading, setIsLoading] = useState(true);

  useEffect(() => {
    const controller = new AbortController();

    const fetchStarCount = async () => {
      try {
        const response = await fetch('https://api.github.com/repos/databasus/databasus', {
          signal: controller.signal,
        });
        if (response.ok) {
          const data = (await response.json()) as { stargazers_count: number };
          setStarCount(data.stargazers_count);
        }
      } catch (error) {
        // An abort is expected on unmount — only log real failures.
        if (!controller.signal.aborted) {
          console.error('Failed to fetch GitHub star count:', error);
        }
      } finally {
        if (!controller.signal.aborted) {
          setIsLoading(false);
        }
      }
    };

    void fetchStarCount();
    return () => controller.abort();
  }, []);

  return (
    <a
      href="https://github.com/databasus/databasus"
      target="_blank"
      rel="noopener noreferrer"
      className="flex cursor-pointer items-center rounded-md border !border-gray-200 !bg-white text-sm !text-gray-700 transition-colors hover:!bg-gray-50 dark:!border-gray-600 dark:!bg-gray-700 dark:!text-gray-200 dark:hover:!bg-gray-600"
      aria-label="Star databasus/databasus on GitHub"
    >
      <div className="flex items-center gap-2 border-r border-gray-200 px-2.5 py-1 !text-black dark:border-gray-600 dark:!text-white">
        <StarIcon />
        <span>Star on GitHub</span>
      </div>
      {!isLoading && starCount !== null && (
        <span className="px-2.5 py-1 !text-black dark:!text-white">
          {starCount.toLocaleString()}
        </span>
      )}
    </a>
  );
}

View File

@@ -1,7 +1,7 @@
import { Dropdown } from 'antd';
import type { MenuProps } from 'antd';
import { type ThemeMode, useTheme } from '../../shared/theme';
import { type ThemeMode, useTheme } from '../theme';
const SunIcon = () => (
<svg

View File

@@ -1 +1,3 @@
export { ConfirmationComponent } from './ConfirmationComponent';
export { StarButtonComponent } from './StarButtonComponent';
export { ThemeToggleComponent } from './ThemeToggleComponent';

View File

@@ -1,7 +1,6 @@
import { LoadingOutlined, MenuOutlined } from '@ant-design/icons';
import { App, Button, Spin, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import GitHubButton from 'react-github-btn';
import { APP_VERSION } from '../../constants';
import { type DiskUsage, diskApi } from '../../entity/disk';
@@ -15,6 +14,7 @@ import {
import { type WorkspaceResponse, workspaceApi } from '../../entity/workspaces';
import { DatabasesComponent } from '../../features/databases/ui/DatabasesComponent';
import { NotifiersComponent } from '../../features/notifiers/ui/NotifiersComponent';
import { PlaygroundWarningComponent } from '../../features/playground';
import { SettingsComponent } from '../../features/settings';
import { StoragesComponent } from '../../features/storages/ui/StoragesComponent';
import { ProfileComponent } from '../../features/users';
@@ -24,8 +24,9 @@ import {
WorkspaceSettingsComponent,
} from '../../features/workspaces';
import { useIsMobile, useScreenHeight } from '../../shared/hooks';
import { StarButtonComponent } from '../../shared/ui/StarButtonComponent';
import { ThemeToggleComponent } from '../../shared/ui/ThemeToggleComponent';
import { SidebarComponent } from './SidebarComponent';
import { ThemeToggleComponent } from './ThemeToggleComponent';
import { WorkspaceSelectionComponent } from './WorkspaceSelectionComponent';
export const MainScreenComponent = () => {
@@ -113,6 +114,9 @@ export const MainScreenComponent = () => {
const isUsedMoreThan95Percent =
diskUsage && diskUsage.usedSpaceBytes / diskUsage.totalSpaceBytes > 0.95;
const isUsedMoreThan85Percent =
diskUsage && diskUsage.usedSpaceBytes / diskUsage.totalSpaceBytes > 0.85;
const isCanManageDBs = selectedWorkspace?.userRole !== WorkspaceRole.VIEWER;
const tabs = [
@@ -197,7 +201,7 @@ export const MainScreenComponent = () => {
</a>
</div>
<div className="ml-2 flex-1 pr-2 md:ml-5 md:flex-initial md:pr-0">
<div className="ml-2 flex-1 pr-2 md:ml-4 md:flex-initial md:pr-0">
{!isLoading && (
<WorkspaceSelectionComponent
workspaces={workspaces}
@@ -218,14 +222,6 @@ export const MainScreenComponent = () => {
Docs
</a>
<a
className="!text-black hover:opacity-80 dark:!text-gray-200"
href="https://databasus.com/contribute"
target="_blank"
rel="noreferrer"
>
Contribute
</a>
<a
className="!text-black hover:opacity-80 dark:!text-gray-200"
href="https://t.me/databasus_community"
@@ -235,19 +231,7 @@ export const MainScreenComponent = () => {
Community
</a>
<div className="mt-1">
<GitHubButton
href="https://github.com/databasus/databasus"
data-icon="octicon-star"
data-size="large"
data-show-count="true"
aria-label="Star databasus/databasus on GitHub"
>
&nbsp;Star Databasus on GitHub
</GitHubButton>
</div>
{diskUsage && (
{isUsedMoreThan85Percent && (
<Tooltip title="To make backups locally and restore them, you need to have enough space on your disk. For restore, you need to have same amount of space that the backup size.">
<div
className={`cursor-pointer text-center text-xs ${isUsedMoreThan95Percent ? 'text-red-500' : 'text-gray-500 dark:text-gray-400'}`}
@@ -261,7 +245,11 @@ export const MainScreenComponent = () => {
</Tooltip>
)}
<ThemeToggleComponent />
<div className="flex items-center gap-2">
<StarButtonComponent />
<ThemeToggleComponent />
</div>
</div>
<div className="ml-auto flex items-center gap-2 md:hidden">
@@ -273,7 +261,8 @@ export const MainScreenComponent = () => {
/>
</div>
</div>
{isLoading ? (
{isLoading || !user ? (
<div className="flex items-center justify-center py-2" style={{ height: contentHeight }}>
<Spin indicator={<LoadingOutlined spin />} size="large" />
</div>
@@ -289,13 +278,23 @@ export const MainScreenComponent = () => {
contentHeight={contentHeight}
/>
{selectedTab === 'profile' && <ProfileComponent contentHeight={contentHeight} />}
{selectedTab === 'databasus-settings' && (
<SettingsComponent contentHeight={contentHeight} />
{selectedTab === 'profile' && (
<div className="flex-1 md:pl-4">
<ProfileComponent contentHeight={contentHeight} />
</div>
)}
{selectedTab === 'users' && <UsersComponent contentHeight={contentHeight} />}
{selectedTab === 'databasus-settings' && (
<div className="flex-1 md:pl-4">
<SettingsComponent contentHeight={contentHeight} />
</div>
)}
{selectedTab === 'users' && (
<div className="flex-1 md:pl-4">
<UsersComponent contentHeight={contentHeight} />
</div>
)}
{(selectedTab === 'databases' ||
selectedTab === 'storages' ||
@@ -320,7 +319,7 @@ export const MainScreenComponent = () => {
</div>
) : (
<>
<div className="flex-1 md:pl-3">
<div className="flex-1 md:pl-1">
{selectedTab === 'notifiers' && selectedWorkspace && (
<NotifiersComponent
contentHeight={contentHeight}
@@ -331,6 +330,7 @@ export const MainScreenComponent = () => {
)}
{selectedTab === 'storages' && selectedWorkspace && (
<StoragesComponent
user={user}
contentHeight={contentHeight}
workspace={selectedWorkspace}
isCanManageStorages={isCanManageDBs}
@@ -341,18 +341,22 @@ export const MainScreenComponent = () => {
<DatabasesComponent
contentHeight={contentHeight}
workspace={selectedWorkspace}
user={user}
isCanManageDBs={isCanManageDBs}
key={`databases-${selectedWorkspace.id}`}
/>
)}
{selectedTab === 'settings' && selectedWorkspace && user && (
<WorkspaceSettingsComponent
workspaceResponse={selectedWorkspace}
contentHeight={contentHeight}
user={user}
key={`settings-${selectedWorkspace.id}`}
/>
)}
<div className="flex-1 md:pl-3">
{selectedTab === 'settings' && selectedWorkspace && user && (
<WorkspaceSettingsComponent
workspaceResponse={selectedWorkspace}
contentHeight={contentHeight}
user={user}
key={`settings-${selectedWorkspace.id}`}
/>
)}
</div>
</div>
</>
)}
@@ -375,6 +379,8 @@ export const MainScreenComponent = () => {
workspacesCount={workspaces.length}
/>
)}
<PlaygroundWarningComponent />
</div>
);
};

View File

@@ -1,13 +1,13 @@
import { CloseOutlined } from '@ant-design/icons';
import { Drawer, Tooltip } from 'antd';
import { useEffect } from 'react';
import GitHubButton from 'react-github-btn';
import { type DiskUsage } from '../../entity/disk';
import { type UserProfile, UserRole } from '../../entity/users';
import { useIsMobile } from '../../shared/hooks';
import { useTheme } from '../../shared/theme';
import { ThemeToggleComponent } from './ThemeToggleComponent';
import { StarButtonComponent } from '../../shared/ui/StarButtonComponent';
import { ThemeToggleComponent } from '../../shared/ui/ThemeToggleComponent';
interface TabItem {
text: string;
@@ -194,15 +194,6 @@ export const SidebarComponent = ({
Documentation
</a>
<a
className="block rounded text-sm font-medium !text-gray-700 hover:bg-gray-100 hover:!text-blue-600 dark:!text-gray-300 dark:hover:bg-gray-700"
href="https://databasus.com/contribute"
target="_blank"
rel="noreferrer"
>
Contribute
</a>
<a
className="block rounded text-sm font-medium !text-gray-700 hover:bg-gray-100 hover:!text-blue-600 dark:!text-gray-300 dark:hover:bg-gray-700"
href="https://t.me/databasus_community"
@@ -212,16 +203,8 @@ export const SidebarComponent = ({
Community
</a>
<div className="pt-2">
<GitHubButton
href="https://github.com/databasus/databasus"
data-icon="octicon-star"
data-size="large"
data-show-count="true"
aria-label="Star databasus/databasus on GitHub"
>
Star on GitHub
</GitHubButton>
<div className="flex pt-2">
<StarButtonComponent />
</div>
</div>
</div>

View File

@@ -62,7 +62,7 @@ export const WorkspaceSelectionComponent = ({
return (
<div
className="my-1 flex-1 select-none md:ml-2 md:w-[250px] md:max-w-[250px]"
className="my-1 flex-1 select-none md:ml-2 md:w-[250px] md:max-w-[242px]"
ref={dropdownRef}
>
<div className="mb-1 hidden text-xs text-gray-400 md:block" style={{ lineHeight: 0.7 }}>