diff --git a/Dockerfile b/Dockerfile index 23c8fbd..520e947 100644 --- a/Dockerfile +++ b/Dockerfile @@ -251,6 +251,18 @@ fi # PostgreSQL 17 binary paths PG_BIN="/usr/lib/postgresql/17/bin" +# Generate runtime configuration for frontend +echo "Generating runtime configuration..." +cat > /app/ui/build/runtime-config.js << JSEOF +// Runtime configuration injected at container startup +// This file is generated dynamically and should not be edited manually +window.__RUNTIME_CONFIG__ = { + IS_CLOUD: '\${IS_CLOUD:-false}', + GITHUB_CLIENT_ID: '\${GITHUB_CLIENT_ID:-}', + GOOGLE_CLIENT_ID: '\${GOOGLE_CLIENT_ID:-}' +}; +JSEOF + # Ensure proper ownership of data directory echo "Setting up data directory permissions..." mkdir -p /databasus-data/pgdata diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index c710570..558b41b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -29,6 +29,7 @@ type EnvVariables struct { MariadbInstallDir string `env:"MARIADB_INSTALL_DIR"` MongodbInstallDir string `env:"MONGODB_INSTALL_DIR"` + IsCloud bool `env:"IS_CLOUD"` TestLocalhost string `env:"TEST_LOCALHOST"` ShowDbInstallationVerificationLogs bool `env:"SHOW_DB_INSTALLATION_VERIFICATION_LOGS"` @@ -182,6 +183,11 @@ func loadEnvVariables() { env.IsSkipExternalResourcesTests = false } + // Set default value for IsCloud if not defined + if os.Getenv("IS_CLOUD") == "" { + env.IsCloud = false + } + for _, arg := range os.Args { if strings.Contains(arg, "test") { env.IsTesting = true diff --git a/backend/internal/features/backups/config/controller.go b/backend/internal/features/backups/config/controller.go index 222158e..8c82be2 100644 --- a/backend/internal/features/backups/config/controller.go +++ b/backend/internal/features/backups/config/controller.go @@ -16,6 +16,7 @@ type BackupConfigController struct { func (c *BackupConfigController) RegisterRoutes(router *gin.RouterGroup) { router.POST("/backup-configs/save",
c.SaveBackupConfig) + router.GET("/backup-configs/database/:id/plan", c.GetDatabasePlan) router.GET("/backup-configs/database/:id", c.GetBackupConfigByDbID) router.GET("/backup-configs/storage/:id/is-using", c.IsStorageUsing) router.GET("/backup-configs/storage/:id/databases-count", c.CountDatabasesForStorage) @@ -92,6 +93,39 @@ func (c *BackupConfigController) GetBackupConfigByDbID(ctx *gin.Context) { ctx.JSON(http.StatusOK, backupConfig) } +// GetDatabasePlan +// @Summary Get database plan by database ID +// @Description Get the plan limits for a specific database (max backup size, max total size, max storage period) +// @Tags backup-configs +// @Produce json +// @Param id path string true "Database ID" +// @Success 200 {object} plans.DatabasePlan +// @Failure 400 {object} map[string]string "Invalid database ID" +// @Failure 401 {object} map[string]string "User not authenticated" +// @Failure 404 {object} map[string]string "Database not found or access denied" +// @Router /backup-configs/database/{id}/plan [get] +func (c *BackupConfigController) GetDatabasePlan(ctx *gin.Context) { + user, ok := users_middleware.GetUserFromContext(ctx) + if !ok { + ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"}) + return + } + + id, err := uuid.Parse(ctx.Param("id")) + if err != nil { + ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid database ID"}) + return + } + + plan, err := c.backupConfigService.GetDatabasePlan(user, id) + if err != nil { + ctx.JSON(http.StatusNotFound, gin.H{"error": "database plan not found"}) + return + } + + ctx.JSON(http.StatusOK, plan) +} + // IsStorageUsing // @Summary Check if storage is being used // @Description Check if a storage is currently being used by any backup configuration diff --git a/backend/internal/features/backups/config/controller_test.go b/backend/internal/features/backups/config/controller_test.go index ea89b92..662487e 100644 --- a/backend/internal/features/backups/config/controller_test.go +++ 
b/backend/internal/features/backups/config/controller_test.go @@ -16,11 +16,14 @@ import ( "databasus-backend/internal/features/databases/databases/postgresql" "databasus-backend/internal/features/intervals" "databasus-backend/internal/features/notifiers" + plans "databasus-backend/internal/features/plan" "databasus-backend/internal/features/storages" + local_storage "databasus-backend/internal/features/storages/models/local" users_enums "databasus-backend/internal/features/users/enums" users_testing "databasus-backend/internal/features/users/testing" workspaces_controllers "databasus-backend/internal/features/workspaces/controllers" workspaces_testing "databasus-backend/internal/features/workspaces/testing" + "databasus-backend/internal/storage" "databasus-backend/internal/util/period" test_utils "databasus-backend/internal/util/testing" "databasus-backend/internal/util/tools" @@ -300,14 +303,204 @@ func Test_GetBackupConfigByDbID_ReturnsDefaultConfigForNewDatabase(t *testing.T) &response, ) + var plan plans.DatabasePlan + + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + "/api/v1/backup-configs/database/"+database.ID.String()+"/plan", + "Bearer "+owner.Token, + http.StatusOK, + &plan, + ) + assert.Equal(t, database.ID, response.DatabaseID) assert.False(t, response.IsBackupsEnabled) - assert.Equal(t, period.PeriodWeek, response.StorePeriod) + assert.Equal(t, plan.MaxStoragePeriod, response.StorePeriod) + assert.Equal(t, plan.MaxBackupSizeMB, response.MaxBackupSizeMB) + assert.Equal(t, plan.MaxBackupsTotalSizeMB, response.MaxBackupsTotalSizeMB) assert.True(t, response.IsRetryIfFailed) assert.Equal(t, 3, response.MaxFailedTriesCount) assert.NotNil(t, response.BackupInterval) } +func Test_GetDatabasePlan_ForNewDatabase_PlanAlwaysReturned(t *testing.T) { + router := createTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + + database := 
createTestDatabaseViaAPI("Test Database", workspace.ID, owner.Token, router) + + var response plans.DatabasePlan + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + "/api/v1/backup-configs/database/"+database.ID.String()+"/plan", + "Bearer "+owner.Token, + http.StatusOK, + &response, + ) + + assert.Equal(t, database.ID, response.DatabaseID) + assert.NotNil(t, response.MaxBackupSizeMB) + assert.NotNil(t, response.MaxBackupsTotalSizeMB) + assert.NotEmpty(t, response.MaxStoragePeriod) +} + +func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testing.T) { + router := createTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + + database := createTestDatabaseViaAPI("Test Database", workspace.ID, owner.Token, router) + + // Get plan via API (triggers auto-creation) + var plan plans.DatabasePlan + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + "/api/v1/backup-configs/database/"+database.ID.String()+"/plan", + "Bearer "+owner.Token, + http.StatusOK, + &plan, + ) + + assert.Equal(t, database.ID, plan.DatabaseID) + + // Adjust plan limits directly in database to fixed restrictive values + err := storage.GetDb().Model(&plans.DatabasePlan{}). + Where("database_id = ?", database.ID). 
+ Updates(map[string]any{ + "max_backup_size_mb": 100, + "max_backups_total_size_mb": 1000, + "max_storage_period": period.PeriodMonth, + }).Error + assert.NoError(t, err) + + // Test 1: Try to save backup config with exceeded backup size limit + timeOfDay := "04:00" + backupConfigExceededSize := BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + StorePeriod: period.PeriodWeek, + BackupInterval: &intervals.Interval{ + Interval: intervals.IntervalDaily, + TimeOfDay: &timeOfDay, + }, + SendNotificationsOn: []BackupNotificationType{ + NotificationBackupFailed, + }, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + MaxBackupSizeMB: 200, // Exceeds limit of 100 + MaxBackupsTotalSizeMB: 800, + } + + respExceededSize := test_utils.MakePostRequest( + t, + router, + "/api/v1/backup-configs/save", + "Bearer "+owner.Token, + backupConfigExceededSize, + http.StatusBadRequest, + ) + assert.Contains(t, string(respExceededSize.Body), "max backup size exceeds plan limit") + + // Test 2: Try to save backup config with exceeded total size limit + backupConfigExceededTotal := BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + StorePeriod: period.PeriodWeek, + BackupInterval: &intervals.Interval{ + Interval: intervals.IntervalDaily, + TimeOfDay: &timeOfDay, + }, + SendNotificationsOn: []BackupNotificationType{ + NotificationBackupFailed, + }, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + MaxBackupSizeMB: 50, + MaxBackupsTotalSizeMB: 2000, // Exceeds limit of 1000 + } + + respExceededTotal := test_utils.MakePostRequest( + t, + router, + "/api/v1/backup-configs/save", + "Bearer "+owner.Token, + backupConfigExceededTotal, + http.StatusBadRequest, + ) + assert.Contains(t, string(respExceededTotal.Body), "max total backups size exceeds plan limit") + + // Test 3: Try to save backup config with exceeded storage period limit + backupConfigExceededPeriod := BackupConfig{ + 
DatabaseID: database.ID, + IsBackupsEnabled: true, + StorePeriod: period.PeriodYear, // Exceeds limit of Month + BackupInterval: &intervals.Interval{ + Interval: intervals.IntervalDaily, + TimeOfDay: &timeOfDay, + }, + SendNotificationsOn: []BackupNotificationType{ + NotificationBackupFailed, + }, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + MaxBackupSizeMB: 80, + MaxBackupsTotalSizeMB: 800, + } + + respExceededPeriod := test_utils.MakePostRequest( + t, + router, + "/api/v1/backup-configs/save", + "Bearer "+owner.Token, + backupConfigExceededPeriod, + http.StatusBadRequest, + ) + assert.Contains(t, string(respExceededPeriod.Body), "storage period exceeds plan limit") + + // Test 4: Save backup config within all limits - should succeed + backupConfigValid := BackupConfig{ + DatabaseID: database.ID, + IsBackupsEnabled: true, + StorePeriod: period.PeriodWeek, // Within Month limit + BackupInterval: &intervals.Interval{ + Interval: intervals.IntervalDaily, + TimeOfDay: &timeOfDay, + }, + SendNotificationsOn: []BackupNotificationType{ + NotificationBackupFailed, + }, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + MaxBackupSizeMB: 80, // Within 100 limit + MaxBackupsTotalSizeMB: 800, // Within 1000 limit + } + + var responseValid BackupConfig + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/backup-configs/save", + "Bearer "+owner.Token, + backupConfigValid, + http.StatusOK, + &responseValid, + ) + + assert.Equal(t, database.ID, responseValid.DatabaseID) + assert.Equal(t, int64(80), responseValid.MaxBackupSizeMB) + assert.Equal(t, int64(800), responseValid.MaxBackupsTotalSizeMB) + assert.Equal(t, period.PeriodWeek, responseValid.StorePeriod) +} + func Test_IsStorageUsing_PermissionsEnforced(t *testing.T) { tests := []struct { name string @@ -1443,6 +1636,110 @@ func Test_TransferDatabase_TargetStorageFromDifferentWorkspace_ReturnsBadRequest assert.Contains(t, 
string(testResp.Body), "target storage does not belong to target workspace") } +func Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase(t *testing.T) { + router := createTestRouterWithStorageForTransfer() + + owner1 := users_testing.CreateTestUser(users_enums.UserRoleMember) + owner2 := users_testing.CreateTestUser(users_enums.UserRoleMember) + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + + workspaceA := workspaces_testing.CreateTestWorkspace("Workspace A", owner1, router) + workspaceB := workspaces_testing.CreateTestWorkspace("Workspace B", owner2, router) + + databaseA := createTestDatabaseViaAPI("Database A", workspaceA.ID, owner1.Token, router) + + // Test 1: Regular storage from workspace B cannot be used by database in workspace A + regularStorageB := createTestStorage(workspaceB.ID) + + timeOfDay := "04:00" + backupConfigWithRegularStorage := BackupConfig{ + DatabaseID: databaseA.ID, + IsBackupsEnabled: true, + StorePeriod: period.PeriodWeek, + BackupInterval: &intervals.Interval{ + Interval: intervals.IntervalDaily, + TimeOfDay: &timeOfDay, + }, + StorageID: ®ularStorageB.ID, + Storage: regularStorageB, + SendNotificationsOn: []BackupNotificationType{ + NotificationBackupFailed, + }, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + } + + respRegular := test_utils.MakePostRequest( + t, + router, + "/api/v1/backup-configs/save", + "Bearer "+owner1.Token, + backupConfigWithRegularStorage, + http.StatusBadRequest, + ) + + assert.Contains(t, string(respRegular.Body), "storage does not belong to the same workspace") + + // Test 2: System storage from workspace B CAN be used by database in workspace A + systemStorageB := &storages.Storage{ + WorkspaceID: workspaceB.ID, + Type: storages.StorageTypeLocal, + Name: "Test System Storage " + uuid.New().String(), + IsSystem: true, + LocalStorage: &local_storage.LocalStorage{}, + } + + var savedSystemStorage storages.Storage + 
test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemStorageB, + http.StatusOK, + &savedSystemStorage, + ) + + assert.True(t, savedSystemStorage.IsSystem) + + backupConfigWithSystemStorage := BackupConfig{ + DatabaseID: databaseA.ID, + IsBackupsEnabled: true, + StorePeriod: period.PeriodWeek, + BackupInterval: &intervals.Interval{ + Interval: intervals.IntervalDaily, + TimeOfDay: &timeOfDay, + }, + StorageID: &savedSystemStorage.ID, + Storage: &savedSystemStorage, + SendNotificationsOn: []BackupNotificationType{ + NotificationBackupFailed, + }, + IsRetryIfFailed: true, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + } + + var savedConfig BackupConfig + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/backup-configs/save", + "Bearer "+owner1.Token, + backupConfigWithSystemStorage, + http.StatusOK, + &savedConfig, + ) + + assert.Equal(t, databaseA.ID, savedConfig.DatabaseID) + assert.NotNil(t, savedConfig.StorageID) + assert.Equal(t, savedSystemStorage.ID, *savedConfig.StorageID) + assert.True(t, savedConfig.IsBackupsEnabled) + + storages.RemoveTestStorage(regularStorageB.ID) +} + func createTestDatabaseViaAPI( name string, workspaceID uuid.UUID, diff --git a/backend/internal/features/backups/config/di.go b/backend/internal/features/backups/config/di.go index fe215dc..822a76f 100644 --- a/backend/internal/features/backups/config/di.go +++ b/backend/internal/features/backups/config/di.go @@ -6,6 +6,7 @@ import ( "databasus-backend/internal/features/databases" "databasus-backend/internal/features/notifiers" + plans "databasus-backend/internal/features/plan" "databasus-backend/internal/features/storages" workspaces_services "databasus-backend/internal/features/workspaces/services" "databasus-backend/internal/util/logger" @@ -18,6 +19,7 @@ var backupConfigService = &BackupConfigService{ storages.GetStorageService(), notifiers.GetNotifierService(), 
workspaces_services.GetWorkspaceService(), + plans.GetDatabasePlanService(), nil, } var backupConfigController = &BackupConfigController{ diff --git a/backend/internal/features/backups/config/model.go b/backend/internal/features/backups/config/model.go index 7e63b2b..b5567f6 100644 --- a/backend/internal/features/backups/config/model.go +++ b/backend/internal/features/backups/config/model.go @@ -1,7 +1,9 @@ package backups_config import ( + "databasus-backend/internal/config" "databasus-backend/internal/features/intervals" + plans "databasus-backend/internal/features/plan" "databasus-backend/internal/features/storages" "databasus-backend/internal/util/period" "errors" @@ -75,7 +77,7 @@ func (b *BackupConfig) AfterFind(tx *gorm.DB) error { return nil } -func (b *BackupConfig) Validate() error { +func (b *BackupConfig) Validate(plan *plans.DatabasePlan) error { // Backup interval is required either as ID or as object if b.BackupIntervalID == uuid.Nil && b.BackupInterval == nil { return errors.New("backup interval is required") } @@ -94,6 +96,12 @@ func (b *BackupConfig) Validate() error { return errors.New("encryption must be NONE or ENCRYPTED") } + if config.GetEnv().IsCloud { + if b.Encryption != BackupEncryptionEncrypted { + return errors.New("encryption is mandatory for cloud storage") + } + } + if b.MaxBackupSizeMB < 0 { return errors.New("max backup size must be non-negative") } @@ -102,6 +110,29 @@ func (b *BackupConfig) Validate() error { return errors.New("max backups total size must be non-negative") } + // Validate against plan limits (skipped when plan is nil) + // Check storage period limit + if plan != nil && plan.MaxStoragePeriod != period.PeriodForever { + if b.StorePeriod.CompareTo(plan.MaxStoragePeriod) > 0 { + return errors.New("storage period exceeds plan limit") + } + } + + // Check max backup size limit (0 in plan means unlimited) + if plan != nil && plan.MaxBackupSizeMB > 0 { + if b.MaxBackupSizeMB == 0 || b.MaxBackupSizeMB > plan.MaxBackupSizeMB { + return errors.New("max backup size exceeds plan
limit") + } + } + + // Check max total backups size limit (0 in plan means unlimited) + if plan != nil && plan.MaxBackupsTotalSizeMB > 0 { + if b.MaxBackupsTotalSizeMB == 0 || + b.MaxBackupsTotalSizeMB > plan.MaxBackupsTotalSizeMB { + return errors.New("max total backups size exceeds plan limit") + } + } + return nil } diff --git a/backend/internal/features/backups/config/model_test.go b/backend/internal/features/backups/config/model_test.go new file mode 100644 index 0000000..a09dbb9 --- /dev/null +++ b/backend/internal/features/backups/config/model_test.go @@ -0,0 +1,401 @@ +package backups_config + +import ( + "testing" + + "databasus-backend/internal/features/intervals" + plans "databasus-backend/internal/features/plan" + "databasus-backend/internal/util/period" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func Test_Validate_WhenStoragePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodWeek + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = period.PeriodMonth + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenStoragePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodYear + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = period.PeriodMonth + + err := config.Validate(plan) + assert.EqualError(t, err, "storage period exceeds plan limit") +} + +func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsForever_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodForever + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = period.PeriodForever + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsYear_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod =
period.PeriodForever + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = period.PeriodYear + + err := config.Validate(plan) + assert.EqualError(t, err, "storage period exceeds plan limit") +} + +func Test_Validate_WhenStoragePeriodEqualsExactPlanLimit_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodMonth + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = period.PeriodMonth + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenBackupSize100MBAndPlanAllows500MB_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupSizeMB = 100 + + plan := createUnlimitedPlan() + plan.MaxBackupSizeMB = 500 + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenBackupSize500MBAndPlanAllows100MB_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupSizeMB = 500 + + plan := createUnlimitedPlan() + plan.MaxBackupSizeMB = 100 + + err := config.Validate(plan) + assert.EqualError(t, err, "max backup size exceeds plan limit") +} + +func Test_Validate_WhenBackupSizeIsUnlimitedAndPlanAllowsUnlimited_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupSizeMB = 0 + + plan := createUnlimitedPlan() + plan.MaxBackupSizeMB = 0 + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenBackupSizeIsUnlimitedAndPlanHas500MBLimit_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupSizeMB = 0 + + plan := createUnlimitedPlan() + plan.MaxBackupSizeMB = 500 + + err := config.Validate(plan) + assert.EqualError(t, err, "max backup size exceeds plan limit") +} + +func Test_Validate_WhenBackupSizeEqualsExactPlanLimit_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupSizeMB = 500 + + plan := createUnlimitedPlan() + plan.MaxBackupSizeMB = 500 + + err := 
config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenTotalSize1GBAndPlanAllows5GB_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupsTotalSizeMB = 1000 + + plan := createUnlimitedPlan() + plan.MaxBackupsTotalSizeMB = 5000 + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenTotalSize5GBAndPlanAllows1GB_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupsTotalSizeMB = 5000 + + plan := createUnlimitedPlan() + plan.MaxBackupsTotalSizeMB = 1000 + + err := config.Validate(plan) + assert.EqualError(t, err, "max total backups size exceeds plan limit") +} + +func Test_Validate_WhenTotalSizeIsUnlimitedAndPlanAllowsUnlimited_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupsTotalSizeMB = 0 + + plan := createUnlimitedPlan() + plan.MaxBackupsTotalSizeMB = 0 + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenTotalSizeIsUnlimitedAndPlanHas1GBLimit_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupsTotalSizeMB = 0 + + plan := createUnlimitedPlan() + plan.MaxBackupsTotalSizeMB = 1000 + + err := config.Validate(plan) + assert.EqualError(t, err, "max total backups size exceeds plan limit") +} + +func Test_Validate_WhenTotalSizeEqualsExactPlanLimit_ValidationPasses(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupsTotalSizeMB = 5000 + + plan := createUnlimitedPlan() + plan.MaxBackupsTotalSizeMB = 5000 + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func Test_Validate_WhenAllLimitsAreUnlimitedInPlan_AnyConfigurationPasses(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodForever + config.MaxBackupSizeMB = 0 + config.MaxBackupsTotalSizeMB = 0 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.NoError(t, err) +} + +func 
Test_Validate_WhenMultipleLimitsExceeded_ValidationFailsWithFirstError(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodYear + config.MaxBackupSizeMB = 500 + config.MaxBackupsTotalSizeMB = 5000 + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = period.PeriodMonth + plan.MaxBackupSizeMB = 100 + plan.MaxBackupsTotalSizeMB = 1000 + + err := config.Validate(plan) + assert.Error(t, err) + assert.EqualError(t, err, "storage period exceeds plan limit") +} + +func Test_Validate_WhenConfigHasInvalidIntervalButPlanIsValid_ValidationFailsOnInterval( + t *testing.T, +) { + config := createValidBackupConfig() + config.BackupIntervalID = uuid.Nil + config.BackupInterval = nil + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "backup interval is required") +} + +func Test_Validate_WhenIntervalIsMissing_ValidationFailsRegardlessOfPlan(t *testing.T) { + config := createValidBackupConfig() + config.BackupIntervalID = uuid.Nil + config.BackupInterval = nil + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "backup interval is required") +} + +func Test_Validate_WhenRetryEnabledButMaxTriesIsZero_ValidationFailsRegardlessOfPlan(t *testing.T) { + config := createValidBackupConfig() + config.IsRetryIfFailed = true + config.MaxFailedTriesCount = 0 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "max failed tries count must be greater than 0") +} + +func Test_Validate_WhenEncryptionIsInvalid_ValidationFailsRegardlessOfPlan(t *testing.T) { + config := createValidBackupConfig() + config.Encryption = "INVALID" + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "encryption must be NONE or ENCRYPTED") +} + +func Test_Validate_WhenStoragePeriodIsEmpty_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = "" + + plan := 
createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "store period is required") +} + +func Test_Validate_WhenMaxBackupSizeIsNegative_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupSizeMB = -100 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "max backup size must be non-negative") +} + +func Test_Validate_WhenMaxTotalSizeIsNegative_ValidationFails(t *testing.T) { + config := createValidBackupConfig() + config.MaxBackupsTotalSizeMB = -1000 + + plan := createUnlimitedPlan() + + err := config.Validate(plan) + assert.EqualError(t, err, "max backups total size must be non-negative") +} + +func Test_Validate_WhenPlanIsNil_OnlyBasicValidationsApply(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = period.PeriodForever + config.MaxBackupSizeMB = 0 + config.MaxBackupsTotalSizeMB = 0 + + err := config.Validate(nil) + assert.NoError(t, err) +} + +func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) { + tests := []struct { + name string + configPeriod period.Period + planPeriod period.Period + configSize int64 + planSize int64 + configTotal int64 + planTotal int64 + shouldSucceed bool + }{ + { + name: "all values just under limit", + configPeriod: period.PeriodWeek, + planPeriod: period.PeriodMonth, + configSize: 99, + planSize: 100, + configTotal: 999, + planTotal: 1000, + shouldSucceed: true, + }, + { + name: "all values equal to limit", + configPeriod: period.PeriodMonth, + planPeriod: period.PeriodMonth, + configSize: 100, + planSize: 100, + configTotal: 1000, + planTotal: 1000, + shouldSucceed: true, + }, + { + name: "period just over limit", + configPeriod: period.Period3Month, + planPeriod: period.PeriodMonth, + configSize: 100, + planSize: 100, + configTotal: 1000, + planTotal: 1000, + shouldSucceed: false, + }, + { + name: "size just over limit", + configPeriod: period.PeriodMonth, + planPeriod: 
period.PeriodMonth, + configSize: 101, + planSize: 100, + configTotal: 1000, + planTotal: 1000, + shouldSucceed: false, + }, + { + name: "total size just over limit", + configPeriod: period.PeriodMonth, + planPeriod: period.PeriodMonth, + configSize: 100, + planSize: 100, + configTotal: 1001, + planTotal: 1000, + shouldSucceed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := createValidBackupConfig() + config.StorePeriod = tt.configPeriod + config.MaxBackupSizeMB = tt.configSize + config.MaxBackupsTotalSizeMB = tt.configTotal + + plan := createUnlimitedPlan() + plan.MaxStoragePeriod = tt.planPeriod + plan.MaxBackupSizeMB = tt.planSize + plan.MaxBackupsTotalSizeMB = tt.planTotal + + err := config.Validate(plan) + if tt.shouldSucceed { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } +} + +func createValidBackupConfig() *BackupConfig { + intervalID := uuid.New() + return &BackupConfig{ + DatabaseID: uuid.New(), + IsBackupsEnabled: true, + StorePeriod: period.PeriodMonth, + BackupIntervalID: intervalID, + BackupInterval: &intervals.Interval{ID: intervalID}, + SendNotificationsOn: []BackupNotificationType{}, + IsRetryIfFailed: false, + MaxFailedTriesCount: 3, + Encryption: BackupEncryptionNone, + MaxBackupSizeMB: 100, + MaxBackupsTotalSizeMB: 1000, + } +} + +func createUnlimitedPlan() *plans.DatabasePlan { + return &plans.DatabasePlan{ + DatabaseID: uuid.New(), + MaxBackupSizeMB: 0, + MaxBackupsTotalSizeMB: 0, + MaxStoragePeriod: period.PeriodForever, + } +} diff --git a/backend/internal/features/backups/config/service.go b/backend/internal/features/backups/config/service.go index 4fdfc6d..39351d5 100644 --- a/backend/internal/features/backups/config/service.go +++ b/backend/internal/features/backups/config/service.go @@ -6,10 +6,10 @@ import ( "databasus-backend/internal/features/databases" "databasus-backend/internal/features/intervals" "databasus-backend/internal/features/notifiers" + plans 
"databasus-backend/internal/features/plan" "databasus-backend/internal/features/storages" users_models "databasus-backend/internal/features/users/models" workspaces_services "databasus-backend/internal/features/workspaces/services" - "databasus-backend/internal/util/period" "github.com/google/uuid" ) @@ -20,6 +20,7 @@ type BackupConfigService struct { storageService *storages.StorageService notifierService *notifiers.NotifierService workspaceService *workspaces_services.WorkspaceService + databasePlanService *plans.DatabasePlanService dbStorageChangeListener BackupConfigStorageChangeListener } @@ -45,7 +46,12 @@ func (s *BackupConfigService) SaveBackupConfigWithAuth( user *users_models.User, backupConfig *BackupConfig, ) (*BackupConfig, error) { - if err := backupConfig.Validate(); err != nil { + plan, err := s.databasePlanService.GetDatabasePlan(backupConfig.DatabaseID) + if err != nil { + return nil, err + } + + if err := backupConfig.Validate(plan); err != nil { return nil, err } @@ -71,7 +77,7 @@ func (s *BackupConfigService) SaveBackupConfigWithAuth( if err != nil { return nil, err } - if storage.WorkspaceID != *database.WorkspaceID { + if storage.WorkspaceID != *database.WorkspaceID && !storage.IsSystem { return nil, errors.New("storage does not belong to the same workspace as the database") } } @@ -82,7 +88,12 @@ func (s *BackupConfigService) SaveBackupConfigWithAuth( func (s *BackupConfigService) SaveBackupConfig( backupConfig *BackupConfig, ) (*BackupConfig, error) { - if err := backupConfig.Validate(); err != nil { + plan, err := s.databasePlanService.GetDatabasePlan(backupConfig.DatabaseID) + if err != nil { + return nil, err + } + + if err := backupConfig.Validate(plan); err != nil { return nil, err } @@ -120,6 +131,18 @@ func (s *BackupConfigService) GetBackupConfigByDbIdWithAuth( return s.GetBackupConfigByDbId(databaseID) } +func (s *BackupConfigService) GetDatabasePlan( + user *users_models.User, + databaseID uuid.UUID, +) (*plans.DatabasePlan, 
error) { + _, err := s.databaseService.GetDatabase(user, databaseID) + if err != nil { + return nil, err + } + + return s.databasePlanService.GetDatabasePlan(databaseID) +} + func (s *BackupConfigService) GetBackupConfigByDbId( databaseID uuid.UUID, ) (*BackupConfig, error) { @@ -194,12 +217,19 @@ func (s *BackupConfigService) CreateDisabledBackupConfig(databaseID uuid.UUID) e func (s *BackupConfigService) initializeDefaultConfig( databaseID uuid.UUID, ) error { + plan, err := s.databasePlanService.GetDatabasePlan(databaseID) + if err != nil { + return err + } + timeOfDay := "04:00" - _, err := s.backupConfigRepository.Save(&BackupConfig{ - DatabaseID: databaseID, - IsBackupsEnabled: false, - StorePeriod: period.PeriodWeek, + _, err = s.backupConfigRepository.Save(&BackupConfig{ + DatabaseID: databaseID, + IsBackupsEnabled: false, + StorePeriod: plan.MaxStoragePeriod, + MaxBackupSizeMB: plan.MaxBackupSizeMB, + MaxBackupsTotalSizeMB: plan.MaxBackupsTotalSizeMB, BackupInterval: &intervals.Interval{ Interval: intervals.IntervalDaily, TimeOfDay: &timeOfDay, diff --git a/backend/internal/features/databases/model.go b/backend/internal/features/databases/model.go index fd00534..e325ce4 100644 --- a/backend/internal/features/databases/model.go +++ b/backend/internal/features/databases/model.go @@ -1,6 +1,7 @@ package databases import ( + "context" "databasus-backend/internal/features/databases/databases/mariadb" "databasus-backend/internal/features/databases/databases/mongodb" "databasus-backend/internal/features/databases/databases/mysql" @@ -84,6 +85,25 @@ func (d *Database) TestConnection( return d.getSpecificDatabase().TestConnection(logger, encryptor, d.ID) } +func (d *Database) IsUserReadOnly( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, +) (bool, []string, error) { + switch d.Type { + case DatabaseTypePostgres: + return d.Postgresql.IsUserReadOnly(ctx, logger, encryptor, d.ID) + case DatabaseTypeMysql: + return 
d.Mysql.IsUserReadOnly(ctx, logger, encryptor, d.ID) + case DatabaseTypeMariadb: + return d.Mariadb.IsUserReadOnly(ctx, logger, encryptor, d.ID) + case DatabaseTypeMongodb: + return d.Mongodb.IsUserReadOnly(ctx, logger, encryptor, d.ID) + default: + return false, nil, errors.New("read-only check not supported for this database type") + } +} + func (d *Database) HideSensitiveData() { d.getSpecificDatabase().HideSensitiveData() } diff --git a/backend/internal/features/databases/service.go b/backend/internal/features/databases/service.go index a42a5d6..fc7ead4 100644 --- a/backend/internal/features/databases/service.go +++ b/backend/internal/features/databases/service.go @@ -7,6 +7,7 @@ import ( "log/slog" "time" + "databasus-backend/internal/config" audit_logs "databasus-backend/internal/features/audit_logs" "databasus-backend/internal/features/databases/databases/mariadb" "databasus-backend/internal/features/databases/databases/mongodb" @@ -86,6 +87,23 @@ func (s *DatabaseService) CreateDatabase( return nil, fmt.Errorf("failed to auto-detect database data: %w", err) } + if config.GetEnv().IsCloud { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + isReadOnly, permissions, err := database.IsUserReadOnly(ctx, s.logger, s.fieldEncryptor) + if err != nil { + return nil, fmt.Errorf("failed to verify user permissions: %w", err) + } + + if !isReadOnly { + return nil, fmt.Errorf( + "in cloud mode, only read-only database users are allowed (user has permissions: %v)", + permissions, + ) + } + } + if err := database.EncryptSensitiveFields(s.fieldEncryptor); err != nil { return nil, fmt.Errorf("failed to encrypt sensitive fields: %w", err) } @@ -153,6 +171,27 @@ func (s *DatabaseService) UpdateDatabase( return fmt.Errorf("failed to auto-detect database data: %w", err) } + if config.GetEnv().IsCloud { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + isReadOnly, permissions, err := 
existingDatabase.IsUserReadOnly( + ctx, + s.logger, + s.fieldEncryptor, + ) + if err != nil { + return fmt.Errorf("failed to verify user permissions: %w", err) + } + + if !isReadOnly { + return fmt.Errorf( + "in cloud mode, only read-only database users are allowed (user has permissions: %v)", + permissions, + ) + } + } + if err := existingDatabase.EncryptSensitiveFields(s.fieldEncryptor); err != nil { return fmt.Errorf("failed to encrypt sensitive fields: %w", err) } @@ -649,38 +688,7 @@ func (s *DatabaseService) IsUserReadOnly( ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) defer cancel() - switch usingDatabase.Type { - case DatabaseTypePostgres: - return usingDatabase.Postgresql.IsUserReadOnly( - ctx, - s.logger, - s.fieldEncryptor, - usingDatabase.ID, - ) - case DatabaseTypeMysql: - return usingDatabase.Mysql.IsUserReadOnly( - ctx, - s.logger, - s.fieldEncryptor, - usingDatabase.ID, - ) - case DatabaseTypeMariadb: - return usingDatabase.Mariadb.IsUserReadOnly( - ctx, - s.logger, - s.fieldEncryptor, - usingDatabase.ID, - ) - case DatabaseTypeMongodb: - return usingDatabase.Mongodb.IsUserReadOnly( - ctx, - s.logger, - s.fieldEncryptor, - usingDatabase.ID, - ) - default: - return false, nil, errors.New("read-only check not supported for this database type") - } + return usingDatabase.IsUserReadOnly(ctx, s.logger, s.fieldEncryptor) } func (s *DatabaseService) CreateReadOnlyUser( diff --git a/backend/internal/features/disk/service.go b/backend/internal/features/disk/service.go index fdd40eb..2b712eb 100644 --- a/backend/internal/features/disk/service.go +++ b/backend/internal/features/disk/service.go @@ -12,6 +12,15 @@ import ( type DiskService struct{} func (s *DiskService) GetDiskUsage() (*DiskUsage, error) { + if config.GetEnv().IsCloud { + return &DiskUsage{ + Platform: PlatformLinux, + TotalSpaceBytes: 100, + UsedSpaceBytes: 0, + FreeSpaceBytes: 100, + }, nil + } + platform := s.detectPlatform() var path string diff --git 
a/backend/internal/features/plan/di.go b/backend/internal/features/plan/di.go new file mode 100644 index 0000000..c84b83e --- /dev/null +++ b/backend/internal/features/plan/di.go @@ -0,0 +1,20 @@ +package plans + +import ( + "databasus-backend/internal/util/logger" +) + +var databasePlanRepository = &DatabasePlanRepository{} + +var databasePlanService = &DatabasePlanService{ + databasePlanRepository, + logger.GetLogger(), +} + +func GetDatabasePlanService() *DatabasePlanService { + return databasePlanService +} + +func GetDatabasePlanRepository() *DatabasePlanRepository { + return databasePlanRepository +} diff --git a/backend/internal/features/plan/model.go b/backend/internal/features/plan/model.go new file mode 100644 index 0000000..0d2f430 --- /dev/null +++ b/backend/internal/features/plan/model.go @@ -0,0 +1,19 @@ +package plans + +import ( + "databasus-backend/internal/util/period" + + "github.com/google/uuid" +) + +type DatabasePlan struct { + DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;type:uuid;primaryKey;not null"` + + MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"` + MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"` + MaxStoragePeriod period.Period `json:"maxStoragePeriod" gorm:"column:max_storage_period;type:text;not null"` +} + +func (p *DatabasePlan) TableName() string { + return "database_plans" +} diff --git a/backend/internal/features/plan/repository.go b/backend/internal/features/plan/repository.go new file mode 100644 index 0000000..e92f31e --- /dev/null +++ b/backend/internal/features/plan/repository.go @@ -0,0 +1,27 @@ +package plans + +import ( + "databasus-backend/internal/storage" + + "github.com/google/uuid" +) + +type DatabasePlanRepository struct{} + +func (r *DatabasePlanRepository) GetDatabasePlan(databaseID uuid.UUID) (*DatabasePlan, error) { + var databasePlan DatabasePlan + + if err := 
storage.GetDb().Where("database_id = ?", databaseID).First(&databasePlan).Error; err != nil { + if err.Error() == "record not found" { + return nil, nil + } + + return nil, err + } + + return &databasePlan, nil +} + +func (r *DatabasePlanRepository) CreateDatabasePlan(databasePlan *DatabasePlan) error { + return storage.GetDb().Create(&databasePlan).Error +} diff --git a/backend/internal/features/plan/service.go b/backend/internal/features/plan/service.go new file mode 100644 index 0000000..1a34235 --- /dev/null +++ b/backend/internal/features/plan/service.go @@ -0,0 +1,67 @@ +package plans + +import ( + "databasus-backend/internal/config" + "databasus-backend/internal/util/period" + "log/slog" + + "github.com/google/uuid" +) + +type DatabasePlanService struct { + databasePlanRepository *DatabasePlanRepository + logger *slog.Logger +} + +func (s *DatabasePlanService) GetDatabasePlan(databaseID uuid.UUID) (*DatabasePlan, error) { + plan, err := s.databasePlanRepository.GetDatabasePlan(databaseID) + if err != nil { + return nil, err + } + + if plan == nil { + s.logger.Info("no database plan found, creating default plan", "databaseID", databaseID) + + defaultPlan := s.createDefaultDatabasePlan(databaseID) + + err := s.databasePlanRepository.CreateDatabasePlan(defaultPlan) + if err != nil { + s.logger.Error("failed to create default database plan", "error", err) + return nil, err + } + + return defaultPlan, nil + } + + return plan, nil +} + +func (s *DatabasePlanService) createDefaultDatabasePlan(databaseID uuid.UUID) *DatabasePlan { + var plan DatabasePlan + + isCloud := config.GetEnv().IsCloud + if isCloud { + s.logger.Info("creating default database plan for cloud", "databaseID", databaseID) + + // for playground we set limited storages enough to test, + // but not too expensive to provide it for Databasus + plan = DatabasePlan{ + DatabaseID: databaseID, + MaxBackupSizeMB: 100, // ~ 1.5GB database + MaxBackupsTotalSizeMB: 4000, // ~ 30 daily backups + 10 manual 
backups + MaxStoragePeriod: period.PeriodWeek, + } + } else { + s.logger.Info("creating default database plan for self hosted", "databaseID", databaseID) + + // by default - everything is unlimited in self hosted mode + plan = DatabasePlan{ + DatabaseID: databaseID, + MaxBackupSizeMB: 0, + MaxBackupsTotalSizeMB: 0, + MaxStoragePeriod: period.PeriodForever, + } + } + + return &plan +} diff --git a/backend/internal/features/restores/service.go b/backend/internal/features/restores/service.go index 220e8c2..2bfbe1d 100644 --- a/backend/internal/features/restores/service.go +++ b/backend/internal/features/restores/service.go @@ -1,6 +1,7 @@ package restores import ( + "databasus-backend/internal/config" audit_logs "databasus-backend/internal/features/audit_logs" "databasus-backend/internal/features/backups/backups" backups_core "databasus-backend/internal/features/backups/backups/core" @@ -127,6 +128,13 @@ func (s *RestoreService) RestoreBackupWithAuth( return err } + if config.GetEnv().IsCloud && requestDTO.PostgresqlDatabase != nil { + // in cloud mode we use only single thread mode, + // because otherwise we will exhaust local storage + // space (instead of streaming from S3 directly to DB) + requestDTO.PostgresqlDatabase.CpuCount = 1 + } + if err := s.validateVersionCompatibility(backupDatabase, requestDTO); err != nil { return err } diff --git a/backend/internal/features/restores/usecases/postgresql/restore_backup_uc.go b/backend/internal/features/restores/usecases/postgresql/restore_backup_uc.go index dcc1999..28f3336 100644 --- a/backend/internal/features/restores/usecases/postgresql/restore_backup_uc.go +++ b/backend/internal/features/restores/usecases/postgresql/restore_backup_uc.go @@ -65,6 +65,13 @@ func (uc *RestorePostgresqlBackupUsecase) Execute( return fmt.Errorf("target database name is required for pg_restore") } + // Validate CPU count constraint for cloud environments + if config.GetEnv().IsCloud && pg.CpuCount > 1 { + return fmt.Errorf( + "parallel restore (CPU count > 1) is not supported 
in cloud mode due to storage constraints. Please use CPU count = 1", + ) + } + pgBin := tools.GetPostgresqlExecutable( pg.Version, "pg_restore", diff --git a/backend/internal/features/storages/controller_test.go b/backend/internal/features/storages/controller_test.go index f772bd0..ec9fda7 100644 --- a/backend/internal/features/storages/controller_test.go +++ b/backend/internal/features/storages/controller_test.go @@ -84,7 +84,7 @@ func Test_SaveNewStorage_StorageReturnedViaGet(t *testing.T) { assert.Contains(t, storages, savedStorage) - deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token) + deleteStorage(t, router, savedStorage.ID, owner.Token) workspaces_testing.RemoveTestWorkspace(workspace, router) } @@ -122,7 +122,169 @@ func Test_UpdateExistingStorage_UpdatedStorageReturnedViaGet(t *testing.T) { assert.Equal(t, updatedName, updatedStorage.Name) assert.Equal(t, savedStorage.ID, updatedStorage.ID) - deleteStorage(t, router, updatedStorage.ID, workspace.ID, owner.Token) + deleteStorage(t, router, updatedStorage.ID, owner.Token) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func Test_CreateSystemStorage_OnlyAdminCanCreate_MemberGetsForbidden(t *testing.T) { + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + member := users_testing.CreateTestUser(users_enums.UserRoleMember) + router := createRouter() + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", admin, router) + + // Admin can create system storage + systemStorage := createNewStorage(workspace.ID) + systemStorage.IsSystem = true + + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemStorage, + http.StatusOK, + &savedStorage, + ) + + assert.True(t, savedStorage.IsSystem) + assert.Equal(t, systemStorage.Name, savedStorage.Name) + + // Member cannot create system storage + memberSystemStorage := createNewStorage(workspace.ID) + 
memberSystemStorage.IsSystem = true + + resp := test_utils.MakePostRequest( + t, + router, + "/api/v1/storages", + "Bearer "+member.Token, + *memberSystemStorage, + http.StatusForbidden, + ) + assert.Contains(t, string(resp.Body), "insufficient permissions") + + deleteStorage(t, router, savedStorage.ID, admin.Token) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func Test_UpdateStorageIsSystem_OnlyAdminCanUpdate_MemberGetsForbidden(t *testing.T) { + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + member := users_testing.CreateTestUser(users_enums.UserRoleMember) + router := createRouter() + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", admin, router) + + // Create a regular storage + storage := createNewStorage(workspace.ID) + storage.IsSystem = false + + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *storage, + http.StatusOK, + &savedStorage, + ) + + assert.False(t, savedStorage.IsSystem) + + // Admin can update to system + savedStorage.IsSystem = true + var updatedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + savedStorage, + http.StatusOK, + &updatedStorage, + ) + + assert.True(t, updatedStorage.IsSystem) + + // Member cannot update system storage + updatedStorage.Name = "Updated by member" + resp := test_utils.MakePostRequest( + t, + router, + "/api/v1/storages", + "Bearer "+member.Token, + updatedStorage, + http.StatusForbidden, + ) + assert.Contains(t, string(resp.Body), "insufficient permissions") + + deleteStorage(t, router, updatedStorage.ID, admin.Token) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func Test_UpdateSystemStorage_CannotChangeToPrivate_ReturnsBadRequest(t *testing.T) { + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + router := createRouter() + workspace := 
workspaces_testing.CreateTestWorkspace("Test Workspace", admin, router) + + // Create system storage + storage := createNewStorage(workspace.ID) + storage.IsSystem = true + + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *storage, + http.StatusOK, + &savedStorage, + ) + + assert.True(t, savedStorage.IsSystem) + + // Attempt to change system storage to non-system (should fail) + savedStorage.IsSystem = false + resp := test_utils.MakePostRequest( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + savedStorage, + http.StatusBadRequest, + ) + assert.Contains(t, string(resp.Body), "system storage cannot be changed to non-system") + + // Verify storage is still system + var retrievedStorage Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()), + "Bearer "+admin.Token, + http.StatusOK, + &retrievedStorage, + ) + assert.True(t, retrievedStorage.IsSystem) + + // Admin can update other fields while keeping IsSystem=true + savedStorage.IsSystem = true + savedStorage.Name = "Updated System Storage" + var updatedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + savedStorage, + http.StatusOK, + &updatedStorage, + ) + assert.True(t, updatedStorage.IsSystem) + assert.Equal(t, "Updated System Storage", updatedStorage.Name) + + deleteStorage(t, router, updatedStorage.ID, admin.Token) workspaces_testing.RemoveTestWorkspace(workspace, router) } @@ -205,7 +367,7 @@ func Test_TestExistingStorageConnection_ConnectionEstablished(t *testing.T) { assert.Contains(t, string(response.Body), "successful") - deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token) + deleteStorage(t, router, savedStorage.ID, owner.Token) workspaces_testing.RemoveTestWorkspace(workspace, router) } @@ -301,7 +463,14 @@ func Test_WorkspaceRolePermissions(t 
*testing.T) { fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspace.ID.String()), "Bearer "+testUserToken, http.StatusOK, &storages, ) - assert.Len(t, storages, 1) + // Count only non-system storages for this workspace + nonSystemStorages := 0 + for _, s := range storages { + if !s.IsSystem { + nonSystemStorages++ + } + } + assert.Equal(t, 1, nonSystemStorages) // Test CREATE storage createStatusCode := http.StatusOK @@ -356,16 +525,514 @@ func Test_WorkspaceRolePermissions(t *testing.T) { // Cleanup if tt.canCreate { - deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token) + deleteStorage(t, router, savedStorage.ID, owner.Token) } if !tt.canDelete { - deleteStorage(t, router, ownerStorage.ID, workspace.ID, owner.Token) + deleteStorage(t, router, ownerStorage.ID, owner.Token) } workspaces_testing.RemoveTestWorkspace(workspace, router) }) } } +func Test_SystemStorage_AdminOnlyOperations(t *testing.T) { + tests := []struct { + name string + operation string + isAdmin bool + expectSuccess bool + expectedStatus int + }{ + { + name: "admin can create system storage", + operation: "create", + isAdmin: true, + expectSuccess: true, + expectedStatus: http.StatusOK, + }, + { + name: "member cannot create system storage", + operation: "create", + isAdmin: false, + expectSuccess: false, + expectedStatus: http.StatusForbidden, + }, + { + name: "admin can update storage to make it system", + operation: "update_to_system", + isAdmin: true, + expectSuccess: true, + expectedStatus: http.StatusOK, + }, + { + name: "member cannot update storage to make it system", + operation: "update_to_system", + isAdmin: false, + expectSuccess: false, + expectedStatus: http.StatusForbidden, + }, + { + name: "admin can update system storage", + operation: "update_system", + isAdmin: true, + expectSuccess: true, + expectedStatus: http.StatusOK, + }, + { + name: "member cannot update system storage", + operation: "update_system", + isAdmin: false, + expectSuccess: false, + 
expectedStatus: http.StatusForbidden, + }, + { + name: "admin can delete system storage", + operation: "delete", + isAdmin: true, + expectSuccess: true, + expectedStatus: http.StatusOK, + }, + { + name: "member cannot delete system storage", + operation: "delete", + isAdmin: false, + expectSuccess: false, + expectedStatus: http.StatusForbidden, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + router := createRouter() + GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{}) + + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + + var testUserToken string + if tt.isAdmin { + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + testUserToken = admin.Token + } else { + member := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspaces_testing.AddMemberToWorkspace( + workspace, + member, + users_enums.WorkspaceRoleMember, + owner.Token, + router, + ) + testUserToken = member.Token + } + + switch tt.operation { + case "create": + systemStorage := &Storage{ + WorkspaceID: workspace.ID, + Type: StorageTypeLocal, + Name: "Test System Storage " + uuid.New().String(), + IsSystem: true, + LocalStorage: &local_storage.LocalStorage{}, + } + + if tt.expectSuccess { + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+testUserToken, + *systemStorage, + tt.expectedStatus, + &savedStorage, + ) + assert.NotEmpty(t, savedStorage.ID) + assert.True(t, savedStorage.IsSystem) + deleteStorage(t, router, savedStorage.ID, testUserToken) + } else { + resp := test_utils.MakePostRequest( + t, + router, + "/api/v1/storages", + "Bearer "+testUserToken, + *systemStorage, + tt.expectedStatus, + ) + assert.Contains(t, string(resp.Body), "insufficient permissions") + } + + case "update_to_system": + // Owner creates private storage first + privateStorage 
:= createNewStorage(workspace.ID) + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+owner.Token, + *privateStorage, + http.StatusOK, + &savedStorage, + ) + + // Test user attempts to make it system + savedStorage.IsSystem = true + if tt.expectSuccess { + var updatedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+testUserToken, + savedStorage, + tt.expectedStatus, + &updatedStorage, + ) + assert.True(t, updatedStorage.IsSystem) + deleteStorage(t, router, savedStorage.ID, testUserToken) + } else { + resp := test_utils.MakePostRequest( + t, + router, + "/api/v1/storages", + "Bearer "+testUserToken, + savedStorage, + tt.expectedStatus, + ) + assert.Contains(t, string(resp.Body), "insufficient permissions") + deleteStorage(t, router, savedStorage.ID, owner.Token) + } + + case "update_system": + // Admin creates system storage first + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + systemStorage := &Storage{ + WorkspaceID: workspace.ID, + Type: StorageTypeLocal, + Name: "Test System Storage " + uuid.New().String(), + IsSystem: true, + LocalStorage: &local_storage.LocalStorage{}, + } + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemStorage, + http.StatusOK, + &savedStorage, + ) + + // Test user attempts to update system storage + savedStorage.Name = "Updated System Storage " + uuid.New().String() + if tt.expectSuccess { + var updatedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+testUserToken, + savedStorage, + tt.expectedStatus, + &updatedStorage, + ) + assert.Equal(t, savedStorage.Name, updatedStorage.Name) + assert.True(t, updatedStorage.IsSystem) + deleteStorage(t, router, savedStorage.ID, testUserToken) + } else { + resp := test_utils.MakePostRequest( + t, + router, + 
"/api/v1/storages", + "Bearer "+testUserToken, + savedStorage, + tt.expectedStatus, + ) + assert.Contains(t, string(resp.Body), "insufficient permissions") + deleteStorage(t, router, savedStorage.ID, admin.Token) + } + + case "delete": + // Admin creates system storage first + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + systemStorage := &Storage{ + WorkspaceID: workspace.ID, + Type: StorageTypeLocal, + Name: "Test System Storage " + uuid.New().String(), + IsSystem: true, + LocalStorage: &local_storage.LocalStorage{}, + } + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemStorage, + http.StatusOK, + &savedStorage, + ) + + // Test user attempts to delete system storage + if tt.expectSuccess { + test_utils.MakeDeleteRequest( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()), + "Bearer "+testUserToken, + tt.expectedStatus, + ) + } else { + resp := test_utils.MakeDeleteRequest( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()), + "Bearer "+testUserToken, + tt.expectedStatus, + ) + assert.Contains(t, string(resp.Body), "insufficient permissions") + deleteStorage(t, router, savedStorage.ID, admin.Token) + } + } + + workspaces_testing.RemoveTestWorkspace(workspace, router) + }) + } +} + +func Test_GetStorages_SystemStorageIncludedForAllUsers(t *testing.T) { + router := createRouter() + GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{}) + + // Create two workspaces with different owners + ownerA := users_testing.CreateTestUser(users_enums.UserRoleMember) + ownerB := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspaceA := workspaces_testing.CreateTestWorkspace("Workspace A", ownerA, router) + workspaceB := workspaces_testing.CreateTestWorkspace("Workspace B", ownerB, router) + + // Create private storage in workspace A + privateStorageA := 
createNewStorage(workspaceA.ID) + var savedPrivateStorageA Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+ownerA.Token, + *privateStorageA, + http.StatusOK, + &savedPrivateStorageA, + ) + + // Admin creates system storage in workspace B + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + systemStorageB := &Storage{ + WorkspaceID: workspaceB.ID, + Type: StorageTypeLocal, + Name: "Test System Storage B " + uuid.New().String(), + IsSystem: true, + LocalStorage: &local_storage.LocalStorage{}, + } + var savedSystemStorageB Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemStorageB, + http.StatusOK, + &savedSystemStorageB, + ) + + // Test: User from workspace A should see both private storage A and system storage B + var storagesForWorkspaceA []Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspaceA.ID.String()), + "Bearer "+ownerA.Token, + http.StatusOK, + &storagesForWorkspaceA, + ) + + assert.GreaterOrEqual(t, len(storagesForWorkspaceA), 2) + foundPrivateA := false + foundSystemB := false + for _, s := range storagesForWorkspaceA { + if s.ID == savedPrivateStorageA.ID { + foundPrivateA = true + } + if s.ID == savedSystemStorageB.ID { + foundSystemB = true + } + } + assert.True(t, foundPrivateA, "User from workspace A should see private storage A") + assert.True(t, foundSystemB, "User from workspace A should see system storage B") + + // Test: User from workspace B should see system storage B + var storagesForWorkspaceB []Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspaceB.ID.String()), + "Bearer "+ownerB.Token, + http.StatusOK, + &storagesForWorkspaceB, + ) + + assert.GreaterOrEqual(t, len(storagesForWorkspaceB), 1) + foundSystemBInWorkspaceB := false + for _, s := range 
storagesForWorkspaceB { + if s.ID == savedSystemStorageB.ID { + foundSystemBInWorkspaceB = true + } + // Should NOT see private storage from workspace A + assert.NotEqual( + t, + savedPrivateStorageA.ID, + s.ID, + "User from workspace B should not see private storage from workspace A", + ) + } + assert.True(t, foundSystemBInWorkspaceB, "User from workspace B should see system storage B") + + // Test: Outsider (not in any workspace) cannot access storages + outsider := users_testing.CreateTestUser(users_enums.UserRoleMember) + test_utils.MakeGetRequest( + t, + router, + fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspaceA.ID.String()), + "Bearer "+outsider.Token, + http.StatusForbidden, + ) + + // Cleanup + deleteStorage(t, router, savedPrivateStorageA.ID, ownerA.Token) + deleteStorage(t, router, savedSystemStorageB.ID, admin.Token) + workspaces_testing.RemoveTestWorkspace(workspaceA, router) + workspaces_testing.RemoveTestWorkspace(workspaceB, router) +} + +func Test_GetSystemStorage_SensitiveDataHiddenForNonAdmin(t *testing.T) { + router := createRouter() + GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{}) + + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + member := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, router) + + // Admin creates system S3 storage with credentials + systemS3Storage := &Storage{ + WorkspaceID: workspace.ID, + Type: StorageTypeS3, + Name: "Test System S3 Storage " + uuid.New().String(), + IsSystem: true, + S3Storage: &s3_storage.S3Storage{ + S3Bucket: "test-system-bucket", + S3Region: "us-east-1", + S3AccessKey: "test-access-key-123", + S3SecretKey: "test-secret-key-456", + S3Endpoint: "https://s3.amazonaws.com", + }, + } + + var savedStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemS3Storage, + http.StatusOK, + 
&savedStorage, + ) + + assert.NotEmpty(t, savedStorage.ID) + assert.True(t, savedStorage.IsSystem) + + // Test: Admin retrieves system storage - should see S3Storage object with hidden sensitive fields + var adminView Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()), + "Bearer "+admin.Token, + http.StatusOK, + &adminView, + ) + + assert.NotNil(t, adminView.S3Storage, "Admin should see S3Storage object") + assert.Equal(t, "test-system-bucket", adminView.S3Storage.S3Bucket) + assert.Equal(t, "us-east-1", adminView.S3Storage.S3Region) + // Sensitive fields should be hidden (empty strings) + assert.Equal( + t, + "", + adminView.S3Storage.S3AccessKey, + "Admin should see hidden (empty) access key", + ) + assert.Equal( + t, + "", + adminView.S3Storage.S3SecretKey, + "Admin should see hidden (empty) secret key", + ) + + // Test: Member retrieves system storage - should see storage but all specific data hidden + var memberView Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedStorage.ID.String()), + "Bearer "+member.Token, + http.StatusOK, + &memberView, + ) + + assert.Equal(t, savedStorage.ID, memberView.ID) + assert.Equal(t, savedStorage.Name, memberView.Name) + assert.True(t, memberView.IsSystem) + + // All storage type objects should be nil for non-admin viewing system storage + assert.Nil(t, memberView.S3Storage, "Non-admin should not see S3Storage object") + assert.Nil(t, memberView.LocalStorage, "Non-admin should not see LocalStorage object") + assert.Nil( + t, + memberView.GoogleDriveStorage, + "Non-admin should not see GoogleDriveStorage object", + ) + assert.Nil(t, memberView.NASStorage, "Non-admin should not see NASStorage object") + assert.Nil(t, memberView.AzureBlobStorage, "Non-admin should not see AzureBlobStorage object") + assert.Nil(t, memberView.FTPStorage, "Non-admin should not see FTPStorage object") + assert.Nil(t, 
memberView.SFTPStorage, "Non-admin should not see SFTPStorage object") + assert.Nil(t, memberView.RcloneStorage, "Non-admin should not see RcloneStorage object") + + // Test: Member can also see system storage in GetStorages list + var storages []Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages?workspace_id=%s", workspace.ID.String()), + "Bearer "+member.Token, + http.StatusOK, + &storages, + ) + + foundSystemStorage := false + for _, s := range storages { + if s.ID == savedStorage.ID { + foundSystemStorage = true + assert.True(t, s.IsSystem) + assert.Nil(t, s.S3Storage, "Non-admin should not see S3Storage in list") + } + } + assert.True(t, foundSystemStorage, "System storage should be in list") + + // Cleanup + deleteStorage(t, router, savedStorage.ID, admin.Token) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + func Test_UserNotInWorkspace_CannotAccessStorages(t *testing.T) { owner := users_testing.CreateTestUser(users_enums.UserRoleMember) outsider := users_testing.CreateTestUser(users_enums.UserRoleMember) @@ -417,7 +1084,7 @@ func Test_UserNotInWorkspace_CannotAccessStorages(t *testing.T) { http.StatusForbidden, ) - deleteStorage(t, router, savedStorage.ID, workspace.ID, owner.Token) + deleteStorage(t, router, savedStorage.ID, owner.Token) workspaces_testing.RemoveTestWorkspace(workspace, router) } @@ -450,7 +1117,7 @@ func Test_CrossWorkspaceSecurity_CannotAccessStorageFromAnotherWorkspace(t *test ) assert.Contains(t, string(response.Body), "insufficient permissions") - deleteStorage(t, router, savedStorage.ID, workspace1.ID, owner1.Token) + deleteStorage(t, router, savedStorage.ID, owner1.Token) workspaces_testing.RemoveTestWorkspace(workspace1, router) workspaces_testing.RemoveTestWorkspace(workspace2, router) } @@ -1122,10 +1789,10 @@ func Test_TransferStorage_PermissionsEnforced(t *testing.T) { ) assert.Equal(t, targetWorkspace.ID, retrievedStorage.WorkspaceID) - deleteStorage(t, router, 
savedStorage.ID, targetWorkspace.ID, targetOwner.Token) + deleteStorage(t, router, savedStorage.ID, targetOwner.Token) } else { assert.Contains(t, string(testResp.Body), "insufficient permissions") - deleteStorage(t, router, savedStorage.ID, sourceWorkspace.ID, sourceOwner.Token) + deleteStorage(t, router, savedStorage.ID, sourceOwner.Token) } workspaces_testing.RemoveTestWorkspace(sourceWorkspace, router) @@ -1175,11 +1842,129 @@ func Test_TransferStorageNotManagableWorkspace_TransferFailed(t *testing.T) { "insufficient permissions to manage storage in target workspace", ) - deleteStorage(t, router, savedStorage.ID, workspace1.ID, userA.Token) + deleteStorage(t, router, savedStorage.ID, userA.Token) workspaces_testing.RemoveTestWorkspace(workspace1, router) workspaces_testing.RemoveTestWorkspace(workspace2, router) } +func Test_TransferSystemStorage_TransferBlocked(t *testing.T) { + router := createRouter() + GetStorageService().SetStorageDatabaseCounter(&mockStorageDatabaseCounter{}) + + admin := users_testing.CreateTestUser(users_enums.UserRoleAdmin) + workspaceA := workspaces_testing.CreateTestWorkspace("Workspace A", admin, router) + workspaceB := workspaces_testing.CreateTestWorkspace("Workspace B", admin, router) + + // Admin creates system storage in workspace A + systemStorage := &Storage{ + WorkspaceID: workspaceA.ID, + Type: StorageTypeLocal, + Name: "Test System Storage " + uuid.New().String(), + IsSystem: true, + LocalStorage: &local_storage.LocalStorage{}, + } + var savedSystemStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *systemStorage, + http.StatusOK, + &savedSystemStorage, + ) + + // Admin attempts to transfer system storage to workspace B - should be blocked + transferRequest := TransferStorageRequest{ + TargetWorkspaceID: workspaceB.ID, + } + + testResp := test_utils.MakePostRequest( + t, + router, + fmt.Sprintf("/api/v1/storages/%s/transfer", 
savedSystemStorage.ID.String()), + "Bearer "+admin.Token, + transferRequest, + http.StatusBadRequest, + ) + + assert.Contains( + t, + string(testResp.Body), + "system storage cannot be transferred", + "Transfer should fail with appropriate error message", + ) + + // Verify storage is still in workspace A + var retrievedStorage Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedSystemStorage.ID.String()), + "Bearer "+admin.Token, + http.StatusOK, + &retrievedStorage, + ) + assert.Equal( + t, + workspaceA.ID, + retrievedStorage.WorkspaceID, + "Storage should remain in workspace A", + ) + + // Test regression: Non-system storage can still be transferred + privateStorage := createNewStorage(workspaceA.ID) + var savedPrivateStorage Storage + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/storages", + "Bearer "+admin.Token, + *privateStorage, + http.StatusOK, + &savedPrivateStorage, + ) + + privateTransferResp := test_utils.MakePostRequest( + t, + router, + fmt.Sprintf("/api/v1/storages/%s/transfer", savedPrivateStorage.ID.String()), + "Bearer "+admin.Token, + transferRequest, + http.StatusOK, + ) + + assert.Contains( + t, + string(privateTransferResp.Body), + "transferred successfully", + "Private storage should be transferable", + ) + + // Verify private storage was transferred to workspace B + var transferredStorage Storage + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/storages/%s", savedPrivateStorage.ID.String()), + "Bearer "+admin.Token, + http.StatusOK, + &transferredStorage, + ) + assert.Equal( + t, + workspaceB.ID, + transferredStorage.WorkspaceID, + "Private storage should be in workspace B", + ) + + // Cleanup + deleteStorage(t, router, savedSystemStorage.ID, admin.Token) + deleteStorage(t, router, savedPrivateStorage.ID, admin.Token) + workspaces_testing.RemoveTestWorkspace(workspaceA, router) + workspaces_testing.RemoveTestWorkspace(workspaceB, router) 
+} + func createRouter() *gin.Engine { gin.SetMode(gin.TestMode) router := gin.New() @@ -1212,12 +1997,13 @@ func verifyStorageData(t *testing.T, expected *Storage, actual *Storage) { assert.Equal(t, expected.Name, actual.Name) assert.Equal(t, expected.Type, actual.Type) assert.Equal(t, expected.WorkspaceID, actual.WorkspaceID) + assert.Equal(t, expected.IsSystem, actual.IsSystem) } func deleteStorage( t *testing.T, router *gin.Engine, - storageID, workspaceID uuid.UUID, + storageID uuid.UUID, token string, ) { test_utils.MakeDeleteRequest( diff --git a/backend/internal/features/storages/errors.go b/backend/internal/features/storages/errors.go index ee59b14..5577976 100644 --- a/backend/internal/features/storages/errors.go +++ b/backend/internal/features/storages/errors.go @@ -33,4 +33,10 @@ var ( ErrStorageHasOtherAttachedDatabasesCannotTransfer = errors.New( "storage has other attached databases and cannot be transferred", ) + ErrSystemStorageCannotBeTransferred = errors.New( + "system storage cannot be transferred between workspaces", + ) + ErrSystemStorageCannotBeMadePrivate = errors.New( + "system storage cannot be changed to non-system", + ) ) diff --git a/backend/internal/features/storages/model.go b/backend/internal/features/storages/model.go index c70c5f5..ae2de47 100644 --- a/backend/internal/features/storages/model.go +++ b/backend/internal/features/storages/model.go @@ -24,6 +24,7 @@ type Storage struct { Type StorageType `json:"type" gorm:"column:type;not null;type:text"` Name string `json:"name" gorm:"column:name;not null;type:text"` LastSaveError *string `json:"lastSaveError" gorm:"column:last_save_error;type:text"` + IsSystem bool `json:"isSystem" gorm:"column:is_system;not null;default:false"` // specific storage LocalStorage *local_storage.LocalStorage `json:"localStorage" gorm:"foreignKey:StorageID"` @@ -86,6 +87,17 @@ func (s *Storage) HideSensitiveData() { s.getSpecificStorage().HideSensitiveData() } +func (s *Storage) HideAllData() { + 
s.LocalStorage = nil + s.S3Storage = nil + s.GoogleDriveStorage = nil + s.NASStorage = nil + s.AzureBlobStorage = nil + s.FTPStorage = nil + s.SFTPStorage = nil + s.RcloneStorage = nil +} + func (s *Storage) EncryptSensitiveData(encryptor encryption.FieldEncryptor) error { return s.getSpecificStorage().EncryptSensitiveData(encryptor) } @@ -93,6 +105,7 @@ func (s *Storage) EncryptSensitiveData(encryptor encryption.FieldEncryptor) erro func (s *Storage) Update(incoming *Storage) { s.Name = incoming.Name s.Type = incoming.Type + s.IsSystem = incoming.IsSystem switch s.Type { case StorageTypeLocal: diff --git a/backend/internal/features/storages/repository.go b/backend/internal/features/storages/repository.go index 2a87386..1aae3dc 100644 --- a/backend/internal/features/storages/repository.go +++ b/backend/internal/features/storages/repository.go @@ -165,7 +165,7 @@ func (r *StorageRepository) FindByWorkspaceID(workspaceID uuid.UUID) ([]*Storage Preload("FTPStorage"). Preload("SFTPStorage"). Preload("RcloneStorage"). - Where("workspace_id = ?", workspaceID). + Where("workspace_id = ? OR is_system = TRUE", workspaceID). Order("name ASC"). 
Find(&storages).Error; err != nil { return nil, err diff --git a/backend/internal/features/storages/service.go b/backend/internal/features/storages/service.go index 3df411a..3a295d5 100644 --- a/backend/internal/features/storages/service.go +++ b/backend/internal/features/storages/service.go @@ -4,6 +4,7 @@ import ( "fmt" audit_logs "databasus-backend/internal/features/audit_logs" + users_enums "databasus-backend/internal/features/users/enums" users_models "databasus-backend/internal/features/users/models" workspaces_services "databasus-backend/internal/features/workspaces/services" "databasus-backend/internal/util/encryption" @@ -38,6 +39,11 @@ func (s *StorageService) SaveStorage( isUpdate := storage.ID != uuid.Nil + if storage.IsSystem && user.Role != users_enums.UserRoleAdmin { + // only admin can manage system storage + return ErrInsufficientPermissionsToManageStorage + } + if isUpdate { existingStorage, err := s.storageRepository.FindByID(storage.ID) if err != nil { @@ -48,6 +54,10 @@ func (s *StorageService) SaveStorage( return ErrStorageDoesNotBelongToWorkspace } + if existingStorage.IsSystem && !storage.IsSystem { + return ErrSystemStorageCannotBeMadePrivate + } + existingStorage.Update(storage) if err := existingStorage.EncryptSensitiveData(s.fieldEncryptor); err != nil { @@ -111,6 +121,11 @@ func (s *StorageService) DeleteStorage( return ErrInsufficientPermissionsToManageStorage } + if storage.IsSystem && user.Role != users_enums.UserRoleAdmin { + // only admin can manage system storage + return ErrInsufficientPermissionsToManageStorage + } + attachedDatabasesIDs, err := s.storageDatabaseCounter.GetStorageAttachedDatabasesIDs(storage.ID) if err != nil { return err @@ -142,16 +157,22 @@ func (s *StorageService) GetStorage( return nil, err } - canView, _, err := s.workspaceService.CanUserAccessWorkspace(storage.WorkspaceID, user) - if err != nil { - return nil, err - } - if !canView { - return nil, ErrInsufficientPermissionsToViewStorage + if 
!storage.IsSystem { + canView, _, err := s.workspaceService.CanUserAccessWorkspace(storage.WorkspaceID, user) + if err != nil { + return nil, err + } + if !canView { + return nil, ErrInsufficientPermissionsToViewStorage + } } storage.HideSensitiveData() + if storage.IsSystem && user.Role != users_enums.UserRoleAdmin { + storage.HideAllData() + } + return storage, nil } @@ -174,6 +195,10 @@ func (s *StorageService) GetStorages( for _, storage := range storages { storage.HideSensitiveData() + + if storage.IsSystem && user.Role != users_enums.UserRoleAdmin { + storage.HideAllData() + } } return storages, nil @@ -258,6 +283,10 @@ func (s *StorageService) TransferStorageToWorkspace( return err } + if existingStorage.IsSystem { + return ErrSystemStorageCannotBeTransferred + } + canManageSource, err := s.workspaceService.CanUserManageDBs(existingStorage.WorkspaceID, user) if err != nil { return err diff --git a/backend/internal/util/period/enums.go b/backend/internal/util/period/enums.go index e9ebfd4..b38c261 100644 --- a/backend/internal/util/period/enums.go +++ b/backend/internal/util/period/enums.go @@ -47,3 +47,36 @@ func (p Period) ToDuration() time.Duration { panic("unknown period: " + string(p)) } } + +// CompareTo compares this period with another and returns: +// -1 if p < other +// +// 0 if p == other +// 1 if p > other +// +// FOREVER is treated as the longest period +func (p Period) CompareTo(other Period) int { + if p == other { + return 0 + } + + d1 := p.ToDuration() + d2 := other.ToDuration() + + // FOREVER has duration 0, but should be treated as longest period + if p == PeriodForever { + return 1 + } + if other == PeriodForever { + return -1 + } + + if d1 < d2 { + return -1 + } + if d1 > d2 { + return 1 + } + + return 0 +} diff --git a/backend/migrations/20260122134856_add_database_plans_table.sql b/backend/migrations/20260122134856_add_database_plans_table.sql new file mode 100644 index 0000000..f4d45dd --- /dev/null +++ 
b/backend/migrations/20260122134856_add_database_plans_table.sql @@ -0,0 +1,30 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE database_plans ( + database_id UUID PRIMARY KEY, + max_backup_size_mb BIGINT NOT NULL, + max_backups_total_size_mb BIGINT NOT NULL, + max_storage_period TEXT NOT NULL +); + +ALTER TABLE database_plans + ADD CONSTRAINT fk_database_plans_database_id + FOREIGN KEY (database_id) + REFERENCES databases (id) + ON DELETE CASCADE; + +CREATE INDEX idx_database_plans_database_id ON database_plans (database_id); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +DROP INDEX IF EXISTS idx_database_plans_database_id; + +ALTER TABLE database_plans DROP CONSTRAINT IF EXISTS fk_database_plans_database_id; + +DROP TABLE IF EXISTS database_plans; + +-- +goose StatementEnd diff --git a/backend/migrations/20260122173935_add_is_system_to_storages.sql b/backend/migrations/20260122173935_add_is_system_to_storages.sql new file mode 100644 index 0000000..a105908 --- /dev/null +++ b/backend/migrations/20260122173935_add_is_system_to_storages.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE storages + ADD COLUMN is_system BOOLEAN NOT NULL DEFAULT FALSE; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE storages + DROP COLUMN is_system; +-- +goose StatementEnd diff --git a/frontend/index.html b/frontend/index.html index 765fa1f..a1a91dc 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -20,6 +20,7 @@
+ diff --git a/frontend/src/entity/backups/api/backupConfigApi.ts b/frontend/src/entity/backups/api/backupConfigApi.ts index 26b024c..ed4dfaf 100644 --- a/frontend/src/entity/backups/api/backupConfigApi.ts +++ b/frontend/src/entity/backups/api/backupConfigApi.ts @@ -1,6 +1,7 @@ import { getApplicationServer } from '../../../constants'; import RequestOptions from '../../../shared/api/RequestOptions'; import { apiHelper } from '../../../shared/api/apiHelper'; +import type { DatabasePlan } from '../../plan'; import type { BackupConfig } from '../model/BackupConfig'; import type { TransferDatabaseRequest } from '../model/TransferDatabaseRequest'; @@ -54,4 +55,12 @@ export const backupConfigApi = { requestOptions, ); }, + + async getDatabasePlan(databaseId: string) { + return apiHelper.fetchGetJson( + `${getApplicationServer()}/api/v1/backup-configs/database/${databaseId}/plan`, + undefined, + true, + ); + }, }; diff --git a/frontend/src/entity/backups/index.ts b/frontend/src/entity/backups/index.ts index 59e17b2..ccf195d 100644 --- a/frontend/src/entity/backups/index.ts +++ b/frontend/src/entity/backups/index.ts @@ -6,3 +6,4 @@ export type { BackupConfig } from './model/BackupConfig'; export { BackupNotificationType } from './model/BackupNotificationType'; export { BackupEncryption } from './model/BackupEncryption'; export type { TransferDatabaseRequest } from './model/TransferDatabaseRequest'; +export type { DatabasePlan } from '../plan'; diff --git a/frontend/src/entity/plan/index.ts b/frontend/src/entity/plan/index.ts new file mode 100644 index 0000000..8b3ad15 --- /dev/null +++ b/frontend/src/entity/plan/index.ts @@ -0,0 +1 @@ +export type { DatabasePlan } from './model/DatabasePlan'; diff --git a/frontend/src/entity/plan/model/DatabasePlan.ts b/frontend/src/entity/plan/model/DatabasePlan.ts new file mode 100644 index 0000000..5c97b42 --- /dev/null +++ b/frontend/src/entity/plan/model/DatabasePlan.ts @@ -0,0 +1,8 @@ +import type { Period } from 
'../../databases/model/Period'; + +export interface DatabasePlan { + databaseId: string; + maxBackupSizeMb: number; + maxBackupsTotalSizeMb: number; + maxStoragePeriod: Period; +} diff --git a/frontend/src/entity/storages/models/Storage.ts b/frontend/src/entity/storages/models/Storage.ts index 6fdabc9..6b44505 100644 --- a/frontend/src/entity/storages/models/Storage.ts +++ b/frontend/src/entity/storages/models/Storage.ts @@ -14,6 +14,7 @@ export interface Storage { name: string; lastSaveError?: string; workspaceId: string; + isSystem: boolean; // specific storage types localStorage?: LocalStorage; diff --git a/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx b/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx index bac77f6..7931544 100644 --- a/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx +++ b/frontend/src/features/backups/ui/EditBackupConfigComponent.tsx @@ -15,12 +15,19 @@ import { CronExpressionParser } from 'cron-parser'; import dayjs, { Dayjs } from 'dayjs'; import { useEffect, useMemo, useState } from 'react'; -import { type BackupConfig, BackupEncryption, backupConfigApi } from '../../../entity/backups'; +import { IS_CLOUD } from '../../../constants'; +import { + type BackupConfig, + BackupEncryption, + type DatabasePlan, + backupConfigApi, +} from '../../../entity/backups'; import { BackupNotificationType } from '../../../entity/backups/model/BackupNotificationType'; import type { Database } from '../../../entity/databases'; import { Period } from '../../../entity/databases/model/Period'; import { type Interval, IntervalType } from '../../../entity/intervals'; import { type Storage, getStorageLogoFromType, storageApi } from '../../../entity/storages'; +import type { UserProfile } from '../../../entity/users'; import { getUserTimeFormat } from '../../../shared/time'; import { getUserTimeFormat as getIs12Hour, @@ -33,6 +40,7 @@ import { ConfirmationComponent } from '../../../shared/ui'; import { 
EditStorageComponent } from '../../storages/ui/edit/EditStorageComponent'; interface Props { + user: UserProfile; database: Database; isShowBackButton: boolean; @@ -57,6 +65,7 @@ const weekdayOptions = [ ]; export const EditBackupConfigComponent = ({ + user, database, isShowBackButton, @@ -73,12 +82,14 @@ export const EditBackupConfigComponent = ({ const [isSaving, setIsSaving] = useState(false); const [storages, setStorages] = useState([]); - const [isStoragesLoading, setIsStoragesLoading] = useState(false); const [isShowCreateStorage, setShowCreateStorage] = useState(false); const [storageSelectKey, setStorageSelectKey] = useState(0); const [isShowWarn, setIsShowWarn] = useState(false); + const [databasePlan, setDatabasePlan] = useState(); + const [isLoading, setIsLoading] = useState(true); + const hasAdvancedValues = !!backupConfig?.isRetryIfFailed || (backupConfig?.maxBackupSizeMb ?? 0) > 0 || @@ -92,6 +103,65 @@ export const EditBackupConfigComponent = ({ const dateTimeFormat = useMemo(() => getUserTimeFormat(), []); + const createDefaultPlan = (databaseId: string, isCloud: boolean): DatabasePlan => { + if (isCloud) { + return { + databaseId, + maxBackupSizeMb: 100, + maxBackupsTotalSizeMb: 4000, + maxStoragePeriod: Period.WEEK, + }; + } else { + return { + databaseId, + maxBackupSizeMb: 0, + maxBackupsTotalSizeMb: 0, + maxStoragePeriod: Period.FOREVER, + }; + } + }; + + const isPeriodAllowed = (period: Period, maxPeriod: Period): boolean => { + const periodOrder = [ + Period.DAY, + Period.WEEK, + Period.MONTH, + Period.THREE_MONTH, + Period.SIX_MONTH, + Period.YEAR, + Period.TWO_YEARS, + Period.THREE_YEARS, + Period.FOUR_YEARS, + Period.FIVE_YEARS, + Period.FOREVER, + ]; + const periodIndex = periodOrder.indexOf(period); + const maxIndex = periodOrder.indexOf(maxPeriod); + return periodIndex <= maxIndex; + }; + + const availablePeriods = useMemo(() => { + const allPeriods = [ + { label: '1 day', value: Period.DAY }, + { label: '1 week', value: Period.WEEK }, 
+ { label: '1 month', value: Period.MONTH }, + { label: '3 months', value: Period.THREE_MONTH }, + { label: '6 months', value: Period.SIX_MONTH }, + { label: '1 year', value: Period.YEAR }, + { label: '2 years', value: Period.TWO_YEARS }, + { label: '3 years', value: Period.THREE_YEARS }, + { label: '4 years', value: Period.FOUR_YEARS }, + { label: '5 years', value: Period.FIVE_YEARS }, + { label: 'Forever', value: Period.FOREVER }, + ]; + + if (!databasePlan) { + return allPeriods; + } + + return allPeriods.filter((p) => isPeriodAllowed(p.value, databasePlan.maxStoragePeriod)); + }, [databasePlan]); + const updateBackupConfig = (patch: Partial) => { setBackupConfig((prev) => (prev ? { ...prev, ...patch } : prev)); setIsUnsaved(true); @@ -131,51 +201,63 @@ export const EditBackupConfigComponent = ({ }; const loadStorages = async () => { - setIsStoragesLoading(true); - try { const storages = await storageApi.getStorages(database.workspaceId); setStorages(storages); } catch (e) { alert((e as Error).message); } - - setIsStoragesLoading(false); }; useEffect(() => { - if (database.id) { - backupConfigApi.getBackupConfigByDbID(database.id).then((res) => { - setBackupConfig(res); - setIsUnsaved(false); - setIsSaving(false); - }); - } else { - setBackupConfig({ - databaseId: database.id, - isBackupsEnabled: true, - backupInterval: { - id: undefined as unknown as string, - interval: IntervalType.DAILY, - timeOfDay: '00:00', - }, - storage: undefined, - storePeriod: Period.THREE_MONTH, - sendNotificationsOn: [BackupNotificationType.BackupFailed], - isRetryIfFailed: true, - maxFailedTriesCount: 3, - encryption: BackupEncryption.ENCRYPTED, + const run = async () => { + setIsLoading(true); - maxBackupSizeMb: 0, - maxBackupsTotalSizeMb: 0, - }); - } - loadStorages(); + try { + if (database.id) { + const config = await backupConfigApi.getBackupConfigByDbID(database.id); + setBackupConfig(config); + setIsUnsaved(false); + setIsSaving(false); + + const plan = await 
backupConfigApi.getDatabasePlan(database.id); + setDatabasePlan(plan); + } else { + const plan = createDefaultPlan('', IS_CLOUD); + setDatabasePlan(plan); + + setBackupConfig({ + databaseId: database.id, + isBackupsEnabled: true, + backupInterval: { + id: undefined as unknown as string, + interval: IntervalType.DAILY, + timeOfDay: '00:00', + }, + storage: undefined, + storePeriod: + plan.maxStoragePeriod === Period.FOREVER ? Period.THREE_MONTH : plan.maxStoragePeriod, + sendNotificationsOn: [BackupNotificationType.BackupFailed], + isRetryIfFailed: true, + maxFailedTriesCount: 3, + encryption: BackupEncryption.ENCRYPTED, + maxBackupSizeMb: plan.maxBackupSizeMb, + maxBackupsTotalSizeMb: plan.maxBackupsTotalSizeMb, + }); + } + + await loadStorages(); + } catch (e) { + alert((e as Error).message); + } finally { + setIsLoading(false); + } + }; + + run(); }, [database]); - if (!backupConfig) return
; - - if (isStoragesLoading) { + if (isLoading) { return (
@@ -183,6 +265,8 @@ export const EditBackupConfigComponent = ({ ); } + if (!backupConfig) return
; + const { backupInterval } = backupConfig; // UTC → local conversions for display @@ -414,28 +498,30 @@ export const EditBackupConfigComponent = ({
-
-
Encryption
-
- updateBackupConfig({ encryption: v })} + size="small" + className="w-[200px]" + options={[ + { label: 'None', value: BackupEncryption.NONE }, + { label: 'Encrypt backup files', value: BackupEncryption.ENCRYPTED }, + ]} + /> - - - + + + +
-
+ )}
Store period
@@ -445,19 +531,7 @@ export const EditBackupConfigComponent = ({ onChange={(v) => updateBackupConfig({ storePeriod: v })} size="small" className="w-[200px]" - options={[ - { label: '1 day', value: Period.DAY }, - { label: '1 week', value: Period.WEEK }, - { label: '1 month', value: Period.MONTH }, - { label: '3 months', value: Period.THREE_MONTH }, - { label: '6 months', value: Period.SIX_MONTH }, - { label: '1 year', value: Period.YEAR }, - { label: '2 years', value: Period.TWO_YEARS }, - { label: '3 years', value: Period.THREE_YEARS }, - { label: '4 years', value: Period.FOUR_YEARS }, - { label: '5 years', value: Period.FIVE_YEARS }, - { label: 'Forever', value: Period.FOREVER }, - ]} + options={availablePeriods} /> updateBackupConfig({ maxFailedTriesCount: value || 1 })} size="small" - className="w-full max-w-[200px] grow" + className="w-full max-w-[75px] grow" />
Max backup size limit
- 0} - onChange={(e) => { + disabled={IS_CLOUD} + onChange={(checked) => { updateBackupConfig({ - maxBackupSizeMb: e.target.checked ? backupConfig.maxBackupSizeMb || 1000 : 0, + maxBackupSizeMb: checked ? backupConfig.maxBackupSizeMb || 1000 : 0, }); }} - > - Enable - + /> {backupConfig.maxBackupSizeMb > 0 && ( -
-
Max backup size (MB)
+
+
Max file size (MB)
0 + ? databasePlan.maxBackupSizeMb + : undefined + } value={backupConfig.maxBackupSizeMb} - onChange={(value) => updateBackupConfig({ maxBackupSizeMb: value || 1 })} + onChange={(value) => { + const newValue = value || 1; + if (databasePlan?.maxBackupSizeMb && databasePlan.maxBackupSizeMb > 0) { + updateBackupConfig({ + maxBackupSizeMb: Math.min(newValue, databasePlan.maxBackupSizeMb), + }); + } else { + updateBackupConfig({ maxBackupSizeMb: newValue }); + } + }} size="small" - className="w-full max-w-[100px] grow" + className="w-full max-w-[75px] grow" />
- {(backupConfig.maxBackupSizeMb / 1024).toFixed(2)} GB + ~{((backupConfig.maxBackupSizeMb / 1024) * 15).toFixed(2)} GB DB size
)} @@ -616,22 +704,22 @@ export const EditBackupConfigComponent = ({
Limit total backups size
- 0} - onChange={(e) => { + disabled={IS_CLOUD} + onChange={(checked) => { updateBackupConfig({ - maxBackupsTotalSizeMb: e.target.checked + maxBackupsTotalSizeMb: checked ? backupConfig.maxBackupsTotalSizeMb || 1_000_000 : 0, }); }} - > - Enable - + /> @@ -640,17 +728,35 @@ export const EditBackupConfigComponent = ({ {backupConfig.maxBackupsTotalSizeMb > 0 && (
-
Max total size (MB)
+
Backups files size (MB)
0 + ? databasePlan.maxBackupsTotalSizeMb + : undefined + } value={backupConfig.maxBackupsTotalSizeMb} - onChange={(value) => updateBackupConfig({ maxBackupsTotalSizeMb: value || 1 })} + onChange={(value) => { + const newValue = value || 1; + if ( + databasePlan?.maxBackupsTotalSizeMb && + databasePlan.maxBackupsTotalSizeMb > 0 + ) { + updateBackupConfig({ + maxBackupsTotalSizeMb: Math.min(newValue, databasePlan.maxBackupsTotalSizeMb), + }); + } else { + updateBackupConfig({ maxBackupsTotalSizeMb: newValue }); + } + }} size="small" - className="w-full max-w-[100px] grow" + className="w-full max-w-[75px] grow" />
- {(backupConfig.maxBackupsTotalSizeMb / 1024).toFixed(2)} GB + {(backupConfig.maxBackupsTotalSizeMb / 1024).toFixed(2)} GB (~ + {backupConfig.maxBackupsTotalSizeMb / backupConfig.maxBackupSizeMb} backups)
)} @@ -697,6 +803,7 @@ export const EditBackupConfigComponent = ({
{
-
-
Encryption
-
{backupConfig.encryption === BackupEncryption.ENCRYPTED ? 'Enabled' : 'None'}
+ {!IS_CLOUD && ( +
+
Encryption
+
+ {backupConfig.encryption === BackupEncryption.ENCRYPTED ? 'Enabled' : 'None'} +
- - - -
+ + + +
+ )}
Notifications
diff --git a/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx b/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx index f24cff9..7fa70fc 100644 --- a/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx +++ b/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx @@ -11,6 +11,7 @@ import { type PostgresqlDatabase, databaseApi, } from '../../../entity/databases'; +import type { UserProfile } from '../../../entity/users'; import { EditBackupConfigComponent } from '../../backups'; import { CreateReadOnlyComponent } from './edit/CreateReadOnlyComponent'; import { EditDatabaseBaseInfoComponent } from './edit/EditDatabaseBaseInfoComponent'; @@ -18,8 +19,8 @@ import { EditDatabaseNotifiersComponent } from './edit/EditDatabaseNotifiersComp import { EditDatabaseSpecificDataComponent } from './edit/EditDatabaseSpecificDataComponent'; interface Props { + user: UserProfile; workspaceId: string; - onCreated: (databaseId: string) => void; onClose: () => void; } @@ -62,7 +63,7 @@ const initializeDatabaseTypeData = (db: Database): Database => { } }; -export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Props) => { +export const CreateDatabaseComponent = ({ user, workspaceId, onCreated, onClose }: Props) => { const [isCreating, setIsCreating] = useState(false); const [backupConfig, setBackupConfig] = useState(); const [database, setDatabase] = useState(createInitialDatabase(workspaceId)); @@ -149,6 +150,7 @@ export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Pro if (step === 'backup-config') { return ( onClose()} diff --git a/frontend/src/features/databases/ui/edit/CreateReadOnlyComponent.tsx b/frontend/src/features/databases/ui/edit/CreateReadOnlyComponent.tsx index 32e7ebd..2db8ee1 100644 --- a/frontend/src/features/databases/ui/edit/CreateReadOnlyComponent.tsx +++ b/frontend/src/features/databases/ui/edit/CreateReadOnlyComponent.tsx @@ -1,6 +1,7 @@ import { Button, Modal, Spin } from 
'antd'; import { useEffect, useState } from 'react'; +import { IS_CLOUD } from '../../../../constants'; import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases'; interface Props { @@ -193,9 +194,11 @@ export const CreateReadOnlyComponent = ({ Back - + {!IS_CLOUD && ( + + )}
- {isLocalhostDb && ( + {isLocalhostDb && !IS_CLOUD && (
diff --git a/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx b/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx index deb2bd5..c19434d 100644 --- a/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx +++ b/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx @@ -2,6 +2,7 @@ import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant import { App, Button, Input, InputNumber, Switch, Tooltip } from 'antd'; import { useEffect, useState } from 'react'; +import { IS_CLOUD } from '../../../../constants'; import { type Database, databaseApi } from '../../../../entity/databases'; import { MongodbConnectionStringParser } from '../../../../entity/databases/model/mongodb/MongodbConnectionStringParser'; import { ToastHelper } from '../../../../shared/toast'; @@ -201,7 +202,7 @@ export const EditMongoDbSpecificDataComponent = ({ />
- {isLocalhostDb && ( + {isLocalhostDb && !IS_CLOUD && (
diff --git a/frontend/src/features/databases/ui/edit/EditMySqlSpecificDataComponent.tsx b/frontend/src/features/databases/ui/edit/EditMySqlSpecificDataComponent.tsx index 88b0cd8..c97c59f 100644 --- a/frontend/src/features/databases/ui/edit/EditMySqlSpecificDataComponent.tsx +++ b/frontend/src/features/databases/ui/edit/EditMySqlSpecificDataComponent.tsx @@ -2,6 +2,7 @@ import { CopyOutlined } from '@ant-design/icons'; import { App, Button, Input, InputNumber, Switch } from 'antd'; import { useEffect, useState } from 'react'; +import { IS_CLOUD } from '../../../../constants'; import { type Database, databaseApi } from '../../../../entity/databases'; import { MySqlConnectionStringParser } from '../../../../entity/databases/model/mysql/MySqlConnectionStringParser'; import { ToastHelper } from '../../../../shared/toast'; @@ -196,7 +197,7 @@ export const EditMySqlSpecificDataComponent = ({ />
- {isLocalhostDb && ( + {isLocalhostDb && !IS_CLOUD && (
diff --git a/frontend/src/features/databases/ui/edit/EditPostgreSqlSpecificDataComponent.tsx b/frontend/src/features/databases/ui/edit/EditPostgreSqlSpecificDataComponent.tsx index 02098b9..db8a618 100644 --- a/frontend/src/features/databases/ui/edit/EditPostgreSqlSpecificDataComponent.tsx +++ b/frontend/src/features/databases/ui/edit/EditPostgreSqlSpecificDataComponent.tsx @@ -2,6 +2,7 @@ import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant import { App, Button, Checkbox, Input, InputNumber, Select, Switch, Tooltip } from 'antd'; import { useEffect, useState } from 'react'; +import { IS_CLOUD } from '../../../../constants'; import { type Database, databaseApi } from '../../../../entity/databases'; import { ConnectionStringParser } from '../../../../entity/databases/model/postgresql/ConnectionStringParser'; import { ToastHelper } from '../../../../shared/toast'; @@ -235,7 +236,7 @@ export const EditPostgreSqlSpecificDataComponent = ({ />
- {isLocalhostDb && ( + {isLocalhostDb && !IS_CLOUD && (
@@ -372,7 +373,7 @@ export const EditPostgreSqlSpecificDataComponent = ({ />
- {isRestoreMode && ( + {isRestoreMode && !IS_CLOUD && (
CPU count
diff --git a/frontend/src/features/playground/index.ts b/frontend/src/features/playground/index.ts new file mode 100644 index 0000000..428cd8e --- /dev/null +++ b/frontend/src/features/playground/index.ts @@ -0,0 +1 @@ +export { PlaygroundWarningComponent } from './ui/PlaygroundWarningComponent'; diff --git a/frontend/src/features/playground/ui/PlaygroundWarningComponent.tsx b/frontend/src/features/playground/ui/PlaygroundWarningComponent.tsx new file mode 100644 index 0000000..bfb5fc1 --- /dev/null +++ b/frontend/src/features/playground/ui/PlaygroundWarningComponent.tsx @@ -0,0 +1,146 @@ +import { Modal } from 'antd'; +import type { JSX } from 'react'; +import { useEffect, useState } from 'react'; + +import { IS_CLOUD } from '../../../constants'; + +const STORAGE_KEY = 'databasus_playground_info_dismissed'; + +const TIMEOUT_SECONDS = 30; + +export const PlaygroundWarningComponent = (): JSX.Element => { + const [isVisible, setIsVisible] = useState(false); + const [remainingSeconds, setRemainingSeconds] = useState(TIMEOUT_SECONDS); + const [isButtonEnabled, setIsButtonEnabled] = useState(false); + + const handleClose = () => { + try { + localStorage.setItem(STORAGE_KEY, 'true'); + } catch (e) { + console.warn('Failed to save playground modal state to localStorage:', e); + } + setIsVisible(false); + }; + + useEffect(() => { + if (!IS_CLOUD) { + return; + } + + try { + const isDismissed = localStorage.getItem(STORAGE_KEY) === 'true'; + if (!isDismissed) { + setIsVisible(true); + } + } catch (e) { + console.warn('Failed to read playground modal state from localStorage:', e); + setIsVisible(true); + } + }, []); + + useEffect(() => { + if (!isVisible) { + return; + } + + const interval = setInterval(() => { + setRemainingSeconds((prev) => { + if (prev <= 1) { + setIsButtonEnabled(true); + clearInterval(interval); + return 0; + } + return prev - 1; + }); + }, 1000); + + return () => clearInterval(interval); + }, [isVisible]); + + return ( + + {isButtonEnabled ? 
'Understood' : `${remainingSeconds}`} +
+ } + okButtonProps={{ disabled: !isButtonEnabled }} + closable={false} + cancelButtonProps={{ style: { display: 'none' } }} + width={500} + centered + maskClosable={false} + > +
+
+

What is Playground?

+

+ Playground is a dev environment where you can test small database backups and see
+ Databasus in action. The Databasus dev team can test new features and spot issues that are
+ hard to detect in self-hosted installations (which have no logs or reports)

+
+ +
+

What are the limits?

+
    +
  • Single backup size - 100 MB (~1.5 GB database)
  • +
  • Store period - 7 days
  • +
+
+ +
+

Is it secure?

+

+ Yes, it's regular Databasus installation, secured and maintained by Databasus team. + More about security{' '} + + you can read here + +

+
+ +
+

Can my data be corrupted?

+

+ No, because the playground uses only read-only users and cannot affect your DB. The only
+ issue you can face is instability: playground background workers are restarted frequently,
+ so a backup can be slower or restarted due to an app restart

+
+ +
+

What if I see an issue?

+

+ Create{' '} + + GitHub issue + {' '} + or write{' '} + + to the community + +

+
+
+ + ); +}; diff --git a/frontend/src/features/storages/ui/StorageCardComponent.tsx b/frontend/src/features/storages/ui/StorageCardComponent.tsx index dc327a6..08a7a00 100644 --- a/frontend/src/features/storages/ui/StorageCardComponent.tsx +++ b/frontend/src/features/storages/ui/StorageCardComponent.tsx @@ -40,6 +40,12 @@ export const StorageCardComponent = ({ Has save error
)} + + {storage.isSystem && ( +
+ System storage +
+ )}
); }; diff --git a/frontend/src/features/storages/ui/StorageComponent.tsx b/frontend/src/features/storages/ui/StorageComponent.tsx index 224d505..43b0404 100644 --- a/frontend/src/features/storages/ui/StorageComponent.tsx +++ b/frontend/src/features/storages/ui/StorageComponent.tsx @@ -11,6 +11,7 @@ import { useEffect } from 'react'; import { backupConfigApi } from '../../../entity/backups'; import { storageApi } from '../../../entity/storages'; import type { Storage } from '../../../entity/storages'; +import { type UserProfile, UserRole } from '../../../entity/users'; import { ToastHelper } from '../../../shared/toast'; import { ConfirmationComponent } from '../../../shared/ui'; import { StorageTransferDialogComponent } from './StorageTransferDialogComponent'; @@ -23,6 +24,7 @@ interface Props { onStorageDeleted: () => void; onStorageTransferred: () => void; isCanManageStorages: boolean; + user: UserProfile; } export const StorageComponent = ({ @@ -31,6 +33,7 @@ export const StorageComponent = ({ onStorageDeleted, onStorageTransferred, isCanManageStorages, + user, }: Props) => { const [storage, setStorage] = useState(); @@ -142,11 +145,12 @@ export const StorageComponent = ({ {!isEditName ? (
{storage.name} - {isCanManageStorages && ( -
startEdit('name')}> - -
- )} + {(storage.isSystem && user.role === UserRole.ADMIN) || + (isCanManageStorages && ( +
startEdit('name')}> + +
+ ))}
) : (
@@ -219,7 +223,9 @@ export const StorageComponent = ({
Storage settings
- {!isEditSettings && isCanManageStorages ? ( + {!isEditSettings && + isCanManageStorages && + !(storage.isSystem && user.role !== UserRole.ADMIN) ? (
startEdit('settings')}>
@@ -241,9 +247,10 @@ export const StorageComponent = ({ isShowName={false} editingStorage={storage} onChanged={onStorageChanged} + user={user} /> ) : ( - + )}
@@ -261,23 +268,27 @@ export const StorageComponent = ({ {isCanManageStorages && ( <> -
diff --git a/frontend/src/features/storages/ui/StoragesComponent.tsx b/frontend/src/features/storages/ui/StoragesComponent.tsx index 8a61535..1b6312b 100644 --- a/frontend/src/features/storages/ui/StoragesComponent.tsx +++ b/frontend/src/features/storages/ui/StoragesComponent.tsx @@ -3,6 +3,7 @@ import { useEffect, useState } from 'react'; import { storageApi } from '../../../entity/storages'; import type { Storage } from '../../../entity/storages'; +import type { UserProfile } from '../../../entity/users'; import type { WorkspaceResponse } from '../../../entity/workspaces'; import { useIsMobile } from '../../../shared/hooks'; import { StorageCardComponent } from './StorageCardComponent'; @@ -10,6 +11,7 @@ import { StorageComponent } from './StorageComponent'; import { EditStorageComponent } from './edit/EditStorageComponent'; interface Props { + user: UserProfile; contentHeight: number; workspace: WorkspaceResponse; isCanManageStorages: boolean; @@ -17,7 +19,12 @@ interface Props { const SELECTED_STORAGE_STORAGE_KEY = 'selectedStorageId'; -export const StoragesComponent = ({ contentHeight, workspace, isCanManageStorages }: Props) => { +export const StoragesComponent = ({ + user, + contentHeight, + workspace, + isCanManageStorages, +}: Props) => { const isMobile = useIsMobile(); const [isLoading, setIsLoading] = useState(true); const [storages, setStorages] = useState([]); @@ -144,6 +151,7 @@ export const StoragesComponent = ({ contentHeight, workspace, isCanManageStorage loadStorages(); }} isCanManageStorages={isCanManageStorages} + user={user} />
)} @@ -170,6 +178,7 @@ export const StoragesComponent = ({ contentHeight, workspace, isCanManageStorage loadStorages(false, storage.id); setIsShowAddStorage(false); }} + user={user} /> )} diff --git a/frontend/src/features/storages/ui/edit/EditStorageComponent.tsx b/frontend/src/features/storages/ui/edit/EditStorageComponent.tsx index 2f9a020..5faf9fc 100644 --- a/frontend/src/features/storages/ui/edit/EditStorageComponent.tsx +++ b/frontend/src/features/storages/ui/edit/EditStorageComponent.tsx @@ -1,12 +1,15 @@ -import { Button, Input, Select } from 'antd'; +import { InfoCircleOutlined } from '@ant-design/icons'; +import { Button, Input, Select, Switch, Tooltip } from 'antd'; import { useEffect, useState } from 'react'; +import { IS_CLOUD } from '../../../../constants'; import { type Storage, StorageType, getStorageLogoFromType, storageApi, } from '../../../../entity/storages'; +import { type UserProfile, UserRole } from '../../../../entity/users'; import { ToastHelper } from '../../../../shared/toast'; import { EditAzureBlobStorageComponent } from './storages/EditAzureBlobStorageComponent'; import { EditFTPStorageComponent } from './storages/EditFTPStorageComponent'; @@ -26,6 +29,8 @@ interface Props { editingStorage?: Storage; onChanged: (storage: Storage) => void; + + user: UserProfile; } export function EditStorageComponent({ @@ -35,6 +40,7 @@ export function EditStorageComponent({ isShowName, editingStorage, onChanged, + user, }: Props) { const [storage, setStorage] = useState(); const [isUnsaved, setIsUnsaved] = useState(false); @@ -188,6 +194,7 @@ export function EditStorageComponent({ workspaceId, name: '', type: StorageType.LOCAL, + isSystem: false, localStorage: {}, }, ); @@ -357,6 +364,31 @@ export function EditStorageComponent({
+ {IS_CLOUD && user.role === UserRole.ADMIN && ( +
+
Is system?
+ +
+ { + setStorage({ ...storage, isSystem: checked }); + setIsUnsaved(true); + }} + size="small" + /> + + + + +
+
+ )} +
diff --git a/frontend/src/features/storages/ui/show/ShowStorageComponent.tsx b/frontend/src/features/storages/ui/show/ShowStorageComponent.tsx index be09618..3fd3ba1 100644 --- a/frontend/src/features/storages/ui/show/ShowStorageComponent.tsx +++ b/frontend/src/features/storages/ui/show/ShowStorageComponent.tsx @@ -1,6 +1,7 @@ import { type Storage, StorageType } from '../../../../entity/storages'; import { getStorageLogoFromType } from '../../../../entity/storages/models/getStorageLogoFromType'; import { getStorageNameFromType } from '../../../../entity/storages/models/getStorageNameFromType'; +import { type UserProfile, UserRole } from '../../../../entity/users'; import { ShowAzureBlobStorageComponent } from './storages/ShowAzureBlobStorageComponent'; import { ShowFTPStorageComponent } from './storages/ShowFTPStorageComponent'; import { ShowGoogleDriveStorageComponent } from './storages/ShowGoogleDriveStorageComponent'; @@ -11,9 +12,10 @@ import { ShowSFTPStorageComponent } from './storages/ShowSFTPStorageComponent'; interface Props { storage?: Storage; + user: UserProfile; } -export function ShowStorageComponent({ storage }: Props) { +export function ShowStorageComponent({ storage, user }: Props) { if (!storage) return null; return ( @@ -30,6 +32,13 @@ export function ShowStorageComponent({ storage }: Props) { />
+ {storage.isSystem && user.role === UserRole.ADMIN && ( +
+
System storage
+
Yes
+
+ )} +
{storage?.type === StorageType.S3 && }
diff --git a/frontend/src/pages/AuthPageComponent.tsx b/frontend/src/pages/AuthPageComponent.tsx index 7d2f79e..173b5c8 100644 --- a/frontend/src/pages/AuthPageComponent.tsx +++ b/frontend/src/pages/AuthPageComponent.tsx @@ -3,6 +3,7 @@ import { Spin } from 'antd'; import { useEffect, useState } from 'react'; import { userApi } from '../entity/users'; +import { PlaygroundWarningComponent } from '../features/playground'; import { AdminPasswordComponent, AuthNavbarComponent, @@ -60,6 +61,8 @@ export function AuthPageComponent() {
)} + +
); } diff --git a/frontend/src/shared/api/RateLimiter.ts b/frontend/src/shared/api/RateLimiter.ts new file mode 100644 index 0000000..6803d0c --- /dev/null +++ b/frontend/src/shared/api/RateLimiter.ts @@ -0,0 +1,36 @@ +export class RateLimiter { + private tokens: number; + private readonly queue: Array<() => void>; + + constructor( + private readonly capacity: number, + private readonly refillMs: number, + ) { + this.tokens = capacity; + this.queue = []; + + setInterval(() => { + this.tokens = this.capacity; + this.releaseQueued(); + }, this.refillMs); + } + + private releaseQueued() { + while (this.tokens > 0 && this.queue.length > 0) { + this.tokens -= 1; + const resolve = this.queue.shift(); + if (resolve) resolve(); + } + } + + async acquire(): Promise { + if (this.tokens > 0) { + this.tokens -= 1; + return; + } + + return new Promise((resolve) => { + this.queue.push(resolve); + }); + } +} diff --git a/frontend/src/shared/api/apiHelper.ts b/frontend/src/shared/api/apiHelper.ts index 8cfa34a..f3f277b 100644 --- a/frontend/src/shared/api/apiHelper.ts +++ b/frontend/src/shared/api/apiHelper.ts @@ -1,8 +1,11 @@ import { accessTokenHelper } from '.'; +import { IS_CLOUD } from '../../constants'; +import { RateLimiter } from './RateLimiter'; import RequestOptions from './RequestOptions'; -const REPEAT_TRIES_COUNT = 10; +const REPEAT_TRIES_COUNT = 30; const REPEAT_INTERVAL_MS = 3_000; +const rateLimiter = new RateLimiter(IS_CLOUD ? 
5 : 30, 1_000); const handleOrThrowMessageIfResponseError = async ( url: string, @@ -41,6 +44,8 @@ const makeRequest = async ( optionsWrapper: RequestOptions, currentTry = 0, ): Promise => { + await rateLimiter.acquire(); + try { const response = await fetch(url, optionsWrapper.toRequestInit()); await handleOrThrowMessageIfResponseError(url, response); diff --git a/frontend/src/widgets/main/MainScreenComponent.tsx b/frontend/src/widgets/main/MainScreenComponent.tsx index da9ccd5..f95ccad 100644 --- a/frontend/src/widgets/main/MainScreenComponent.tsx +++ b/frontend/src/widgets/main/MainScreenComponent.tsx @@ -14,6 +14,7 @@ import { import { type WorkspaceResponse, workspaceApi } from '../../entity/workspaces'; import { DatabasesComponent } from '../../features/databases/ui/DatabasesComponent'; import { NotifiersComponent } from '../../features/notifiers/ui/NotifiersComponent'; +import { PlaygroundWarningComponent } from '../../features/playground'; import { SettingsComponent } from '../../features/settings'; import { StoragesComponent } from '../../features/storages/ui/StoragesComponent'; import { ProfileComponent } from '../../features/users'; @@ -261,7 +262,7 @@ export const MainScreenComponent = () => {
- {isLoading ? ( + {isLoading || !user ? (
} size="large" />
@@ -329,6 +330,7 @@ export const MainScreenComponent = () => { )} {selectedTab === 'storages' && selectedWorkspace && ( { workspacesCount={workspaces.length} /> )} + +
); };