From fc88b730d53d85e02c327e1b438f4c48c9944533 Mon Sep 17 00:00:00 2001 From: Rostislav Dugin Date: Tue, 17 Feb 2026 19:52:08 +0300 Subject: [PATCH] FEATURE (backups): Add metadata alongside backup files to make them recoverable without Databasus --- README.md | 44 ++-------- .../backups/backups/backuping/backuper.go | 42 ++++++++++ .../features/backups/backups/common/dto.go | 40 ++++++--- .../backups/backups/controller_test.go | 82 +++++++++++++++++++ .../usecases/mariadb/create_backup_uc.go | 4 +- .../usecases/mongodb/create_backup_uc.go | 2 + .../usecases/mysql/create_backup_uc.go | 4 +- .../usecases/postgresql/create_backup_uc.go | 4 +- 8 files changed, 171 insertions(+), 51 deletions(-) diff --git a/README.md b/README.md index eca8f94..3c6b07b 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,8 @@ - **Encryption for secrets**: Any sensitive data is encrypted and never exposed, even in logs or error messages - **Read-only user**: Databasus uses a read-only user by default for backups and never stores anything that can modify your data +It is also important for Databasus that you are able to decrypt and restore backups from storages (local, S3, etc.) without Databasus itself. To do so, read our guide on [how to recover directly from storage](https://databasus.com/how-to-recover-without-databasus). We avoid "vendor lock-in" even as an open source tool! + ### 👥 **Suitable for teams** (docs) - **Workspaces**: Group databases, notifiers and storages for different projects or teams @@ -231,56 +233,22 @@ docker exec -it databasus ./main --new-password="YourNewSecurePassword123" --ema Replace `admin` with the actual email address of the user whose password you want to reset. +### 💾 Backing up Databasus itself + +After installation, it is also recommended to back up Databasus itself or, at least, to copy the secret key used for encryption (this takes about 30 seconds). 
This way, you are able to restore from your encrypted backups even if you lose access to the server running Databasus or if it becomes corrupted. + --- ## 📝 License This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENSE) file for details ---- - ## 🤝 Contributing Contributions are welcome! Read the contributing guide for more details, priorities and rules. If you want to contribute but don't know where to start, message me on Telegram [@rostislav_dugin](https://t.me/rostislav_dugin) Also you can join our large community of developers, DBAs and DevOps engineers on Telegram [@databasus_community](https://t.me/databasus_community). --- - -## 📖 Migration guide - -Databasus is the new name for Postgresus. You can stay with latest version of Postgresus if you wish. If you want to migrate - follow installation steps for Databasus itself. - -Just renaming an image is not enough as Postgresus and Databasus use different data folders and internal database naming. - -You can put a new Databasus image with updated volume near the old Postgresus and run it (stop Postgresus before): - -``` -services: - databasus: - container_name: databasus - image: databasus/databasus:latest - ports: - - "4005:4005" - volumes: - - ./databasus-data:/databasus-data - restart: unless-stopped -``` - -Then manually move databases from Postgresus to Databasus. - -### Why was Postgresus renamed to Databasus? - -Databasus has been developed since 2023. It was internal tool to backup production and home projects databases. In start of 2025 it was released as open source project on GitHub. By the end of 2025 it became popular and the time for renaming has come in December 2025. - -It was an important step for the project to grow. Actually, there are a couple of reasons: - -1. Postgresus is no longer a little tool that just adds UI for pg_dump for little projects. It became a tool both for individual users, DevOps, DBAs, teams, companies and even large enterprises. 
Tens of thousands of users use Postgresus every day. Postgresus grew into a reliable backup management tool. Initial positioning is no longer suitable: the project is not just a UI wrapper, it's a solid backup management system now (despite it's still easy to use). - -2. New databases are supported: although the primary focus is PostgreSQL (with 100% support in the most efficient way) and always will be, Databasus added support for MySQL, MariaDB and MongoDB. Later more databases will be supported. - -3. Trademark issue: "postgres" is a trademark of PostgreSQL Inc. and cannot be used in the project name. So for safety and legal reasons, we had to rename the project. - ## AI disclaimer There have been questions about AI usage in project development in issues and discussions. As the project focuses on security, reliability and production usage, it's important to explain how AI is used in the development process. diff --git a/backend/internal/features/backups/backups/backuping/backuper.go b/backend/internal/features/backups/backups/backuping/backuper.go index 2f5a73f..b149e7d 100644 --- a/backend/internal/features/backups/backups/backuping/backuper.go +++ b/backend/internal/features/backups/backups/backuping/backuper.go @@ -1,7 +1,9 @@ package backuping import ( + "bytes" "context" + "encoding/json" "errors" "fmt" "log/slog" @@ -311,6 +313,13 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) { // Update backup with encryption metadata if provided if backupMetadata != nil { + backupMetadata.BackupID = backup.ID + + if err := backupMetadata.Validate(); err != nil { + n.logger.Error("Failed to validate backup metadata", "error", err) + return + } + backup.EncryptionSalt = backupMetadata.EncryptionSalt backup.EncryptionIV = backupMetadata.EncryptionIV backup.Encryption = backupMetadata.Encryption @@ -321,6 +330,39 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) { return } + // Save metadata file to storage + if 
backupMetadata != nil { + metadataJSON, err := json.Marshal(backupMetadata) + if err != nil { + n.logger.Error("Failed to marshal backup metadata to JSON", + "backupId", backup.ID, + "error", err, + ) + } else { + metadataReader := bytes.NewReader(metadataJSON) + metadataFileName := backup.FileName + ".metadata" + + if err := storage.SaveFile( + context.Background(), + n.fieldEncryptor, + n.logger, + metadataFileName, + metadataReader, + ); err != nil { + n.logger.Error("Failed to save backup metadata file to storage", + "backupId", backup.ID, + "fileName", metadataFileName, + "error", err, + ) + } else { + n.logger.Info("Backup metadata file saved successfully", + "backupId", backup.ID, + "fileName", metadataFileName, + ) + } + } + } + // Update database last backup time now := time.Now().UTC() if updateErr := n.databaseService.SetLastBackupTime(databaseID, now); updateErr != nil { diff --git a/backend/internal/features/backups/backups/common/dto.go b/backend/internal/features/backups/backups/common/dto.go index 3075f2b..b453622 100644 --- a/backend/internal/features/backups/backups/common/dto.go +++ b/backend/internal/features/backups/backups/common/dto.go @@ -1,17 +1,37 @@ package common -import backups_config "databasus-backend/internal/features/backups/config" +import ( + backups_config "databasus-backend/internal/features/backups/config" + "errors" -type BackupType string - -const ( - BackupTypeDefault BackupType = "DEFAULT" // For MySQL, MongoDB, PostgreSQL legacy (-Fc) - BackupTypeDirectory BackupType = "DIRECTORY" // PostgreSQL directory type (-Fd) + "github.com/google/uuid" ) type BackupMetadata struct { - EncryptionSalt *string - EncryptionIV *string - Encryption backups_config.BackupEncryption - Type BackupType + BackupID uuid.UUID `json:"backupId"` + EncryptionSalt *string `json:"encryptionSalt"` + EncryptionIV *string `json:"encryptionIV"` + Encryption backups_config.BackupEncryption `json:"encryption"` +} + +func (m *BackupMetadata) Validate() error 
{ + if m.BackupID == uuid.Nil { + return errors.New("backup ID is required") + } + + if m.Encryption == "" { + return errors.New("encryption is required") + } + + if m.Encryption == backups_config.BackupEncryptionEncrypted { + if m.EncryptionSalt == nil { + return errors.New("encryption salt is required when encryption is enabled") + } + + if m.EncryptionIV == nil { + return errors.New("encryption IV is required when encryption is enabled") + } + } + + return nil } diff --git a/backend/internal/features/backups/backups/controller_test.go b/backend/internal/features/backups/backups/controller_test.go index e35f81d..7977105 100644 --- a/backend/internal/features/backups/backups/controller_test.go +++ b/backend/internal/features/backups/backups/controller_test.go @@ -18,6 +18,8 @@ import ( "databasus-backend/internal/config" audit_logs "databasus-backend/internal/features/audit_logs" + "databasus-backend/internal/features/backups/backups/backuping" + backups_common "databasus-backend/internal/features/backups/backups/common" backups_core "databasus-backend/internal/features/backups/backups/core" backups_download "databasus-backend/internal/features/backups/backups/download" backups_config "databasus-backend/internal/features/backups/config" @@ -1245,6 +1247,86 @@ func Test_GenerateDownloadToken_BlockedWhenDownloadInProgress(t *testing.T) { workspaces_testing.RemoveTestWorkspace(workspace, router) } +func Test_MakeBackup_VerifyBackupAndMetadataFilesExistInStorage(t *testing.T) { + router := createTestRouter() + owner := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router) + + database, _, storage := createTestDatabaseWithBackups(workspace, owner, router) + + backuperNode := backuping.CreateTestBackuperNode() + backuperCancel := backuping.StartBackuperNodeForTest(t, backuperNode) + defer backuping.StopBackuperNodeForTest(t, backuperCancel, backuperNode) + + scheduler := 
backuping.CreateTestScheduler() + schedulerCancel := backuping.StartSchedulerForTest(t, scheduler) + defer schedulerCancel() + + backupRepo := &backups_core.BackupRepository{} + initialBackups, err := backupRepo.FindByDatabaseID(database.ID) + assert.NoError(t, err) + + request := MakeBackupRequest{DatabaseID: database.ID} + test_utils.MakePostRequest( + t, + router, + "/api/v1/backups", + "Bearer "+owner.Token, + request, + http.StatusOK, + ) + + backuping.WaitForBackupCompletion(t, database.ID, len(initialBackups), 30*time.Second) + + backups, err := backupRepo.FindByDatabaseID(database.ID) + assert.NoError(t, err) + assert.Greater(t, len(backups), len(initialBackups)) + + backup := backups[0] + assert.Equal(t, backups_core.BackupStatusCompleted, backup.Status) + + storageService := storages.GetStorageService() + backupStorage, err := storageService.GetStorageByID(backup.StorageID) + assert.NoError(t, err) + + encryptor := encryption.GetFieldEncryptor() + + backupFile, err := backupStorage.GetFile(encryptor, backup.FileName) + assert.NoError(t, err) + backupFile.Close() + + metadataFile, err := backupStorage.GetFile(encryptor, backup.FileName+".metadata") + assert.NoError(t, err) + + metadataContent, err := io.ReadAll(metadataFile) + assert.NoError(t, err) + metadataFile.Close() + + var storageMetadata backups_common.BackupMetadata + err = json.Unmarshal(metadataContent, &storageMetadata) + assert.NoError(t, err) + + assert.Equal(t, backup.ID, storageMetadata.BackupID) + + if backup.EncryptionSalt != nil && storageMetadata.EncryptionSalt != nil { + assert.Equal(t, *backup.EncryptionSalt, *storageMetadata.EncryptionSalt) + } + + if backup.EncryptionIV != nil && storageMetadata.EncryptionIV != nil { + assert.Equal(t, *backup.EncryptionIV, *storageMetadata.EncryptionIV) + } + + assert.Equal(t, backup.Encryption, storageMetadata.Encryption) + + err = backupRepo.DeleteByID(backup.ID) + assert.NoError(t, err) + + databases.RemoveTestDatabase(database) + time.Sleep(50 * 
time.Millisecond) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + func createTestRouter() *gin.Engine { return CreateTestRouter() } diff --git a/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go index b7fd5e8..0914ac4 100644 --- a/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go +++ b/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go @@ -427,7 +427,9 @@ func (uc *CreateMariadbBackupUsecase) setupBackupEncryption( backupConfig *backups_config.BackupConfig, storageWriter io.WriteCloser, ) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) { - metadata := common.BackupMetadata{} + metadata := common.BackupMetadata{ + BackupID: backupID, + } if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted { metadata.Encryption = backups_config.BackupEncryptionNone diff --git a/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go index e3184db..a2787af 100644 --- a/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go +++ b/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go @@ -269,6 +269,7 @@ func (uc *CreateMongodbBackupUsecase) setupBackupEncryption( storageWriter io.WriteCloser, ) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) { backupMetadata := common.BackupMetadata{ + BackupID: backupID, Encryption: backups_config.BackupEncryptionNone, } @@ -305,6 +306,7 @@ func (uc *CreateMongodbBackupUsecase) setupBackupEncryption( saltBase64 := base64.StdEncoding.EncodeToString(salt) nonceBase64 := base64.StdEncoding.EncodeToString(nonce) + backupMetadata.BackupID = backupID backupMetadata.Encryption = 
backups_config.BackupEncryptionEncrypted backupMetadata.EncryptionSalt = &saltBase64 backupMetadata.EncryptionIV = &nonceBase64 diff --git a/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go index 4e42450..7350b69 100644 --- a/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go +++ b/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go @@ -438,7 +438,9 @@ func (uc *CreateMysqlBackupUsecase) setupBackupEncryption( backupConfig *backups_config.BackupConfig, storageWriter io.WriteCloser, ) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) { - metadata := common.BackupMetadata{} + metadata := common.BackupMetadata{ + BackupID: backupID, + } if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted { metadata.Encryption = backups_config.BackupEncryptionNone diff --git a/backend/internal/features/backups/backups/usecases/postgresql/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/postgresql/create_backup_uc.go index bb06291..be1dc7a 100644 --- a/backend/internal/features/backups/backups/usecases/postgresql/create_backup_uc.go +++ b/backend/internal/features/backups/backups/usecases/postgresql/create_backup_uc.go @@ -482,7 +482,9 @@ func (uc *CreatePostgresqlBackupUsecase) setupBackupEncryption( backupConfig *backups_config.BackupConfig, storageWriter io.WriteCloser, ) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) { - metadata := common.BackupMetadata{} + metadata := common.BackupMetadata{ + BackupID: backupID, + } if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted { metadata.Encryption = backups_config.BackupEncryptionNone