Compare commits

...

32 Commits

Author SHA1 Message Date
Rostislav Dugin
1697bfbae8 Merge pull request #202 from databasus/develop
Develop
2026-01-02 12:34:58 +03:00
Rostislav Dugin
205cb1ec02 FEATURE (restores): Validate there is enough disk space on restore 2026-01-02 12:33:31 +03:00
Rostislav Dugin
b9668875ef FIX (mongodb): Fix MongoDB build for ARM 2026-01-02 12:21:02 +03:00
Rostislav Dugin
ca3f0281a3 FIX (temp folders): Improve temp folders cleanup over backups and restores 2026-01-02 12:09:43 +03:00
Rostislav Dugin
1b8d783d4e FIX (temp): Add NAS temp directory to .gitignore 2026-01-02 11:50:08 +03:00
Rostislav Dugin
75b0477874 FIX (temp): Remove temp directory for NAS 2026-01-02 11:49:26 +03:00
Rostislav Dugin
19533514c2 FEATURE (postgresql): Move to directory format to speed up parallel backups 2026-01-02 11:46:15 +03:00
github-actions[bot]
b3c3ef136f Update CITATION.cff to v2.18.6 2026-01-01 19:11:46 +00:00
Rostislav Dugin
4a2ada384e Merge pull request #196 from databasus/develop
FIX (assets): Add square logos
2026-01-01 21:51:30 +03:00
Rostislav Dugin
b4fc0cfb56 FIX (assets): Add square logos 2026-01-01 21:51:04 +03:00
github-actions[bot]
a8fca1943b Update CITATION.cff to v2.18.5 2025-12-30 15:37:44 +00:00
Rostislav Dugin
880b635827 Merge pull request #192 from databasus/develop
Develop
2025-12-30 18:17:22 +03:00
Rostislav Dugin
67c14cfa89 FIX (backups): Fix extension when downloading backup depending on compression type 2025-12-30 18:15:49 +03:00
Rostislav Dugin
428a87ae84 FIX (s3): Calculate checksum over streaming to S3 chunk by chunk 2025-12-30 18:12:35 +03:00
github-actions[bot]
1f1e22e69c Update CITATION.cff to v2.18.4 2025-12-30 13:07:47 +00:00
Rostislav Dugin
c325d42b89 Merge pull request #191 from databasus/develop
Develop
2025-12-30 15:46:51 +03:00
Rostislav Dugin
04a19cead1 FIX (readme): Update readme 2025-12-30 15:45:57 +03:00
Rostislav Dugin
648c315312 FIX (readme): Update README 2025-12-30 15:40:54 +03:00
github-actions[bot]
3a205c2f1d Update CITATION.cff to v2.18.3 2025-12-29 17:57:56 +00:00
Rostislav Dugin
49ebb01ffd Merge pull request #186 from databasus/develop
Develop
2025-12-29 20:36:39 +03:00
Rostislav Dugin
e957fb67dd FIX (s3): Include checksum over file upload 2025-12-29 20:35:10 +03:00
Rostislav Dugin
7cda83122a FIX (read-only): Use read-only user via frontend for MariaDB and MongoDB after creation 2025-12-29 20:31:27 +03:00
github-actions[bot]
11195d9078 Update CITATION.cff to v2.18.2 2025-12-29 15:27:04 +00:00
Rostislav Dugin
64d7a12f9f Merge pull request #184 from databasus/develop
Develop
2025-12-29 15:48:09 +03:00
Rostislav Dugin
9853ac425a FIX (sftp): Fix initial value in case of private key 2025-12-29 15:47:00 +03:00
Rostislav Dugin
6ad38228ce Merge pull request #182 from m4tt72/fix/sftp-storage-auth-method-radio-selection
fix(storages): SFTP auth method radio button now correctly switches to Private Key
2025-12-29 15:44:22 +03:00
Rostislav Dugin
7d576b50a9 Merge pull request #183 from databasus/main
Merge changes to develop
2025-12-29 15:43:01 +03:00
Rostislav Dugin
db3bd98425 FIX (readme): Fix installation methods 2025-12-29 15:41:22 +03:00
Yassine Fathi
7d8d0846cb fix(storages): SFTP auth method radio button now correctly switches to Private Key 2025-12-29 12:10:00 +01:00
github-actions[bot]
05540a8d8d Update CITATION.cff to v2.18.1 2025-12-28 17:14:24 +00:00
Rostislav Dugin
8250db9ce5 FIX (readme): Add AI disclaimer 2025-12-28 19:49:16 +03:00
github-actions[bot]
1e8cc46672 Update CITATION.cff to v2.18.0 2025-12-27 20:21:44 +00:00
47 changed files with 1100 additions and 342 deletions

View File

@@ -32,5 +32,5 @@ keywords:
- mongodb
- mariadb
license: Apache-2.0
version: 2.17.0
date-released: "2025-12-27"
version: 2.18.6
date-released: "2026-01-01"

View File

@@ -172,19 +172,23 @@ RUN if [ "$TARGETARCH" = "amd64" ]; then \
# ========= Install MongoDB Database Tools =========
# Note: MongoDB Database Tools are backward compatible - single version supports all server versions (4.0-8.0)
# Use dpkg with apt-get -f install to handle dependencies
# Note: For ARM64, we use Ubuntu 22.04 package as MongoDB doesn't provide Debian 12 ARM64 packages
RUN apt-get update && \
if [ "$TARGETARCH" = "amd64" ]; then \
wget -q https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-x86_64-100.10.0.deb -O /tmp/mongodb-database-tools.deb; \
elif [ "$TARGETARCH" = "arm64" ]; then \
wget -q https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-aarch64-100.10.0.deb -O /tmp/mongodb-database-tools.deb; \
wget -q https://fastdl.mongodb.org/tools/db/mongodb-database-tools-ubuntu2204-arm64-100.10.0.deb -O /tmp/mongodb-database-tools.deb; \
fi && \
dpkg -i /tmp/mongodb-database-tools.deb || true && \
apt-get install -f -y --no-install-recommends && \
rm /tmp/mongodb-database-tools.deb && \
dpkg -i /tmp/mongodb-database-tools.deb || apt-get install -f -y --no-install-recommends && \
rm -f /tmp/mongodb-database-tools.deb && \
rm -rf /var/lib/apt/lists/* && \
ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump && \
ln -sf /usr/bin/mongorestore /usr/local/mongodb-database-tools/bin/mongorestore
mkdir -p /usr/local/mongodb-database-tools/bin && \
if [ -f /usr/bin/mongodump ]; then \
ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump; \
fi && \
if [ -f /usr/bin/mongorestore ]; then \
ln -sf /usr/bin/mongorestore /usr/local/mongodb-database-tools/bin/mongorestore; \
fi
# Create postgres user and set up directories
RUN useradd -m -s /bin/bash postgres || true && \

View File

@@ -114,7 +114,7 @@ You have four ways to install Databasus:
## 📦 Installation
You have three ways to install Databasus: automated script (recommended), simple Docker run, or Docker Compose setup.
You have four ways to install Databasus: automated script (recommended), simple Docker run, or Docker Compose setup.
### Option 1: Automated installation script (recommended, Linux only)
@@ -245,6 +245,8 @@ This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENS
Contributions are welcome! Read the <a href="https://databasus.com/contribute">contributing guide</a> for more details, priorities and rules. If you want to contribute but don't know where to start, message me on Telegram [@rostislav_dugin](https://t.me/rostislav_dugin).
You can also join our large community of developers, DBAs and DevOps engineers on Telegram [@databasus_community](https://t.me/databasus_community).
---
## 📖 Migration guide
@@ -271,6 +273,8 @@ Then manually move databases from Postgresus to Databasus.
### Why was Postgresus renamed to Databasus?
Databasus has been developed since 2023. It started as an internal tool to back up production and home-project databases. In early 2025 it was released as an open-source project on GitHub. By the end of 2025 it had become popular, and the time for renaming came in December 2025.
It was an important step for the project's growth. There are a couple of reasons:
1. Postgresus is no longer a small tool that just adds a UI on top of pg_dump for small projects. It became a tool for individual users, DevOps engineers, DBAs, teams, companies and even large enterprises. Tens of thousands of users use Postgresus every day, and it grew into a reliable backup management tool. The initial positioning is no longer suitable: the project is not just a UI wrapper, it's a solid backup management system now (though it's still easy to use).
@@ -278,3 +282,35 @@ It was an important step for the project to grow. Actually, there are a couple o
2. New databases are supported: although the primary focus is, and always will be, PostgreSQL (with 100% support in the most efficient way), Databasus added support for MySQL, MariaDB and MongoDB. More databases will be supported later.
3. Trademark issue: "postgres" is a trademark of PostgreSQL Inc. and cannot be used in the project name. So for safety and legal reasons, we had to rename the project.
## AI disclaimer
There have been questions in issues and discussions about AI usage in the project's development. As the project focuses on security, reliability and production usage, it's important to explain how AI is used in the development process.
AI is used as a helper for:
- verification of code quality and searching for vulnerabilities
- cleaning up and improving documentation, comments and code
- assistance during development
- double-checking PRs and commits after human review
AI is not used for:
- writing entire codebases or features
- a "vibe code" approach
- code without line-by-line verification by a human
- code without tests
The project has:
- solid test coverage (both unit and integration tests)
- CI/CD pipeline automation with tests and linting to ensure code quality
- review by developers experienced in large and security-critical projects
So AI is just an assistant and a tool for developers to increase productivity and ensure code quality. The work is done by developers.
Moreover, it's important to note that we do not differentiate between bad human code and AI vibe code. There are strict requirements for any code to be merged to keep the codebase maintainable.
Even if code is written manually by a human, it's not guaranteed to be merged. Vibe code is not allowed at all and all such PRs are rejected by default (see [contributing guide](https://databasus.com/contribute)).
We also pay close attention to fast issue resolution and security [vulnerability reporting](https://github.com/databasus/databasus?tab=security-ov-file#readme).

File diff suppressed because one or more lines are too long

[Image preview: 767 KiB before → 766 KiB after]

assets/logo-square.png (new binary file, 3.2 KiB; binary diff not shown)

assets/logo-square.svg (new file, 12 lines)
View File

@@ -0,0 +1,12 @@
<svg width="128" height="128" viewBox="0 0 128 128" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_287_1020)">
<path d="M50.1522 115.189C50.1522 121.189 57.1564 121.193 59 118C60.1547 116 61 114 61 108C61 102 58.1044 96.9536 55.3194 91.5175C54.6026 90.1184 53.8323 88.6149 53.0128 86.9234C51.6073 84.0225 49.8868 81.3469 47.3885 79.2139C47.0053 78.8867 46.8935 78.0093 46.9624 77.422C47.2351 75.1036 47.5317 72.7876 47.8283 70.4718C48.3186 66.6436 48.8088 62.8156 49.1909 58.9766C49.459 56.2872 49.4542 53.5119 49.1156 50.8329C48.3833 45.0344 45.1292 40.7783 40.1351 37.9114C38.6818 37.0771 38.2533 36.1455 38.4347 34.5853C38.9402 30.2473 40.6551 26.3306 42.8342 22.6642C44.8356 19.297 47.1037 16.0858 49.3676 12.8804C49.6576 12.4699 49.9475 12.0594 50.2367 11.6488C50.6069 11.1231 51.5231 10.7245 52.1971 10.7075C60.4129 10.5017 68.6303 10.3648 76.8477 10.2636C77.4123 10.2563 78.1584 10.5196 78.5221 10.9246C83.6483 16.634 88.2284 22.712 90.9778 29.9784C91.1658 30.4758 91.3221 30.9869 91.4655 31.4997C92.4976 35.1683 92.4804 35.1803 89.5401 37.2499L89.4071 37.3436C83.8702 41.2433 81.8458 46.8198 82.0921 53.349C82.374 60.8552 84.0622 68.1313 85.9869 75.3539C86.3782 76.8218 86.6318 77.9073 85.2206 79.2609C82.3951 81.9698 81.2196 85.6872 80.6575 89.4687C80.0724 93.4081 79.599 97.3637 79.1254 101.32C78.8627 103.515 78.8497 105.368 78.318 107.904C76.2819 117.611 71 128 63 128H50.1522C45 128 41 123.189 41 115.189H50.1522Z" fill="#155DFC"/>
<path d="M46.2429 6.56033C43.3387 11.1 40.3642 15.4031 37.7614 19.9209C35.413 23.9964 33.8487 28.4226 33.0913 33.1211C32.0998 39.2728 33.694 44.7189 38.0765 48.9775C41.6846 52.4835 42.6153 56.4472 42.152 61.1675C41.1426 71.4587 39.1174 81.5401 36.2052 91.4522C36.1769 91.5477 36.0886 91.6255 35.8974 91.8977C34.1517 91.3525 32.3161 90.8446 30.5266 90.2095C5.53011 81.3376 -12.7225 64.953 -24.1842 41.0298C-25.175 38.9625 -26.079 36.8498 -26.9263 34.7202C-27.0875 34.3151 -26.9749 33.5294 -26.6785 33.2531C-17.1479 24.3723 -7.64007 15.4647 2.00468 6.70938C8.64568 0.681612 16.5812 -1.21558 25.2457 0.739942C31.9378 2.24992 38.5131 4.27834 45.1363 6.09048C45.5843 6.2128 45.9998 6.45502 46.2429 6.56033Z" fill="#155DFC"/>
<path d="M96.9586 89.3257C95.5888 84.7456 94.0796 80.4011 93.0111 75.9514C91.6065 70.0978 90.4683 64.1753 89.3739 58.2529C88.755 54.9056 89.3998 51.8176 91.89 49.2108C98.2669 42.5358 98.3933 34.7971 95.3312 26.7037C92.7471 19.8739 88.593 13.9904 83.7026 8.60904C83.1298 7.9788 82.5693 7.33641 81.918 6.60491C82.2874 6.40239 82.5709 6.18773 82.8909 6.07999C90.1281 3.64085 97.4495 1.54842 105.041 0.488845C112.781 -0.591795 119.379 1.81818 125.045 6.97592C130.017 11.5018 134.805 16.2327 139.812 20.7188C143.822 24.3115 148.013 27.7066 152.19 31.1073C152.945 31.7205 153.137 32.2154 152.913 33.1041C149.059 48.4591 141.312 61.4883 129.457 71.9877C120.113 80.2626 109.35 85.9785 96.9586 89.3265V89.3257Z" fill="#155DFC"/>
</g>
<defs>
<clipPath id="clip0_287_1020">
<rect width="128" height="128" rx="6" fill="white"/>
</clipPath>
</defs>
</svg>

[SVG preview: 3.0 KiB]

assets/tools/README.md (new file, 17 lines)
View File

@@ -0,0 +1,17 @@
We keep binaries here to speed up CI/CD tasks and image builds.
Docker image needs:
- PostgreSQL client tools (versions 12-18)
- MySQL client tools (versions 5.7, 8.0, 8.4, 9)
- MariaDB client tools (versions 10.6, 12.1)
- MongoDB Database Tools (latest)
For most of the tools, we need a couple of binaries for each version. However, downloading them on each run would pull a couple of GBs every time.
So, to speed things up, we keep only the required executables (like pg_dump, mysqldump, mariadb-dump, mongodump, etc.).
It takes:
- ~ 100MB for ARM
- ~ 100MB for x64
Instead of GBs. See the Dockerfile for usage details.

backend/.gitignore (vendored, 3 lines changed)
View File

@@ -16,4 +16,5 @@ databasus-backend.exe
ui/build/*
pgdata-for-restore/
temp/
cmd.exe
cmd.exe
temp/

View File

@@ -0,0 +1,17 @@
package common
import backups_config "databasus-backend/internal/features/backups/config"
type BackupType string
const (
BackupTypeDefault BackupType = "DEFAULT" // For MySQL, MongoDB, PostgreSQL legacy (-Fc)
BackupTypeDirectory BackupType = "DIRECTORY" // PostgreSQL directory type (-Fd)
)
type BackupMetadata struct {
EncryptionSalt *string
EncryptionIV *string
Encryption backups_config.BackupEncryption
Type BackupType
}

View File

@@ -1,6 +1,7 @@
package backups
import (
"databasus-backend/internal/features/backups/backups/common"
"databasus-backend/internal/features/databases"
users_middleware "databasus-backend/internal/features/users/middleware"
"fmt"
@@ -182,7 +183,7 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
return
}
fileReader, dbType, err := c.backupService.GetBackupFile(user, id)
fileReader, backup, database, err := c.backupService.GetBackupFile(user, id)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -193,15 +194,12 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
}
}()
extension := ".dump.zst"
if dbType == databases.DatabaseTypeMysql {
extension = ".sql.zst"
}
filename := c.generateBackupFilename(backup, database)
ctx.Header("Content-Type", "application/octet-stream")
ctx.Header(
"Content-Disposition",
fmt.Sprintf("attachment; filename=\"backup_%s%s\"", id.String(), extension),
fmt.Sprintf("attachment; filename=\"%s\"", filename),
)
_, err = io.Copy(ctx.Writer, fileReader)
@@ -214,3 +212,66 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
type MakeBackupRequest struct {
DatabaseID uuid.UUID `json:"database_id" binding:"required"`
}
func (c *BackupController) generateBackupFilename(
backup *Backup,
database *databases.Database,
) string {
// Format timestamp as YYYY-MM-DD_HH-mm-ss
timestamp := backup.CreatedAt.Format("2006-01-02_15-04-05")
// Sanitize database name for filename (replace spaces and special chars)
safeName := sanitizeFilename(database.Name)
// Determine extension based on database type and backup type
extension := c.getBackupExtension(database.Type, backup.Type)
return fmt.Sprintf("%s_backup_%s%s", safeName, timestamp, extension)
}
func (c *BackupController) getBackupExtension(
dbType databases.DatabaseType,
backupType common.BackupType,
) string {
switch dbType {
case databases.DatabaseTypeMysql, databases.DatabaseTypeMariadb:
return ".sql.zst"
case databases.DatabaseTypePostgres:
// For PostgreSQL, use .tar for directory type, .dump for custom type
if backupType == common.BackupTypeDirectory {
return ".tar"
}
return ".dump"
case databases.DatabaseTypeMongodb:
return ".archive"
default:
return ".backup"
}
}
func sanitizeFilename(name string) string {
// Replace characters that are invalid in filenames
replacer := map[rune]rune{
' ': '_',
'/': '-',
'\\': '-',
':': '-',
'*': '-',
'?': '-',
'"': '-',
'<': '-',
'>': '-',
'|': '-',
}
result := make([]rune, 0, len(name))
for _, char := range name {
if replacement, exists := replacer[char]; exists {
result = append(result, replacement)
} else {
result = append(result, char)
}
}
return string(result)
}
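
For a sense of what the download filename looks like end to end, here is a minimal stand-alone sketch (hypothetical, not part of this changeset) that mirrors the replacement set of sanitizeFilename and the timestamp layout used by generateBackupFilename:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Same character replacements as sanitizeFilename above.
	sanitize := strings.NewReplacer(
		" ", "_", "/", "-", "\\", "-", ":", "-", "*", "-",
		"?", "-", "\"", "-", "<", "-", ">", "-", "|", "-",
	)
	name := sanitize.Replace("my:db/test")
	// Same layout string as generateBackupFilename: YYYY-MM-DD_HH-mm-ss.
	ts := time.Date(2026, 1, 2, 12, 33, 31, 0, time.UTC).Format("2006-01-02_15-04-05")
	fmt.Printf("%s_backup_%s.tar\n", name, ts)
	// Prints: my-db-test_backup_2026-01-02_12-33-31.tar
}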

View File

@@ -16,6 +16,7 @@ import (
"github.com/stretchr/testify/assert"
audit_logs "databasus-backend/internal/features/audit_logs"
common "databasus-backend/internal/features/backups/backups/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/databases/databases/postgresql"
@@ -494,6 +495,123 @@ func Test_DownloadBackup_AuditLogWritten(t *testing.T) {
assert.True(t, found, "Audit log for backup download not found")
}
func Test_DownloadBackup_ProperFilenameForPostgreSQL(t *testing.T) {
tests := []struct {
name string
databaseName string
backupType string
expectedExt string
expectedInName string
}{
{
name: "PostgreSQL with directory type",
databaseName: "my_postgres_db",
backupType: "DIRECTORY",
expectedExt: ".tar",
expectedInName: "my_postgres_db_backup_",
},
{
name: "PostgreSQL with default type",
databaseName: "my_postgres_db",
backupType: "DEFAULT",
expectedExt: ".dump",
expectedInName: "my_postgres_db_backup_",
},
{
name: "Database name with spaces",
databaseName: "my test db",
backupType: "DIRECTORY",
expectedExt: ".tar",
expectedInName: "my_test_db_backup_",
},
{
name: "Database name with special characters",
databaseName: "my:db/test",
backupType: "DEFAULT",
expectedExt: ".dump",
expectedInName: "my-db-test_backup_",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database := createTestDatabase(tt.databaseName, workspace.ID, owner.Token, router)
storage := createTestStorage(workspace.ID)
configService := backups_config.GetBackupConfigService()
config, err := configService.GetBackupConfigByDbId(database.ID)
assert.NoError(t, err)
config.IsBackupsEnabled = true
config.StorageID = &storage.ID
config.Storage = storage
_, err = configService.SaveBackupConfig(config)
assert.NoError(t, err)
backup := createTestBackupWithType(database, owner, tt.backupType)
resp := test_utils.MakeGetRequest(
t,
router,
fmt.Sprintf("/api/v1/backups/%s/file", backup.ID.String()),
"Bearer "+owner.Token,
http.StatusOK,
)
contentDisposition := resp.Headers.Get("Content-Disposition")
assert.NotEmpty(t, contentDisposition, "Content-Disposition header should be present")
// Verify the filename contains expected parts
assert.Contains(
t,
contentDisposition,
tt.expectedInName,
"Filename should contain sanitized database name",
)
assert.Contains(
t,
contentDisposition,
tt.expectedExt,
"Filename should have correct extension",
)
assert.Contains(t, contentDisposition, "attachment", "Should be an attachment")
// Verify timestamp format (YYYY-MM-DD_HH-mm-ss)
assert.Regexp(
t,
`\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}`,
contentDisposition,
"Filename should contain timestamp",
)
})
}
}
func Test_SanitizeFilename(t *testing.T) {
tests := []struct {
input string
expected string
}{
{input: "simple_name", expected: "simple_name"},
{input: "name with spaces", expected: "name_with_spaces"},
{input: "name/with\\slashes", expected: "name-with-slashes"},
{input: "name:with*special?chars", expected: "name-with-special-chars"},
{input: "name<with>pipes|", expected: "name-with-pipes-"},
{input: `name"with"quotes`, expected: "name-with-quotes"},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
result := sanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func Test_CancelBackup_InProgressBackup_SuccessfullyCancelled(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
@@ -709,3 +827,20 @@ func createTestBackup(
return backup
}
func createTestBackupWithType(
database *databases.Database,
owner *users_dto.SignInResponseDTO,
backupType string,
) *Backup {
backup := createTestBackup(database, owner)
// Update the format field
repo := &BackupRepository{}
backup.Type = common.BackupType(backupType)
if err := repo.Save(backup); err != nil {
panic(err)
}
return backup
}

View File

@@ -3,7 +3,7 @@ package backups
import (
"context"
usecases_common "databasus-backend/internal/features/backups/backups/usecases/common"
usecases_common "databasus-backend/internal/features/backups/backups/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"

View File

@@ -1,6 +1,7 @@
package backups
import (
common "databasus-backend/internal/features/backups/backups/common"
backups_config "databasus-backend/internal/features/backups/config"
"time"
@@ -24,5 +25,6 @@ type Backup struct {
EncryptionIV *string `json:"-" gorm:"column:encryption_iv"`
Encryption backups_config.BackupEncryption `json:"encryption" gorm:"column:encryption;type:text;not null;default:'NONE'"`
CreatedAt time.Time `json:"createdAt" gorm:"column:created_at"`
Type common.BackupType `json:"type" gorm:"column:type;type:text;not null;default:'DEFAULT'"`
CreatedAt time.Time `json:"createdAt" gorm:"column:created_at"`
}

View File

@@ -343,6 +343,7 @@ func (s *BackupService) MakeBackup(databaseID uuid.UUID, isLastTry bool) {
backup.EncryptionSalt = backupMetadata.EncryptionSalt
backup.EncryptionIV = backupMetadata.EncryptionIV
backup.Encryption = backupMetadata.Encryption
backup.Type = backupMetadata.Type
}
if err := s.backupRepository.Save(backup); err != nil {
@@ -502,19 +503,19 @@ func (s *BackupService) CancelBackup(
func (s *BackupService) GetBackupFile(
user *users_models.User,
backupID uuid.UUID,
) (io.ReadCloser, databases.DatabaseType, error) {
) (io.ReadCloser, *Backup, *databases.Database, error) {
backup, err := s.backupRepository.FindByID(backupID)
if err != nil {
return nil, "", err
return nil, nil, nil, err
}
database, err := s.databaseService.GetDatabaseByID(backup.DatabaseID)
if err != nil {
return nil, "", err
return nil, nil, nil, err
}
if database.WorkspaceID == nil {
return nil, "", errors.New("cannot download backup for database without workspace")
return nil, nil, nil, errors.New("cannot download backup for database without workspace")
}
canAccess, _, err := s.workspaceService.CanUserAccessWorkspace(
@@ -522,10 +523,12 @@ func (s *BackupService) GetBackupFile(
user,
)
if err != nil {
return nil, "", err
return nil, nil, nil, err
}
if !canAccess {
return nil, "", errors.New("insufficient permissions to download backup for this database")
return nil, nil, nil, errors.New(
"insufficient permissions to download backup for this database",
)
}
s.auditLogService.WriteAuditLog(
@@ -540,10 +543,10 @@ func (s *BackupService) GetBackupFile(
reader, err := s.getBackupReader(backupID)
if err != nil {
return nil, "", err
return nil, nil, nil, err
}
return reader, database.Type, nil
return reader, backup, database, nil
}
func (s *BackupService) deleteBackup(backup *Backup) error {

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
"databasus-backend/internal/features/backups/backups/usecases/common"
common "databasus-backend/internal/features/backups/backups/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
encryption_secrets "databasus-backend/internal/features/encryption/secrets"

View File

@@ -1,9 +0,0 @@
package common
import backups_config "databasus-backend/internal/features/backups/config"
type BackupMetadata struct {
EncryptionSalt *string
EncryptionIV *string
Encryption backups_config.BackupEncryption
}

View File

@@ -4,7 +4,7 @@ import (
"context"
"errors"
usecases_common "databasus-backend/internal/features/backups/backups/usecases/common"
common "databasus-backend/internal/features/backups/backups/common"
usecases_mariadb "databasus-backend/internal/features/backups/backups/usecases/mariadb"
usecases_mongodb "databasus-backend/internal/features/backups/backups/usecases/mongodb"
usecases_mysql "databasus-backend/internal/features/backups/backups/usecases/mysql"
@@ -30,7 +30,7 @@ func (uc *CreateBackupUsecase) Execute(
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
switch database.Type {
case databases.DatabaseTypePostgres:
return uc.CreatePostgresqlBackupUsecase.Execute(

View File

@@ -18,8 +18,8 @@ import (
"github.com/klauspost/compress/zstd"
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
usecases_common "databasus-backend/internal/features/backups/backups/usecases/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
mariadbtypes "databasus-backend/internal/features/databases/databases/mariadb"
@@ -30,7 +30,7 @@ import (
)
const (
backupTimeout = 23 * time.Hour
backupTimeout = 6 * time.Hour
shutdownCheckInterval = 1 * time.Second
copyBufferSize = 8 * 1024 * 1024
progressReportIntervalMB = 1.0
@@ -57,7 +57,7 @@ func (uc *CreateMariadbBackupUsecase) Execute(
db *databases.Database,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info(
"Creating MariaDB backup via mariadb-dump",
"databaseId", db.ID,
@@ -140,7 +140,7 @@ func (uc *CreateMariadbBackupUsecase) streamToStorage(
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
mdbConfig *mariadbtypes.MariadbDatabase,
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info("Streaming MariaDB backup to storage", "mariadbBin", mariadbBin)
ctx, cancel := uc.createBackupContext(parentCtx)
@@ -196,7 +196,7 @@ func (uc *CreateMariadbBackupUsecase) streamToStorage(
if err != nil {
return nil, fmt.Errorf("failed to create zstd writer: %w", err)
}
countingWriter := usecases_common.NewCountingWriter(zstdWriter)
countingWriter := common.NewCountingWriter(zstdWriter)
saveErrCh := make(chan error, 1)
go func() {
@@ -264,7 +264,7 @@ func (uc *CreateMariadbBackupUsecase) createTempMyCnfFile(
mdbConfig *mariadbtypes.MariadbDatabase,
password string,
) (string, error) {
tempDir, err := os.MkdirTemp("", "mycnf")
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}
@@ -401,8 +401,8 @@ func (uc *CreateMariadbBackupUsecase) setupBackupEncryption(
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) {
metadata := usecases_common.BackupMetadata{}
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
metadata := common.BackupMetadata{}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone

View File

@@ -15,8 +15,8 @@ import (
"github.com/google/uuid"
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
usecases_common "databasus-backend/internal/features/backups/backups/usecases/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
mongodbtypes "databasus-backend/internal/features/databases/databases/mongodb"
@@ -27,7 +27,7 @@ import (
)
const (
backupTimeout = 23 * time.Hour
backupTimeout = 6 * time.Hour
shutdownCheckInterval = 1 * time.Second
copyBufferSize = 8 * 1024 * 1024
progressReportIntervalMB = 1.0
@@ -51,7 +51,7 @@ func (uc *CreateMongodbBackupUsecase) Execute(
db *databases.Database,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info(
"Creating MongoDB backup via mongodump",
"databaseId", db.ID,
@@ -124,7 +124,7 @@ func (uc *CreateMongodbBackupUsecase) streamToStorage(
args []string,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info("Streaming MongoDB backup to storage", "mongodumpBin", mongodumpBin)
ctx, cancel := uc.createBackupContext(parentCtx)
@@ -175,7 +175,7 @@ func (uc *CreateMongodbBackupUsecase) streamToStorage(
return nil, err
}
countingWriter := usecases_common.NewCountingWriter(finalWriter)
countingWriter := common.NewCountingWriter(finalWriter)
saveErrCh := make(chan error, 1)
go func() {
@@ -264,8 +264,8 @@ func (uc *CreateMongodbBackupUsecase) setupBackupEncryption(
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) {
backupMetadata := usecases_common.BackupMetadata{
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
backupMetadata := common.BackupMetadata{
Encryption: backups_config.BackupEncryptionNone,
}

View File

@@ -18,8 +18,8 @@ import (
"github.com/klauspost/compress/zstd"
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
usecases_common "databasus-backend/internal/features/backups/backups/usecases/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
mysqltypes "databasus-backend/internal/features/databases/databases/mysql"
@@ -30,7 +30,7 @@ import (
)
const (
backupTimeout = 23 * time.Hour
backupTimeout = 6 * time.Hour
shutdownCheckInterval = 1 * time.Second
copyBufferSize = 8 * 1024 * 1024
progressReportIntervalMB = 1.0
@@ -57,7 +57,7 @@ func (uc *CreateMysqlBackupUsecase) Execute(
db *databases.Database,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info(
"Creating MySQL backup via mysqldump",
"databaseId", db.ID,
@@ -155,7 +155,7 @@ func (uc *CreateMysqlBackupUsecase) streamToStorage(
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
myConfig *mysqltypes.MysqlDatabase,
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info("Streaming MySQL backup to storage", "mysqlBin", mysqlBin)
ctx, cancel := uc.createBackupContext(parentCtx)
@@ -211,7 +211,7 @@ func (uc *CreateMysqlBackupUsecase) streamToStorage(
if err != nil {
return nil, fmt.Errorf("failed to create zstd writer: %w", err)
}
countingWriter := usecases_common.NewCountingWriter(zstdWriter)
countingWriter := common.NewCountingWriter(zstdWriter)
saveErrCh := make(chan error, 1)
go func() {
@@ -279,7 +279,7 @@ func (uc *CreateMysqlBackupUsecase) createTempMyCnfFile(
myConfig *mysqltypes.MysqlDatabase,
password string,
) (string, error) {
tempDir, err := os.MkdirTemp("", "mycnf")
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}
@@ -414,8 +414,8 @@ func (uc *CreateMysqlBackupUsecase) setupBackupEncryption(
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) {
metadata := usecases_common.BackupMetadata{}
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
metadata := common.BackupMetadata{}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone

View File

@@ -1,6 +1,7 @@
package usecases_postgresql
import (
"archive/tar"
"context"
"encoding/base64"
"errors"
@@ -15,23 +16,23 @@ import (
"time"
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
usecases_common "databasus-backend/internal/features/backups/backups/usecases/common"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
pgtypes "databasus-backend/internal/features/databases/databases/postgresql"
encryption_secrets "databasus-backend/internal/features/encryption/secrets"
"databasus-backend/internal/features/storages"
"databasus-backend/internal/util/encryption"
files_utils "databasus-backend/internal/util/files"
"databasus-backend/internal/util/tools"
"github.com/google/uuid"
)
const (
backupTimeout = 23 * time.Hour
backupTimeout = 6 * time.Hour
shutdownCheckInterval = 1 * time.Second
copyBufferSize = 8 * 1024 * 1024
progressReportIntervalMB = 1.0
pgConnectTimeout = 30
compressionLevel = 5
@@ -46,11 +47,6 @@ type CreatePostgresqlBackupUsecase struct {
fieldEncryptor encryption.FieldEncryptor
}
type writeResult struct {
bytesWritten int
writeErr error
}
func (uc *CreatePostgresqlBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
@@ -60,9 +56,9 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
backupProgressListener func(
completedMBs float64,
),
) (*usecases_common.BackupMetadata, error) {
) (*common.BackupMetadata, error) {
uc.logger.Info(
"Creating PostgreSQL backup via pg_dump custom format",
"Creating PostgreSQL backup via pg_dump directory type",
"databaseId",
db.ID,
"storageId",
@@ -83,14 +79,12 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
return nil, fmt.Errorf("database name is required for pg_dump backups")
}
args := uc.buildPgDumpArgs(pg)
decryptedPassword, err := uc.fieldEncryptor.Decrypt(db.ID, pg.Password)
if err != nil {
return nil, fmt.Errorf("failed to decrypt database password: %w", err)
}
return uc.streamToStorage(
return uc.executeDirectoryBackup(
ctx,
backupID,
backupConfig,
@@ -100,66 +94,127 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
config.GetEnv().EnvMode,
config.GetEnv().PostgresesInstallDir,
),
args,
pg,
decryptedPassword,
storage,
db,
backupProgressListener,
)
}
// streamToStorage streams pg_dump output directly to storage
func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
// executeDirectoryBackup runs pg_dump with directory type and streams as TAR to storage
func (uc *CreatePostgresqlBackupUsecase) executeDirectoryBackup(
parentCtx context.Context,
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
pgBin string,
args []string,
pg *pgtypes.PostgresqlDatabase,
password string,
storage *storages.Storage,
db *databases.Database,
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
uc.logger.Info("Streaming PostgreSQL backup to storage", "pgBin", pgBin, "args", args)
) (*common.BackupMetadata, error) {
ctx, cancel := uc.createBackupContext(parentCtx)
defer cancel()
pgpassFile, err := uc.setupPgpassFile(db.Postgresql, password)
// Create temporary directory for pg_dump output
err := files_utils.EnsureDirectories([]string{config.GetEnv().TempFolder})
if err != nil {
return nil, fmt.Errorf("failed to ensure temp directories: %w", err)
}
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "pgdump_"+backupID.String())
if err != nil {
return nil, fmt.Errorf("failed to create temp directory: %w", err)
}
defer func() {
_ = os.RemoveAll(tempDir)
}()
outputDir := filepath.Join(tempDir, "dump")
args := uc.buildPgDumpArgs(pg, outputDir)
uc.logger.Info(
"Executing PostgreSQL backup with directory type",
"pgBin",
pgBin,
"args",
args,
"outputDir",
outputDir,
)
pgpassFile, err := uc.setupPgpassFile(pg, password)
if err != nil {
return nil, err
}
defer func() {
if pgpassFile != "" {
// Remove the entire temp directory (which contains the .pgpass file)
_ = os.RemoveAll(filepath.Dir(pgpassFile))
}
}()
// Execute pg_dump to directory
cmd := exec.CommandContext(ctx, pgBin, args...)
uc.logger.Info("Executing PostgreSQL backup command", "command", cmd.String())
if err := uc.setupPgEnvironment(cmd, pgpassFile, db.Postgresql.IsHttps, password, db.Postgresql.CpuCount, pgBin); err != nil {
if err := uc.setupPgEnvironment(cmd, pgpassFile, pg.IsHttps, password, pg.CpuCount, pgBin); err != nil {
return nil, err
}
pgStdout, err := cmd.StdoutPipe()
if err != nil {
return nil, fmt.Errorf("stdout pipe: %w", err)
}
pgStderr, err := cmd.StderrPipe()
if err != nil {
return nil, fmt.Errorf("stderr pipe: %w", err)
}
// Capture stderr in a separate goroutine to ensure we don't miss any error output
stderrCh := make(chan []byte, 1)
go func() {
stderrOutput, _ := io.ReadAll(pgStderr)
stderrCh <- stderrOutput
}()
if err = cmd.Start(); err != nil {
return nil, fmt.Errorf("start %s: %w", filepath.Base(pgBin), err)
}
waitErr := cmd.Wait()
stderrOutput := <-stderrCh
select {
case <-ctx.Done():
return nil, uc.checkCancellationReason()
default:
}
if waitErr != nil {
if err := uc.checkCancellation(ctx); err != nil {
return nil, err
}
return nil, uc.buildPgDumpErrorMessage(waitErr, stderrOutput, pgBin, args, password)
}
uc.logger.Info(
"pg_dump completed successfully, streaming TAR to storage",
"outputDir",
outputDir,
)
// Stream directory as TAR to storage
return uc.streamDirectoryToStorage(
ctx,
backupID,
backupConfig,
outputDir,
storage,
backupProgressListener,
)
}
// streamDirectoryToStorage creates a TAR archive from the directory and streams it to storage
func (uc *CreatePostgresqlBackupUsecase) streamDirectoryToStorage(
ctx context.Context,
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
sourceDir string,
storage *storages.Storage,
backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
@@ -171,162 +226,176 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
return nil, err
}
countingWriter := usecases_common.NewCountingWriter(finalWriter)
// Set type to DIRECTORY for new PostgreSQL backups
backupMetadata.Type = common.BackupTypeDirectory
// The backup ID becomes the object key / filename in storage
// Start streaming into storage in its own goroutine
// Start streaming into storage
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErrCh <- saveErr
}()
// Start pg_dump
if err = cmd.Start(); err != nil {
return nil, fmt.Errorf("start %s: %w", filepath.Base(pgBin), err)
}
// Copy pg output directly to storage with shutdown checks
copyResultCh := make(chan error, 1)
bytesWrittenCh := make(chan int64, 1)
// Create TAR and stream to storage
tarErrCh := make(chan error, 1)
totalSizeCh := make(chan int64, 1)
go func() {
bytesWritten, err := uc.copyWithShutdownCheck(
totalSize, tarErr := uc.writeTarToWriter(
ctx,
countingWriter,
pgStdout,
sourceDir,
finalWriter,
backupProgressListener,
)
bytesWrittenCh <- bytesWritten
copyResultCh <- err
totalSizeCh <- totalSize
tarErrCh <- tarErr
// Close encryption writer first if present
if encryptionWriter != nil {
if closeErr := encryptionWriter.Close(); closeErr != nil {
uc.logger.Error("Failed to close encryption writer", "error", closeErr)
}
}
// Then close the pipe writer to signal EOF to storage
if closeErr := storageWriter.Close(); closeErr != nil {
uc.logger.Error("Failed to close pipe writer", "error", closeErr)
}
}()
copyErr := <-copyResultCh
bytesWritten := <-bytesWrittenCh
waitErr := cmd.Wait()
tarErr := <-tarErrCh
totalSize := <-totalSizeCh
saveErr := <-saveErrCh
select {
case <-ctx.Done():
uc.cleanupOnCancellation(encryptionWriter, storageWriter, saveErrCh)
return nil, uc.checkCancellationReason()
default:
}
if err := uc.closeWriters(encryptionWriter, storageWriter); err != nil {
<-saveErrCh
return nil, err
}
saveErr := <-saveErrCh
stderrOutput := <-stderrCh
// Send final sizing after backup is completed
if waitErr == nil && copyErr == nil && saveErr == nil && backupProgressListener != nil {
sizeMB := float64(bytesWritten) / (1024 * 1024)
// Send final size after backup is completed
if tarErr == nil && saveErr == nil && backupProgressListener != nil {
sizeMB := float64(totalSize) / (1024 * 1024)
backupProgressListener(sizeMB)
}
switch {
case waitErr != nil:
if tarErr != nil {
if err := uc.checkCancellation(ctx); err != nil {
return nil, err
}
return nil, uc.buildPgDumpErrorMessage(waitErr, stderrOutput, pgBin, args, password)
case copyErr != nil:
if err := uc.checkCancellation(ctx); err != nil {
return nil, err
}
return nil, fmt.Errorf("copy to storage: %w", copyErr)
case saveErr != nil:
if err := uc.checkCancellation(ctx); err != nil {
return nil, err
}
return nil, fmt.Errorf("save to storage: %w", saveErr)
return nil, fmt.Errorf("failed to create TAR archive: %w", tarErr)
}
if saveErr != nil {
if err := uc.checkCancellation(ctx); err != nil {
return nil, err
}
return nil, fmt.Errorf("failed to save to storage: %w", saveErr)
}
uc.logger.Info(
"Backup completed successfully",
"backupId",
backupID,
"totalSizeBytes",
totalSize,
)
return &backupMetadata, nil
}
func (uc *CreatePostgresqlBackupUsecase) copyWithShutdownCheck(
// writeTarToWriter creates a TAR archive from sourceDir and writes it to the writer
func (uc *CreatePostgresqlBackupUsecase) writeTarToWriter(
ctx context.Context,
dst io.Writer,
src io.Reader,
sourceDir string,
writer io.Writer,
backupProgressListener func(completedMBs float64),
) (int64, error) {
buf := make([]byte, copyBufferSize)
var totalBytesWritten int64
tarWriter := tar.NewWriter(writer)
defer func() {
_ = tarWriter.Close()
}()
var totalSize int64
var lastReportedMB float64
for {
err := filepath.Walk(sourceDir, func(path string, info os.FileInfo, walkErr error) error {
if walkErr != nil {
return walkErr
}
select {
case <-ctx.Done():
return totalBytesWritten, fmt.Errorf("copy cancelled: %w", ctx.Err())
return ctx.Err()
default:
}
if config.IsShouldShutdown() {
return totalBytesWritten, fmt.Errorf("copy cancelled due to shutdown")
return fmt.Errorf("backup cancelled due to shutdown")
}
bytesRead, readErr := src.Read(buf)
if bytesRead > 0 {
writeResultCh := make(chan writeResult, 1)
go func() {
bytesWritten, writeErr := dst.Write(buf[0:bytesRead])
writeResultCh <- writeResult{bytesWritten, writeErr}
}()
// Get relative path for TAR header
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return fmt.Errorf("failed to get relative path: %w", err)
}
var bytesWritten int
var writeErr error
// Skip the root directory itself
if relPath == "." {
return nil
}
select {
case <-ctx.Done():
return totalBytesWritten, fmt.Errorf("copy cancelled during write: %w", ctx.Err())
case result := <-writeResultCh:
bytesWritten = result.bytesWritten
writeErr = result.writeErr
}
// Create TAR header
header, err := tar.FileInfoHeader(info, "")
if err != nil {
return fmt.Errorf("failed to create TAR header: %w", err)
}
header.Name = relPath
if bytesWritten < 0 || bytesRead < bytesWritten {
bytesWritten = 0
if writeErr == nil {
writeErr = fmt.Errorf("invalid write result")
}
}
if err := tarWriter.WriteHeader(header); err != nil {
return fmt.Errorf("failed to write TAR header: %w", err)
}
if writeErr != nil {
return totalBytesWritten, writeErr
}
// If it's a directory, we're done
if info.IsDir() {
return nil
}
if bytesRead != bytesWritten {
return totalBytesWritten, io.ErrShortWrite
}
// Copy file content to TAR
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", path, err)
}
defer func() {
_ = file.Close()
}()
totalBytesWritten += int64(bytesWritten)
written, err := io.Copy(tarWriter, file)
if err != nil {
return fmt.Errorf("failed to write file %s to TAR: %w", path, err)
}
if backupProgressListener != nil {
currentSizeMB := float64(totalBytesWritten) / (1024 * 1024)
if currentSizeMB >= lastReportedMB+progressReportIntervalMB {
backupProgressListener(currentSizeMB)
lastReportedMB = currentSizeMB
}
totalSize += written
// Report progress
if backupProgressListener != nil {
currentSizeMB := float64(totalSize) / (1024 * 1024)
if currentSizeMB >= lastReportedMB+progressReportIntervalMB {
backupProgressListener(currentSizeMB)
lastReportedMB = currentSizeMB
}
}
if readErr != nil {
if readErr != io.EOF {
return totalBytesWritten, readErr
}
break
}
}
return nil
})
return totalBytesWritten, nil
return totalSize, err
}
func (uc *CreatePostgresqlBackupUsecase) buildPgDumpArgs(pg *pgtypes.PostgresqlDatabase) []string {
func (uc *CreatePostgresqlBackupUsecase) buildPgDumpArgs(
pg *pgtypes.PostgresqlDatabase,
outputDir string,
) []string {
args := []string{
"-Fc",
"-Fd", // Directory type (enables parallel dump)
"-f", outputDir, // Output directory
"--no-password",
"-h", pg.Host,
"-p", strconv.Itoa(pg.Port),
@@ -335,7 +404,7 @@ func (uc *CreatePostgresqlBackupUsecase) buildPgDumpArgs(pg *pgtypes.PostgresqlD
"--verbose",
}
// Add parallel jobs based on CPU count
// Parallel jobs now actually work with directory type
if pg.CpuCount > 1 {
args = append(args, "-j", strconv.Itoa(pg.CpuCount))
}
@@ -476,8 +545,8 @@ func (uc *CreatePostgresqlBackupUsecase) setupBackupEncryption(
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) {
metadata := usecases_common.BackupMetadata{}
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
metadata := common.BackupMetadata{}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone
@@ -521,63 +590,6 @@ func (uc *CreatePostgresqlBackupUsecase) setupBackupEncryption(
return encWriter, encWriter, metadata, nil
}
func (uc *CreatePostgresqlBackupUsecase) cleanupOnCancellation(
encryptionWriter *backup_encryption.EncryptionWriter,
storageWriter io.WriteCloser,
saveErrCh chan error,
) {
if encryptionWriter != nil {
go func() {
if closeErr := encryptionWriter.Close(); closeErr != nil {
uc.logger.Error(
"Failed to close encrypting writer during cancellation",
"error",
closeErr,
)
}
}()
}
if err := storageWriter.Close(); err != nil {
uc.logger.Error("Failed to close pipe writer during cancellation", "error", err)
}
<-saveErrCh
}
func (uc *CreatePostgresqlBackupUsecase) closeWriters(
encryptionWriter *backup_encryption.EncryptionWriter,
storageWriter io.WriteCloser,
) error {
encryptionCloseErrCh := make(chan error, 1)
if encryptionWriter != nil {
go func() {
closeErr := encryptionWriter.Close()
if closeErr != nil {
uc.logger.Error("Failed to close encrypting writer", "error", closeErr)
}
encryptionCloseErrCh <- closeErr
}()
} else {
encryptionCloseErrCh <- nil
}
encryptionCloseErr := <-encryptionCloseErrCh
if encryptionCloseErr != nil {
if err := storageWriter.Close(); err != nil {
uc.logger.Error("Failed to close pipe writer after encryption error", "error", err)
}
return fmt.Errorf("failed to close encryption writer: %w", encryptionCloseErr)
}
if err := storageWriter.Close(); err != nil {
uc.logger.Error("Failed to close pipe writer", "error", err)
return err
}
return nil
}
func (uc *CreatePostgresqlBackupUsecase) checkCancellation(ctx context.Context) error {
select {
case <-ctx.Done():
@@ -759,7 +771,7 @@ func (uc *CreatePostgresqlBackupUsecase) createTempPgpassFile(
escapedPassword,
)
tempDir, err := os.MkdirTemp("", "pgpass")
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "pgpass_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}

View File

@@ -1,7 +1,9 @@
package disk
import (
"databasus-backend/internal/config"
"fmt"
"path/filepath"
"runtime"
"github.com/shirou/gopsutil/v4/disk"
@@ -12,10 +14,14 @@ type DiskService struct{}
func (s *DiskService) GetDiskUsage() (*DiskUsage, error) {
platform := s.detectPlatform()
// Set path based on platform
path := "/"
var path string
if platform == PlatformWindows {
path = "C:\\"
} else {
// Use databasus-data folder location for Linux (Docker)
cfg := config.GetEnv()
path = filepath.Dir(cfg.DataFolder) // Gets /databasus-data from /databasus-data/backups
}
diskUsage, err := disk.Usage(path)
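
The filepath.Dir call is what turns the configured data folder into its mount root; a quick sketch of that behavior (assuming DataFolder is /databasus-data/backups, as the comment above suggests):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Assumed value; mirrors the comment in the diff above.
	dataFolder := "/databasus-data/backups"
	fmt.Println(filepath.Dir(dataFolder)) // /databasus-data
}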

View File

@@ -250,6 +250,44 @@ func Test_RestoreBackup_AuditLogWritten(t *testing.T) {
assert.True(t, found, "Audit log for restore not found")
}
func Test_RestoreBackup_InsufficientDiskSpace_ReturnsError(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
_, backup := createTestDatabaseWithBackupForRestore(workspace, owner, router)
// Update backup size to 10 TB via repository
repo := &backups.BackupRepository{}
backup.BackupSizeMb = 10485760.0 // 10 TB in MB
err := repo.Save(backup)
assert.NoError(t, err)
request := RestoreBackupRequest{
PostgresqlDatabase: &postgresql.PostgresqlDatabase{
Version: tools.PostgresqlVersion16,
Host: "localhost",
Port: 5432,
Username: "postgres",
Password: "postgres",
},
}
testResp := test_utils.MakePostRequest(
t,
router,
fmt.Sprintf("/api/v1/restores/%s/restore", backup.ID.String()),
"Bearer "+owner.Token,
request,
http.StatusBadRequest,
)
bodyStr := string(testResp.Body)
assert.Contains(t, bodyStr, "is required")
assert.Contains(t, bodyStr, "is available")
assert.Contains(t, bodyStr, "disk space")
}
func createTestDatabaseWithBackupForRestore(
workspace *workspaces_models.Workspace,
owner *users_dto.SignInResponseDTO,

View File

@@ -5,6 +5,7 @@ import (
"databasus-backend/internal/features/backups/backups"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/disk"
"databasus-backend/internal/features/restores/usecases"
"databasus-backend/internal/features/storages"
workspaces_services "databasus-backend/internal/features/workspaces/services"
@@ -24,6 +25,7 @@ var restoreService = &RestoreService{
workspaces_services.GetWorkspaceService(),
audit_logs.GetAuditLogService(),
encryption.GetFieldEncryptor(),
disk.GetDiskService(),
}
var restoreController = &RestoreController{
restoreService,

View File

@@ -5,6 +5,7 @@ import (
"databasus-backend/internal/features/backups/backups"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/disk"
"databasus-backend/internal/features/restores/enums"
"databasus-backend/internal/features/restores/models"
"databasus-backend/internal/features/restores/usecases"
@@ -32,6 +33,7 @@ type RestoreService struct {
workspaceService *workspaces_services.WorkspaceService
auditLogService *audit_logs.AuditLogService
fieldEncryptor encryption.FieldEncryptor
diskService *disk.DiskService
}
func (s *RestoreService) OnBeforeBackupRemove(backup *backups.Backup) error {
@@ -126,6 +128,11 @@ func (s *RestoreService) RestoreBackupWithAuth(
return err
}
// Validate disk space before starting restore
if err := s.validateDiskSpace(backup); err != nil {
return err
}
go func() {
if err := s.RestoreBackup(backup, requestDTO); err != nil {
s.logger.Error("Failed to restore backup", "error", err)
@@ -361,3 +368,41 @@ func (s *RestoreService) validateVersionCompatibility(
}
return nil
}
func (s *RestoreService) validateDiskSpace(backup *backups.Backup) error {
diskUsage, err := s.diskService.GetDiskUsage()
if err != nil {
return fmt.Errorf("failed to check disk space: %w", err)
}
// Convert backup size from MB to bytes
backupSizeBytes := int64(backup.BackupSizeMb * 1024 * 1024)
// Calculate required space: backup size + 10% buffer
bufferBytes := int64(float64(backupSizeBytes) * 0.1)
requiredBytes := backupSizeBytes + bufferBytes
// Ensure minimum of 1 GB total (even if backup is small)
minRequiredBytes := int64(1024 * 1024 * 1024) // 1 GB
if requiredBytes < minRequiredBytes {
requiredBytes = minRequiredBytes
}
// Check if there's enough free space
if diskUsage.FreeSpaceBytes < requiredBytes {
backupSizeGB := float64(backupSizeBytes) / (1024 * 1024 * 1024)
bufferSizeGB := float64(bufferBytes) / (1024 * 1024 * 1024)
requiredGB := float64(requiredBytes) / (1024 * 1024 * 1024)
availableGB := float64(diskUsage.FreeSpaceBytes) / (1024 * 1024 * 1024)
return fmt.Errorf(
"to restore this backup, %.1f GB (%.1f GB backup + %.1f GB buffer) is required, but only %.1f GB is available. Please free up disk space before restoring",
requiredGB,
backupSizeGB,
bufferSizeGB,
availableGB,
)
}
return nil
}
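
To make the arithmetic concrete, here is a stand-alone sketch (hypothetical, not part of this changeset) applying the same rule to the 10 TB backup from the test above:

package main

import "fmt"

func main() {
	backupSizeMB := 10485760.0 // 10 TB, as in the test above
	backupBytes := int64(backupSizeMB * 1024 * 1024)
	required := backupBytes + int64(float64(backupBytes)*0.1) // +10% buffer
	if minBytes := int64(1024 * 1024 * 1024); required < minBytes {
		required = minBytes // never require less than 1 GB
	}
	fmt.Printf("required: %.1f GB\n", float64(required)/(1024*1024*1024))
	// Prints: required: 11264.0 GB
}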

View File

@@ -330,7 +330,7 @@ func (uc *RestoreMariadbBackupUsecase) createTempMyCnfFile(
mdbConfig *mariadbtypes.MariadbDatabase,
password string,
) (string, error) {
tempDir, err := os.MkdirTemp("", "mycnf")
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}

View File

@@ -322,7 +322,7 @@ func (uc *RestoreMysqlBackupUsecase) createTempMyCnfFile(
myConfig *mysqltypes.MysqlDatabase,
password string,
) (string, error) {
tempDir, err := os.MkdirTemp("", "mycnf")
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}

View File

@@ -1,6 +1,7 @@
package usecases_postgresql
import (
"archive/tar"
"context"
"encoding/base64"
"errors"
@@ -16,6 +17,7 @@ import (
"databasus-backend/internal/config"
"databasus-backend/internal/features/backups/backups"
common "databasus-backend/internal/features/backups/backups/common"
"databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -54,6 +56,8 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
restore.ID,
"backupId",
backup.ID,
"format",
backup.Type,
)
pg := restoringToDB.Postgresql
@@ -65,33 +69,72 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
return fmt.Errorf("target database name is required for pg_restore")
}
// Use parallel jobs based on CPU count (same as backup)
pgBin := tools.GetPostgresqlExecutable(
pg.Version,
"pg_restore",
config.GetEnv().EnvMode,
config.GetEnv().PostgresesInstallDir,
)
// Route based on backup type
switch backup.Type {
case common.BackupTypeDirectory:
return uc.restoreDirectoryType(
originalDB,
restoringToDB,
pgBin,
backup,
storage,
pg,
isExcludeExtensions,
)
case common.BackupTypeDefault, "": // empty = legacy DEFAULT
return uc.restoreCustomType(
originalDB,
pgBin,
backup,
storage,
pg,
isExcludeExtensions,
)
default:
return fmt.Errorf("unsupported backup type: %s", backup.Type)
}
}
// restoreCustomType restores a backup in custom type (-Fc) - legacy type
func (uc *RestorePostgresqlBackupUsecase) restoreCustomType(
originalDB *databases.Database,
pgBin string,
backup *backups.Backup,
storage *storages.Storage,
pg *pgtypes.PostgresqlDatabase,
isExcludeExtensions bool,
) error {
uc.logger.Info("Restoring backup in custom type (-Fc)", "backupId", backup.ID)
// Use parallel jobs based on CPU count
// Cap between 1 and 8 to avoid overwhelming the server
parallelJobs := max(1, min(restoringToDB.Postgresql.CpuCount, 8))
parallelJobs := max(1, min(pg.CpuCount, 8))
args := []string{
"-Fc", // expect custom format (same as backup)
"-Fc", // expect custom type
"-j", strconv.Itoa(parallelJobs), // parallel jobs based on CPU count
"--no-password", // Use environment variable for password, prevent prompts
"--no-password",
"-h", pg.Host,
"-p", strconv.Itoa(pg.Port),
"-U", pg.Username,
"-d", *pg.Database,
"--verbose", // Add verbose output to help with debugging
"--clean", // Clean (drop) database objects before recreating them
"--if-exists", // Use IF EXISTS when dropping objects
"--no-owner", // Skip restoring ownership
"--no-acl", // Skip restoring access privileges (GRANT/REVOKE commands)
"--verbose",
"--clean",
"--if-exists",
"--no-owner",
"--no-acl",
}
return uc.restoreFromStorage(
originalDB,
- tools.GetPostgresqlExecutable(
- pg.Version,
- "pg_restore",
- config.GetEnv().EnvMode,
- config.GetEnv().PostgresesInstallDir,
- ),
+ pgBin,
args,
pg.Password,
backup,
@@ -101,6 +144,100 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
)
}
// restoreDirectoryType restores a backup in the directory format (-Fd), the new type shipped as a TAR archive
func (uc *RestorePostgresqlBackupUsecase) restoreDirectoryType(
originalDB *databases.Database,
_ *databases.Database, // restoringToDB not used but kept for API consistency
pgBin string,
backup *backups.Backup,
storage *storages.Storage,
pg *pgtypes.PostgresqlDatabase,
isExcludeExtensions bool,
) error {
uc.logger.Info("Restoring backup in directory type (-Fd)", "backupId", backup.ID)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute)
defer cancel()
// Monitor for shutdown
go func() {
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if config.IsShouldShutdown() {
cancel()
return
}
}
}
}()
// Create temporary .pgpass file
pgpassFile, err := uc.createTempPgpassFile(pg, pg.Password)
if err != nil {
return fmt.Errorf("failed to create temporary .pgpass file: %w", err)
}
defer func() {
if pgpassFile != "" {
_ = os.RemoveAll(filepath.Dir(pgpassFile))
}
}()
// Download and extract TAR to temporary directory
tempDir, cleanupFunc, err := uc.downloadAndExtractTar(ctx, backup, storage)
if err != nil {
return fmt.Errorf("failed to download and extract backup: %w", err)
}
defer cleanupFunc()
// Use parallel jobs based on CPU count
parallelJobs := max(1, min(pg.CpuCount, 8))
args := []string{
"-Fd", // directory type
"-j", strconv.Itoa(parallelJobs), // parallel restore
"--no-password",
"-h", pg.Host,
"-p", strconv.Itoa(pg.Port),
"-U", pg.Username,
"-d", *pg.Database,
"--verbose",
"--clean",
"--if-exists",
"--no-owner",
"--no-acl",
}
// If excluding extensions, generate filtered TOC list
if isExcludeExtensions {
tocListFile, err := uc.generateFilteredTocList(
ctx,
pgBin,
tempDir,
pgpassFile,
pg,
)
if err != nil {
return fmt.Errorf("failed to generate filtered TOC list: %w", err)
}
defer func() {
_ = os.Remove(tocListFile)
}()
args = append(args, "-L", tocListFile)
}
// Add the directory as the last argument
args = append(args, tempDir)
return uc.executePgRestore(ctx, originalDB, pgBin, args, pgpassFile, pg)
}
// restoreFromStorage restores backup data from storage using pg_restore
func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
database *databases.Database,
@@ -150,7 +287,7 @@ func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
}
defer func() {
if pgpassFile != "" {
- _ = os.Remove(pgpassFile)
+ _ = os.RemoveAll(filepath.Dir(pgpassFile))
}
}()
@@ -321,6 +458,175 @@ func (uc *RestorePostgresqlBackupUsecase) downloadBackupToTempFile(
return tempBackupFile, cleanupFunc, nil
}
// downloadAndExtractTar downloads a TAR backup from storage and extracts it to a temporary directory
func (uc *RestorePostgresqlBackupUsecase) downloadAndExtractTar(
ctx context.Context,
backup *backups.Backup,
storage *storages.Storage,
) (string, func(), error) {
err := files_utils.EnsureDirectories([]string{config.GetEnv().TempFolder})
if err != nil {
return "", nil, fmt.Errorf("failed to ensure directories: %w", err)
}
// Create temporary directory for extracted data
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "restore_dir_"+uuid.New().String())
if err != nil {
return "", nil, fmt.Errorf("failed to create temporary directory: %w", err)
}
cleanupFunc := func() {
_ = os.RemoveAll(tempDir)
}
uc.logger.Info(
"Downloading TAR backup from storage",
"backupId", backup.ID,
"tempDir", tempDir,
"encrypted", backup.Encryption == backups_config.BackupEncryptionEncrypted,
)
fieldEncryptor := util_encryption.GetFieldEncryptor()
rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
if err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to get backup file from storage: %w", err)
}
defer func() {
if err := rawReader.Close(); err != nil {
uc.logger.Error("Failed to close backup reader", "error", err)
}
}()
// Create a reader that handles decryption if needed
var backupReader io.Reader = rawReader
if backup.Encryption == backups_config.BackupEncryptionEncrypted {
if backup.EncryptionSalt == nil || backup.EncryptionIV == nil {
cleanupFunc()
return "", nil, fmt.Errorf("backup is encrypted but missing encryption metadata")
}
masterKey, err := uc.secretKeyService.GetSecretKey()
if err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to get master key for decryption: %w", err)
}
salt, err := base64.StdEncoding.DecodeString(*backup.EncryptionSalt)
if err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to decode encryption salt: %w", err)
}
iv, err := base64.StdEncoding.DecodeString(*backup.EncryptionIV)
if err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to decode encryption IV: %w", err)
}
decryptReader, err := encryption.NewDecryptionReader(
rawReader,
masterKey,
backup.ID,
salt,
iv,
)
if err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to create decryption reader: %w", err)
}
backupReader = decryptReader
uc.logger.Info("Using decryption for encrypted backup", "backupId", backup.ID)
}
// Extract TAR archive to temp directory
if err := uc.extractTar(ctx, backupReader, tempDir); err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to extract TAR archive: %w", err)
}
uc.logger.Info("TAR backup extracted to temporary directory", "tempDir", tempDir)
return tempDir, cleanupFunc, nil
}
// extractTar extracts a TAR archive to the specified directory
func (uc *RestorePostgresqlBackupUsecase) extractTar(
ctx context.Context,
reader io.Reader,
destDir string,
) error {
tarReader := tar.NewReader(reader)
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if config.IsShouldShutdown() {
return fmt.Errorf("extraction cancelled due to shutdown")
}
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read TAR header: %w", err)
}
targetPath := filepath.Join(destDir, header.Name)
// Ensure the target path is within destDir (prevent path traversal)
if !strings.HasPrefix(filepath.Clean(targetPath), filepath.Clean(destDir)) {
return fmt.Errorf("invalid file path in TAR: %s", header.Name)
}
switch header.Typeflag {
case tar.TypeDir:
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
}
case tar.TypeReg:
// Ensure parent directory exists
if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
return fmt.Errorf("failed to create parent directory for %s: %w", targetPath, err)
}
outFile, err := os.OpenFile(
targetPath,
os.O_CREATE|os.O_WRONLY|os.O_TRUNC,
os.FileMode(header.Mode),
)
if err != nil {
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
}
_, copyErr := uc.copyWithShutdownCheck(ctx, outFile, tarReader)
closeErr := outFile.Close()
if copyErr != nil {
return fmt.Errorf("failed to write file %s: %w", targetPath, copyErr)
}
if closeErr != nil {
return fmt.Errorf("failed to close file %s: %w", targetPath, closeErr)
}
default:
uc.logger.Warn(
"Skipping unsupported TAR entry type",
"type",
header.Typeflag,
"name",
header.Name,
)
}
}
return nil
}
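
One subtlety in the traversal guard above: a bare prefix comparison also accepts sibling paths that merely share the prefix, e.g. destDir /tmp/restore_dir_x versus an entry resolving to /tmp/restore_dir_xy. Since tempDir comes from os.MkdirTemp with a random suffix the collision is unlikely in practice, but a separator-aware check closes it for free; a sketch:

package main

import (
	"os"
	"path/filepath"
	"strings"
)

// within reports whether targetPath is destDir itself or nested inside it,
// avoiding the "shared prefix" false positive of a plain HasPrefix check.
func within(destDir, targetPath string) bool {
	dest := filepath.Clean(destDir)
	target := filepath.Clean(targetPath)
	return target == dest || strings.HasPrefix(target, dest+string(os.PathSeparator))
}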
// executePgRestore executes the pg_restore command with proper environment setup
func (uc *RestorePostgresqlBackupUsecase) executePgRestore(
ctx context.Context,
@@ -621,7 +927,7 @@ func (uc *RestorePostgresqlBackupUsecase) generateFilteredTocList(
}
// Write filtered TOC to temporary file
tocFile, err := os.CreateTemp("", "pg_restore_toc_*.list")
tocFile, err := os.CreateTemp(config.GetEnv().TempFolder, "pg_restore_toc_*.list")
if err != nil {
return "", fmt.Errorf("failed to create TOC list file: %w", err)
}
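
For context on what this file is for: pg_restore -l prints an archive's table of contents one entry per line, a leading ';' disables an entry, and -L replays only the entries left enabled. The filtering itself happens outside this hunk; a sketch of the likely shape (the " EXTENSION " match is a guess at the heuristic, not the repository's exact rule):

package main

import "strings"

// filterExtensionEntries comments out TOC entries that would recreate
// extensions, so pg_restore skips them when run with -L.
func filterExtensionEntries(toc string) string {
	lines := strings.Split(toc, "\n")
	for i, line := range lines {
		if strings.Contains(line, " EXTENSION ") {
			lines[i] = ";" + line // ';' disables an entry in a TOC list
		}
	}
	return strings.Join(lines, "\n")
}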
@@ -668,7 +974,7 @@ func (uc *RestorePostgresqlBackupUsecase) createTempPgpassFile(
escapedPassword,
)
tempDir, err := os.MkdirTemp("", "pgpass")
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "pgpass_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}
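
The .pgpass file this directory receives follows libpq's documented hostname:port:database:username:password layout, which is why the password is escaped above (':' and '\' must be backslash-escaped), and libpq ignores the file unless its mode is 0600. A sketch of the write that presumably completes the function, with '*' matching any database:

line := fmt.Sprintf("%s:%d:*:%s:%s\n", pg.Host, pg.Port, pg.Username, escapedPassword)
pgpassPath := filepath.Join(tempDir, ".pgpass")
// libpq refuses .pgpass files that are group- or world-readable.
if err := os.WriteFile(pgpassPath, []byte(line), 0o600); err != nil {
	return "", fmt.Errorf("failed to write .pgpass: %w", err)
}
// executePgRestore can then point libpq at it via the PGPASSFILE env var.
return pgpassPath, nil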

View File

@@ -3,8 +3,10 @@ package s3_storage
import (
"bytes"
"context"
"crypto/md5"
"crypto/tls"
"databasus-backend/internal/util/encryption"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -101,15 +103,21 @@ func (s *S3Storage) SaveFile(
return fmt.Errorf("read error: %w", readErr)
}
partData := buf[:n]
hash := md5.Sum(partData)
md5Base64 := base64.StdEncoding.EncodeToString(hash[:])
part, err := coreClient.PutObjectPart(
ctx,
s.S3Bucket,
objectKey,
uploadID,
partNumber,
- bytes.NewReader(buf[:n]),
+ bytes.NewReader(partData),
int64(n),
- minio.PutObjectPartOptions{},
+ minio.PutObjectPartOptions{
+ 	Md5Base64: md5Base64,
+ },
)
if err != nil {
_ = coreClient.AbortMultipartUpload(ctx, s.S3Bucket, objectKey, uploadID)
@@ -147,7 +155,9 @@ func (s *S3Storage) SaveFile(
objectKey,
bytes.NewReader([]byte{}),
0,
- minio.PutObjectOptions{},
+ minio.PutObjectOptions{
+ 	SendContentMd5: true,
+ },
)
if err != nil {
return fmt.Errorf("failed to upload empty file: %w", err)
@@ -283,7 +293,9 @@ func (s *S3Storage) TestConnection(encryptor encryption.FieldEncryptor) error {
testObjectKey,
testReader,
int64(len(testData)),
- minio.PutObjectOptions{},
+ minio.PutObjectOptions{
+ 	SendContentMd5: true,
+ },
)
if err != nil {
return fmt.Errorf("failed to upload test file to S3: %w", err)

View File

@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE backups ADD COLUMN type TEXT NOT NULL DEFAULT 'DEFAULT';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE backups DROP COLUMN type;
-- +goose StatementEnd
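
This column backs the backup.Type routing added to the PostgreSQL restore use case: every existing row is backfilled with 'DEFAULT', which the restore switch treats the same as an empty value. The matching Go constants live in backups/common and are not part of this diff, so only the first value below is confirmed by the migration; the second is an assumption:

// Hypothetical sketch of the constants in backups/common.
const (
	BackupTypeDefault   = "DEFAULT"   // confirmed by the column default above
	BackupTypeDirectory = "DIRECTORY" // assumed; the actual value is not in this diff
)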

View File

@@ -1 +0,0 @@
- This is test data for storage testing

View File

@@ -1 +0,0 @@
- This is test data for storage testing

View File

@@ -29,8 +29,23 @@ export const backupsApi = {
return apiHelper.fetchDeleteRaw(`${getApplicationServer()}/api/v1/backups/${id}`);
},
- async downloadBackup(id: string): Promise<Blob> {
- return apiHelper.fetchGetBlob(`${getApplicationServer()}/api/v1/backups/${id}/file`);
+ async downloadBackup(id: string): Promise<{ blob: Blob; filename: string }> {
+ const result = await apiHelper.fetchGetBlobWithHeaders(
+ `${getApplicationServer()}/api/v1/backups/${id}/file`,
+ );
+ // Extract filename from Content-Disposition header
+ const contentDisposition = result.headers.get('Content-Disposition');
+ let filename = `backup_${id}.backup`; // fallback filename
+ if (contentDisposition) {
+ const filenameMatch = contentDisposition.match(/filename="?(.+?)"?$/);
+ if (filenameMatch && filenameMatch[1]) {
+ filename = filenameMatch[1];
+ }
+ }
+ return { blob: result.blob, filename };
},
async cancelBackup(id: string) {

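The regex above covers the plain filename="..." form the backend emits. Two things worth knowing around it: on cross-origin requests the browser only exposes Content-Disposition if the server lists it in Access-Control-Expose-Headers (otherwise headers.get returns null and the fallback name is used), and Go's standard library can both emit and parse the header without a regex. A sketch, not code from this PR:

package main

import (
	"fmt"
	"mime"
	"net/http"
)

// serveBackup attaches the real filename and makes the header readable
// from cross-origin JavaScript.
func serveBackup(w http.ResponseWriter, filename string) {
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filename))
	w.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
}

// parseFilename extracts the filename parameter, handling quoting and
// escaping that an ad-hoc regex may miss.
func parseFilename(header string) (string, error) {
	_, params, err := mime.ParseMediaType(header)
	if err != nil {
		return "", err
	}
	return params["filename"], nil
}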
View File

@@ -9,6 +9,6 @@ export interface MongodbDatabase {
password: string;
database: string;
authDatabase: string;
- useTls: boolean;
+ isHttps: boolean;
cpuCount: number;
}

View File

@@ -64,18 +64,13 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef
const downloadBackup = async (backupId: string) => {
try {
- const blob = await backupsApi.downloadBackup(backupId);
+ const { blob, filename } = await backupsApi.downloadBackup(backupId);
// Create a download link
const url = window.URL.createObjectURL(blob);
const link = document.createElement('a');
link.href = url;
- // Find the backup to get a meaningful filename
- const backup = backups.find((b) => b.id === backupId);
- const createdAt = backup ? dayjs(backup.createdAt).format('YYYY-MM-DD_HH-mm-ss') : 'backup';
- const extension = database.type === DatabaseType.MYSQL ? '.sql.zst' : '.dump.zst';
- link.download = `${database.name}_backup_${createdAt}${extension}`;
+ link.download = filename;
// Trigger download
document.body.appendChild(link);

View File

@@ -50,13 +50,13 @@ const initializeDatabaseTypeData = (db: Database): Database => {
switch (db.type) {
case DatabaseType.POSTGRES:
- return { ...base, postgresql: db.postgresql ?? ({ cpuCount: 1 } as PostgresqlDatabase) };
+ return { ...base, postgresql: db.postgresql ?? ({ cpuCount: 4 } as PostgresqlDatabase) };
case DatabaseType.MYSQL:
return { ...base, mysql: db.mysql ?? ({} as MysqlDatabase) };
case DatabaseType.MARIADB:
return { ...base, mariadb: db.mariadb ?? ({} as MariadbDatabase) };
case DatabaseType.MONGODB:
- return { ...base, mongodb: db.mongodb ?? ({ cpuCount: 1 } as MongodbDatabase) };
+ return { ...base, mongodb: db.mongodb ?? ({ cpuCount: 4 } as MongodbDatabase) };
default:
return db;
}

View File

@@ -23,7 +23,17 @@ export const CreateReadOnlyComponent = ({
const isPostgres = database.type === DatabaseType.POSTGRES;
const isMysql = database.type === DatabaseType.MYSQL;
- const databaseTypeName = isPostgres ? 'PostgreSQL' : isMysql ? 'MySQL' : 'database';
+ const isMariadb = database.type === DatabaseType.MARIADB;
+ const isMongodb = database.type === DatabaseType.MONGODB;
+ const databaseTypeName = isPostgres
+ ? 'PostgreSQL'
+ : isMysql
+ ? 'MySQL'
+ : isMariadb
+ ? 'MariaDB'
+ : isMongodb
+ ? 'MongoDB'
+ : 'database';
const checkReadOnlyUser = async (): Promise<boolean> => {
try {
@@ -47,6 +57,12 @@ export const CreateReadOnlyComponent = ({
} else if (isMysql && database.mysql) {
database.mysql.username = response.username;
database.mysql.password = response.password;
} else if (isMariadb && database.mariadb) {
database.mariadb.username = response.username;
database.mariadb.password = response.password;
} else if (isMongodb && database.mongodb) {
database.mongodb.username = response.username;
database.mongodb.password = response.password;
}
onReadOnlyUserUpdated(database);

View File

@@ -77,8 +77,8 @@ export const EditMongoDbSpecificDataComponent = ({
password: result.password,
database: result.database,
authDatabase: result.authDatabase,
- useTls: result.useTls,
- cpuCount: 1,
+ isHttps: result.useTls,
+ cpuCount: 4,
},
};
@@ -287,15 +287,15 @@ export const EditMongoDbSpecificDataComponent = ({
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Use TLS</div>
<div className="min-w-[150px]">Use HTTPS</div>
<Switch
- checked={editingDatabase.mongodb?.useTls}
+ checked={editingDatabase.mongodb?.isHttps}
onChange={(checked) => {
if (!editingDatabase.mongodb) return;
setEditingDatabase({
...editingDatabase,
- mongodb: { ...editingDatabase.mongodb, useTls: checked },
+ mongodb: { ...editingDatabase.mongodb, isHttps: checked },
});
setIsConnectionTested(false);
}}
@@ -309,7 +309,7 @@ export const EditMongoDbSpecificDataComponent = ({
<InputNumber
min={1}
max={16}
- value={editingDatabase.mongodb?.cpuCount || 1}
+ value={editingDatabase.mongodb?.cpuCount}
onChange={(value) => {
if (!editingDatabase.mongodb) return;

View File

@@ -82,7 +82,7 @@ export const EditPostgreSqlSpecificDataComponent = ({
password: result.password,
database: result.database,
isHttps: result.isHttps,
- cpuCount: 1,
+ cpuCount: 4,
},
};
@@ -356,36 +356,34 @@ export const EditPostgreSqlSpecificDataComponent = ({
/>
</div>
{isRestoreMode && (
<div className="mb-5 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={128}
value={editingDatabase.postgresql?.cpuCount || 1}
onChange={(value) => {
if (!editingDatabase.postgresql) return;
<div className="mb-5 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={128}
value={editingDatabase.postgresql?.cpuCount}
onChange={(value) => {
if (!editingDatabase.postgresql) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, cpuCount: value || 1 },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[75px] grow"
/>
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, cpuCount: value || 1 },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[75px] grow"
/>
<Tooltip
className="cursor-pointer"
title="Number of CPU cores to use for backup and restore operations. Higher values may speed up operations but use more resources."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
<Tooltip
className="cursor-pointer"
title="Number of CPU cores to use for backup and restore operations. Higher values may speed up operations but use more resources."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
)}
</div>
<div className="mt-4 mb-1 flex items-center">
<div

View File

@@ -33,8 +33,13 @@ export const ShowMongoDbSpecificDataComponent = ({ database }: Props) => {
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Use TLS</div>
<div>{database.mongodb?.useTls ? 'Yes' : 'No'}</div>
<div className="min-w-[150px]">Use HTTPS</div>
<div>{database.mongodb?.isHttps ? 'Yes' : 'No'}</div>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div>{database.mongodb?.cpuCount}</div>
</div>
{database.mongodb?.authDatabase && (
@@ -43,11 +48,6 @@ export const ShowMongoDbSpecificDataComponent = ({ database }: Props) => {
<div>{database.mongodb.authDatabase}</div>
</div>
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div>{database.mongodb?.cpuCount || 1}</div>
</div>
</div>
);
};

View File

@@ -54,6 +54,11 @@ export const ShowPostgreSqlSpecificDataComponent = ({ database }: Props) => {
<div>{database.postgresql?.isHttps ? 'Yes' : 'No'}</div>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div>{database.postgresql?.cpuCount}</div>
</div>
{!!database.postgresql?.includeSchemas?.length && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Include schemas</div>

View File

@@ -14,7 +14,8 @@ export function EditSFTPStorageComponent({ storage, setStorage, setUnsaved }: Pr
const hasAdvancedValues = !!storage?.sftpStorage?.skipHostKeyVerify;
const [showAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
- const authMethod = storage?.sftpStorage?.privateKey ? 'privateKey' : 'password';
+ const initialAuthMethod = storage?.sftpStorage?.privateKey ? 'privateKey' : 'password';
+ const [authMethod, setAuthMethod] = useState<'password' | 'privateKey'>(initialAuthMethod);
return (
<>
@@ -93,7 +94,10 @@ export function EditSFTPStorageComponent({ storage, setStorage, setUnsaved }: Pr
onChange={(e) => {
if (!storage?.sftpStorage) return;
- if (e.target.value === 'password') {
+ const newMethod = e.target.value as 'password' | 'privateKey';
+ setAuthMethod(newMethod);
+ if (newMethod === 'password') {
setStorage({
...storage,
sftpStorage: {

View File

@@ -176,6 +176,25 @@ export const apiHelper = {
return response.blob();
},
fetchGetBlobWithHeaders: async (
url: string,
requestOptions?: RequestOptions,
isRetryOnError = false,
): Promise<{ blob: Blob; headers: Headers }> => {
const optionsWrapper = (requestOptions ?? new RequestOptions())
.addHeader('Access-Control-Allow-Methods', 'GET')
.addHeader('Authorization', accessTokenHelper.getAccessToken());
const response = await makeRequest(
url,
optionsWrapper,
isRetryOnError ? 0 : REPEAT_TRIES_COUNT,
);
const blob = await response.blob();
return { blob, headers: response.headers };
},
fetchPutJson: async <T>(
url: string,
requestOptions?: RequestOptions,