Compare commits

...

22 Commits

Author SHA1 Message Date
Rostislav Dugin
5d685e0a39 Merge pull request #492 from databasus/develop
FIX (backups): Save metadata file to storage before marking backup as…
2026-04-02 09:48:39 +03:00
Rostislav Dugin
18b8178608 FIX (backups): Save metadata file to storage before marking backup as COMPLETED to fix flaky test race condition 2026-04-02 09:48:05 +03:00
Rostislav Dugin
981d560768 Merge pull request #491 from databasus/develop
Develop
2026-04-02 09:05:03 +03:00
Rostislav Dugin
02d9cda86f FEATURE (storages): Add configurable S3 storage class to allow cheaper storage tiers like ONEZONE_IA 2026-04-02 09:00:13 +03:00
Rostislav Dugin
cefedb6ddd FIX (mariadb): Disable wsrep replication during restore to fix "Maximum writeset size exceeded" on Galera Cluster 2026-04-02 08:46:48 +03:00
Rostislav Dugin
27d891fb34 FIX (docker): Use -k /tmp for PostgreSQL socket directory to fix lock file permission denied on NAS systems 2026-04-02 08:26:07 +03:00
Rostislav Dugin
7123de9fa3 Merge pull request #486 from databasus/develop
FIX (docker): Chown /var/run/postgresql after UID/GID adjustment to f…
2026-03-31 14:25:15 +03:00
Rostislav Dugin
d1c41ed53a FIX (docker): Chown /var/run/postgresql after UID/GID adjustment to fix PostgreSQL lock file permission denied on startup 2026-03-31 14:24:43 +03:00
Rostislav Dugin
c2ddbfc86f Merge pull request #484 from databasus/develop
FEATURE (docker): Add PUID/PGID environment variables to control post…
2026-03-31 11:52:23 +03:00
Rostislav Dugin
f287967b5d FEATURE (docker): Add PUID/PGID environment variables to control postgres user UID/GID for host-level backup compatibility 2026-03-31 11:51:57 +03:00
Rostislav Dugin
ef879df08f Merge pull request #483 from databasus/develop
FIX (backups): Use system's temp directory instead of mounter directo…
2026-03-31 11:41:26 +03:00
Rostislav Dugin
44ddcb836e FIX (backups): Use system's temp directory instead of mounter directory to fix access permissions on TrueNAS 2026-03-31 11:40:11 +03:00
Rostislav Dugin
b5178f5752 Merge pull request #482 from databasus/develop
FEATURE (clipboard): Add parsing from clipboard via dialog in HTTP\no…
2026-03-31 11:21:25 +03:00
Rostislav Dugin
7913c1b474 FEATURE (clipboard): Add parsing from clipboard via dialog in HTTP\no navigator mode 2026-03-31 11:20:13 +03:00
Rostislav Dugin
2815cc3752 Merge pull request #481 from databasus/develop
FIX (storages): Validate only a single rclone storage is passed
2026-03-31 10:37:54 +03:00
Rostislav Dugin
189573fa1b FIX (storages): Validate only a single rclone storage is passed 2026-03-31 10:37:13 +03:00
Rostislav Dugin
81f77760c9 Merge pull request #479 from databasus/develop
FEATURE (navbar): Update navbar link color
2026-03-30 13:16:07 +03:00
Rostislav Dugin
63e23b2489 FEATURE (navbar): Update navbar link color 2026-03-30 13:15:03 +03:00
Rostislav Dugin
8c1b8ac00f Merge pull request #477 from databasus/develop
Develop
2026-03-29 15:45:15 +03:00
Rostislav Dugin
1926096377 FEATURE (backups): Add filters to backups panel 2026-03-29 15:33:01 +03:00
Rostislav Dugin
0a131511a8 FIX (agent): Fix uploading WAL to storages 2026-03-29 14:35:46 +03:00
Rostislav Dugin
aa01ce0b76 FEATURE (agent): Make installation guide more structured 2026-03-29 14:34:33 +03:00
47 changed files with 1486 additions and 396 deletions

View File

@@ -1566,6 +1566,13 @@ export const ReactComponent = ({ someValue }: Props): JSX.Element => {
- **Calculated values** - Derived data from props/state
- **Return** - JSX markup
### Clipboard operations
Always use `ClipboardHelper` (`shared/lib/ClipboardHelper.ts`) for clipboard operations — never call `navigator.clipboard` directly.
- **Copy:** `ClipboardHelper.copyToClipboard(text)` — uses `navigator.clipboard` with `execCommand('copy')` fallback for non-secure contexts (HTTP).
- **Paste:** Check `ClipboardHelper.isClipboardApiAvailable()` first. If available, use `ClipboardHelper.readFromClipboard()`. If not, show `ClipboardPasteModalComponent` (`shared/ui`) which lets the user paste manually via a text input modal.
---
## Summary

View File

@@ -239,7 +239,8 @@ RUN apt-get update && \
fi
# Create postgres user and set up directories
RUN useradd -m -s /bin/bash postgres || true && \
RUN groupadd -g 999 postgres || true && \
useradd -m -s /bin/bash -u 999 -g 999 postgres || true && \
mkdir -p /databasus-data/pgdata && \
chown -R postgres:postgres /databasus-data/pgdata
@@ -294,6 +295,23 @@ if [ -d "/postgresus-data" ] && [ "\$(ls -A /postgresus-data 2>/dev/null)" ]; th
exit 1
fi
# ========= Adjust postgres user UID/GID =========
PUID=\${PUID:-999}
PGID=\${PGID:-999}
CURRENT_UID=\$(id -u postgres)
CURRENT_GID=\$(id -g postgres)
if [ "\$CURRENT_GID" != "\$PGID" ]; then
echo "Adjusting postgres group GID from \$CURRENT_GID to \$PGID..."
groupmod -o -g "\$PGID" postgres
fi
if [ "\$CURRENT_UID" != "\$PUID" ]; then
echo "Adjusting postgres user UID from \$CURRENT_UID to \$PUID..."
usermod -o -u "\$PUID" postgres
fi
# PostgreSQL 17 binary paths
PG_BIN="/usr/lib/postgresql/17/bin"
@@ -406,7 +424,12 @@ fi
# Function to start PostgreSQL and wait for it to be ready
start_postgres() {
echo "Starting PostgreSQL..."
gosu postgres \$PG_BIN/postgres -D /databasus-data/pgdata -p 5437 &
# -k /tmp: create Unix socket and lock file in /tmp instead of /var/run/postgresql/.
# On NAS systems (e.g. TrueNAS Scale), the ZFS-backed Docker overlay filesystem
# ignores chown/chmod on directories from image layers, so PostgreSQL gets
# "Permission denied" when creating .s.PGSQL.5437.lock in /var/run/postgresql/.
# All internal connections use TCP (-h localhost), so the socket location does not matter.
gosu postgres \$PG_BIN/postgres -D /databasus-data/pgdata -p 5437 -k /tmp &
POSTGRES_PID=\$!
echo "Waiting for PostgreSQL to be ready..."

View File

@@ -280,7 +280,6 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
return
}
backup.Status = backups_core.BackupStatusCompleted
backup.BackupDurationMs = time.Since(start).Milliseconds()
// Update backup with encryption metadata if provided
@@ -297,12 +296,6 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
backup.Encryption = backupMetadata.Encryption
}
if err := n.backupRepository.Save(backup); err != nil {
n.logger.Error("Failed to save backup", "error", err)
return
}
// Save metadata file to storage
if backupMetadata != nil {
metadataJSON, err := json.Marshal(backupMetadata)
if err != nil {
@@ -335,6 +328,13 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
}
}
backup.Status = backups_core.BackupStatusCompleted
if err := n.backupRepository.Save(backup); err != nil {
n.logger.Error("Failed to save backup", "error", err)
return
}
// Update database last backup time
now := time.Now().UTC()
if updateErr := n.databaseService.SetLastBackupTime(databaseID, now); updateErr != nil {

View File

@@ -40,12 +40,15 @@ func (c *BackupController) RegisterPublicRoutes(router *gin.RouterGroup) {
// GetBackups
// @Summary Get backups for a database
// @Description Get paginated backups for the specified database
// @Description Get paginated backups for the specified database with optional filters
// @Tags backups
// @Produce json
// @Param database_id query string true "Database ID"
// @Param limit query int false "Number of items per page" default(10)
// @Param offset query int false "Offset for pagination" default(0)
// @Param status query []string false "Filter by backup status (can be repeated)" Enums(IN_PROGRESS, COMPLETED, FAILED, CANCELED)
// @Param beforeDate query string false "Filter backups created before this date (RFC3339)" format(date-time)
// @Param pgWalBackupType query string false "Filter by WAL backup type" Enums(PG_FULL_BACKUP, PG_WAL_SEGMENT)
// @Success 200 {object} backups_dto.GetBackupsResponse
// @Failure 400
// @Failure 401
@@ -70,7 +73,9 @@ func (c *BackupController) GetBackups(ctx *gin.Context) {
return
}
response, err := c.backupService.GetBackups(user, databaseID, request.Limit, request.Offset)
filters := c.buildBackupFilters(&request)
response, err := c.backupService.GetBackups(user, databaseID, request.Limit, request.Offset, filters)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -359,3 +364,35 @@ func (c *BackupController) startDownloadHeartbeat(ctx context.Context, userID uu
}
}
}
// buildBackupFilters converts the raw query parameters of a GetBackups
// request into a BackupFilters value. It returns nil when no filter
// parameter was supplied, so callers can skip filtering entirely.
func (c *BackupController) buildBackupFilters(
	request *backups_dto.GetBackupsRequest,
) *backups_core.BackupFilters {
	hasStatuses := len(request.Statuses) > 0
	if !hasStatuses && request.BeforeDate == nil && request.PgWalBackupType == nil {
		// Nothing to filter on — signal "no filters" with nil.
		return nil
	}

	filters := &backups_core.BackupFilters{BeforeDate: request.BeforeDate}

	if hasStatuses {
		// Convert the raw status strings into typed BackupStatus values.
		statuses := make([]backups_core.BackupStatus, len(request.Statuses))
		for i, raw := range request.Statuses {
			statuses[i] = backups_core.BackupStatus(raw)
		}
		filters.Statuses = statuses
	}

	if request.PgWalBackupType != nil {
		walType := backups_core.PgWalBackupType(*request.PgWalBackupType)
		filters.PgWalBackupType = &walType
	}

	return filters
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"databasus-backend/internal/config"
audit_logs "databasus-backend/internal/features/audit_logs"
@@ -140,6 +141,225 @@ func Test_GetBackups_PermissionsEnforced(t *testing.T) {
}
}
// Test_GetBackups_WithStatusFilter_ReturnsFilteredBackups seeds one backup per
// status (COMPLETED, FAILED, CANCELED) and verifies that the GET /backups
// endpoint honors both a single `status` query parameter and a repeated one.
func Test_GetBackups_WithStatusFilter_ReturnsFilteredBackups(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
storage := createTestStorage(workspace.ID)
defer func() {
databases.RemoveTestDatabase(database)
// NOTE(review): the sleep presumably lets asynchronous database cleanup
// settle before the storage is removed — confirm against RemoveTestDatabase.
time.Sleep(50 * time.Millisecond)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Three backups, one per status, with distinct creation times.
now := time.Now().UTC()
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now.Add(-3 * time.Hour),
})
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusFailed,
CreatedAt: now.Add(-2 * time.Hour),
})
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCanceled,
CreatedAt: now.Add(-1 * time.Hour),
})
// Single status filter: only the COMPLETED backup should come back.
var singleResponse backups_dto.GetBackupsResponse
test_utils.MakeGetRequestAndUnmarshal(
t,
router,
fmt.Sprintf("/api/v1/backups?database_id=%s&status=COMPLETED", database.ID.String()),
"Bearer "+owner.Token,
http.StatusOK,
&singleResponse,
)
assert.Equal(t, int64(1), singleResponse.Total)
assert.Len(t, singleResponse.Backups, 1)
assert.Equal(t, backups_core.BackupStatusCompleted, singleResponse.Backups[0].Status)
// Multiple status filter: a repeated `status` parameter acts as an OR,
// so COMPLETED and FAILED match but CANCELED does not.
var multiResponse backups_dto.GetBackupsResponse
test_utils.MakeGetRequestAndUnmarshal(
t,
router,
fmt.Sprintf(
"/api/v1/backups?database_id=%s&status=COMPLETED&status=FAILED",
database.ID.String(),
),
"Bearer "+owner.Token,
http.StatusOK,
&multiResponse,
)
assert.Equal(t, int64(2), multiResponse.Total)
assert.Len(t, multiResponse.Backups, 2)
for _, backup := range multiResponse.Backups {
assert.True(
t,
backup.Status == backups_core.BackupStatusCompleted ||
backup.Status == backups_core.BackupStatusFailed,
"expected COMPLETED or FAILED, got %s", backup.Status,
)
}
}
// Test_GetBackups_WithBeforeDateFilter_ReturnsFilteredBackups creates one
// backup older than a cutoff and one newer, then verifies that the
// `beforeDate` query parameter (RFC3339) returns only the older backup.
func Test_GetBackups_WithBeforeDateFilter_ReturnsFilteredBackups(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
storage := createTestStorage(workspace.ID)
defer func() {
databases.RemoveTestDatabase(database)
// NOTE(review): sleep presumably lets asynchronous cleanup settle — confirm.
time.Sleep(50 * time.Millisecond)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
now := time.Now().UTC()
cutoff := now.Add(-1 * time.Hour)
// Created 3h ago — strictly before the cutoff, so it should match.
olderBackup := CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now.Add(-3 * time.Hour),
})
// Created now — after the cutoff, so it should be excluded.
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now,
})
var response backups_dto.GetBackupsResponse
test_utils.MakeGetRequestAndUnmarshal(
t,
router,
fmt.Sprintf(
"/api/v1/backups?database_id=%s&beforeDate=%s",
database.ID.String(),
cutoff.Format(time.RFC3339),
),
"Bearer "+owner.Token,
http.StatusOK,
&response,
)
assert.Equal(t, int64(1), response.Total)
assert.Len(t, response.Backups, 1)
assert.Equal(t, olderBackup.ID, response.Backups[0].ID)
}
// Test_GetBackups_WithPgWalBackupTypeFilter_ReturnsFilteredBackups creates one
// full backup and one WAL-segment backup, then verifies that filtering by
// `pgWalBackupType=PG_FULL_BACKUP` returns only the full backup.
func Test_GetBackups_WithPgWalBackupTypeFilter_ReturnsFilteredBackups(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
storage := createTestStorage(workspace.ID)
defer func() {
databases.RemoveTestDatabase(database)
// NOTE(review): sleep presumably lets asynchronous cleanup settle — confirm.
time.Sleep(50 * time.Millisecond)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
now := time.Now().UTC()
// Local copies so their addresses can be taken for the pointer field.
fullBackupType := backups_core.PgWalBackupTypeFullBackup
walSegmentType := backups_core.PgWalBackupTypeWalSegment
fullBackup := CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now.Add(-2 * time.Hour),
PgWalBackupType: &fullBackupType,
})
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now.Add(-1 * time.Hour),
PgWalBackupType: &walSegmentType,
})
var response backups_dto.GetBackupsResponse
test_utils.MakeGetRequestAndUnmarshal(
t,
router,
fmt.Sprintf(
"/api/v1/backups?database_id=%s&pgWalBackupType=PG_FULL_BACKUP",
database.ID.String(),
),
"Bearer "+owner.Token,
http.StatusOK,
&response,
)
assert.Equal(t, int64(1), response.Total)
assert.Len(t, response.Backups, 1)
assert.Equal(t, fullBackup.ID, response.Backups[0].ID)
}
// Test_GetBackups_WithCombinedFilters_ReturnsFilteredBackups verifies that the
// `status` and `beforeDate` filters compose with AND semantics: only the backup
// matching BOTH conditions is returned.
func Test_GetBackups_WithCombinedFilters_ReturnsFilteredBackups(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
storage := createTestStorage(workspace.ID)
defer func() {
databases.RemoveTestDatabase(database)
// NOTE(review): sleep presumably lets asynchronous cleanup settle — confirm.
time.Sleep(50 * time.Millisecond)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
now := time.Now().UTC()
cutoff := now.Add(-1 * time.Hour)
// Old completed — should match
oldCompleted := CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now.Add(-3 * time.Hour),
})
// Old failed — should NOT match (wrong status)
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusFailed,
CreatedAt: now.Add(-2 * time.Hour),
})
// New completed — should NOT match (too recent)
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
Status: backups_core.BackupStatusCompleted,
CreatedAt: now,
})
var response backups_dto.GetBackupsResponse
test_utils.MakeGetRequestAndUnmarshal(
t,
router,
fmt.Sprintf(
"/api/v1/backups?database_id=%s&status=COMPLETED&beforeDate=%s",
database.ID.String(),
cutoff.Format(time.RFC3339),
),
"Bearer "+owner.Token,
http.StatusOK,
&response,
)
assert.Equal(t, int64(1), response.Total)
assert.Len(t, response.Backups, 1)
assert.Equal(t, oldCompleted.ID, response.Backups[0].ID)
}
func Test_CreateBackup_PermissionsEnforced(t *testing.T) {
tests := []struct {
name string
@@ -376,7 +596,7 @@ func Test_DeleteBackup_PermissionsEnforced(t *testing.T) {
ownerUser, err := userService.GetUserFromToken(owner.Token)
assert.NoError(t, err)
response, err := backups_services.GetBackupService().GetBackups(ownerUser, database.ID, 10, 0)
response, err := backups_services.GetBackupService().GetBackups(ownerUser, database.ID, 10, 0, nil)
assert.NoError(t, err)
assert.Equal(t, 0, len(response.Backups))
}
@@ -1297,14 +1517,14 @@ func Test_MakeBackup_VerifyBackupAndMetadataFilesExistInStorage(t *testing.T) {
encryptor := encryption.GetFieldEncryptor()
backupFile, err := backupStorage.GetFile(encryptor, backup.FileName)
assert.NoError(t, err)
require.NoError(t, err)
backupFile.Close()
metadataFile, err := backupStorage.GetFile(encryptor, backup.FileName+".metadata")
assert.NoError(t, err)
require.NoError(t, err)
metadataContent, err := io.ReadAll(metadataFile)
assert.NoError(t, err)
require.NoError(t, err)
metadataFile.Close()
var storageMetadata backups_common.BackupMetadata

View File

@@ -95,3 +95,33 @@ func CreateTestBackup(databaseID, storageID uuid.UUID) *backups_core.Backup {
return backup
}
// TestBackupOptions bundles the fields of a test backup that individual tests
// need to control; everything else gets fixed defaults in
// CreateTestBackupWithOptions.
type TestBackupOptions struct {
// Status to store on the backup row (e.g. COMPLETED, FAILED, CANCELED).
Status backups_core.BackupStatus
// CreatedAt is the creation timestamp to persist (enables date filtering tests).
CreatedAt time.Time
// PgWalBackupType is the optional WAL backup type; nil leaves it unset.
PgWalBackupType *backups_core.PgWalBackupType
}
// CreateTestBackupWithOptions persists a backup row for the given database and
// storage with the supplied status, creation time, and optional WAL type.
// All other fields get fixed test defaults. Panics if saving fails, so tests
// fail loudly on broken fixtures.
func CreateTestBackupWithOptions(
	databaseID, storageID uuid.UUID,
	opts TestBackupOptions,
) *backups_core.Backup {
	record := &backups_core.Backup{
		ID:               uuid.New(),
		DatabaseID:       databaseID,
		StorageID:        storageID,
		Status:           opts.Status,
		BackupSizeMb:     10.5,
		BackupDurationMs: 1000,
		PgWalBackupType:  opts.PgWalBackupType,
		CreatedAt:        opts.CreatedAt,
	}

	repository := &backups_core.BackupRepository{}
	if saveErr := repository.Save(record); saveErr != nil {
		panic(saveErr)
	}

	return record
}

View File

@@ -0,0 +1,9 @@
package backups_core
import "time"
// BackupFilters narrows backup list/count queries. A nil pointer field or an
// empty slice means "do not filter on this dimension".
type BackupFilters struct {
// Statuses keeps only backups whose status is one of these (OR semantics).
Statuses []BackupStatus
// BeforeDate keeps only backups created strictly before this instant.
BeforeDate *time.Time
// PgWalBackupType keeps only backups with this WAL backup type.
PgWalBackupType *PgWalBackupType
}

View File

@@ -422,3 +422,67 @@ func (r *BackupRepository) FindLastWalSegmentAfter(
return &backup, nil
}
// FindByDatabaseIDWithFiltersAndPagination returns one page of backups for the
// given database, newest first, optionally narrowed by the supplied filters.
// A nil filters value means no additional filtering.
func (r *BackupRepository) FindByDatabaseIDWithFiltersAndPagination(
	databaseID uuid.UUID,
	filters *BackupFilters,
	limit, offset int,
) ([]*Backup, error) {
	query := storage.GetDb().Where("database_id = ?", databaseID)
	if filters != nil {
		query = filters.applyToQuery(query)
	}

	var result []*Backup
	err := query.
		Order("created_at DESC").
		Limit(limit).
		Offset(offset).
		Find(&result).
		Error
	if err != nil {
		return nil, err
	}

	return result, nil
}
// CountByDatabaseIDWithFilters returns the total number of backups for the
// given database that match the supplied filters (nil filters = count all).
// Used to report the unpaginated total alongside a paginated listing.
func (r *BackupRepository) CountByDatabaseIDWithFilters(
	databaseID uuid.UUID,
	filters *BackupFilters,
) (int64, error) {
	query := storage.
		GetDb().
		Model(&Backup{}).
		Where("database_id = ?", databaseID)
	if filters != nil {
		query = filters.applyToQuery(query)
	}

	var total int64
	if err := query.Count(&total).Error; err != nil {
		return 0, err
	}
	return total, nil
}
// applyToQuery appends a WHERE clause for every filter field that is set and
// returns the extended query. Unset fields leave the query untouched, so the
// filters compose with AND semantics.
func (f *BackupFilters) applyToQuery(query *gorm.DB) *gorm.DB {
	if statuses := f.Statuses; len(statuses) > 0 {
		query = query.Where("status IN ?", statuses)
	}

	if before := f.BeforeDate; before != nil {
		// Strict inequality: backups created exactly at the cutoff are excluded.
		query = query.Where("created_at < ?", *before)
	}

	if walType := f.PgWalBackupType; walType != nil {
		query = query.Where("pg_wal_backup_type = ?", *walType)
	}

	return query
}

View File

@@ -11,9 +11,12 @@ import (
)
type GetBackupsRequest struct {
DatabaseID string `form:"database_id" binding:"required"`
Limit int `form:"limit"`
Offset int `form:"offset"`
DatabaseID string `form:"database_id" binding:"required"`
Limit int `form:"limit"`
Offset int `form:"offset"`
Statuses []string `form:"status"`
BeforeDate *time.Time `form:"beforeDate"`
PgWalBackupType *string `form:"pgWalBackupType"`
}
type GetBackupsResponse struct {

View File

@@ -109,6 +109,7 @@ func (s *BackupService) GetBackups(
user *users_models.User,
databaseID uuid.UUID,
limit, offset int,
filters *backups_core.BackupFilters,
) (*backups_dto.GetBackupsResponse, error) {
database, err := s.databaseService.GetDatabaseByID(databaseID)
if err != nil {
@@ -134,12 +135,14 @@ func (s *BackupService) GetBackups(
offset = 0
}
backups, err := s.backupRepository.FindByDatabaseIDWithPagination(databaseID, limit, offset)
backups, err := s.backupRepository.FindByDatabaseIDWithFiltersAndPagination(
databaseID, filters, limit, offset,
)
if err != nil {
return nil, err
}
total, err := s.backupRepository.CountByDatabaseID(databaseID)
total, err := s.backupRepository.CountByDatabaseIDWithFilters(databaseID, filters)
if err != nil {
return nil, err
}

View File

@@ -281,15 +281,9 @@ func (uc *CreateMariadbBackupUsecase) createTempMyCnfFile(
mdbConfig *mariadbtypes.MariadbDatabase,
password string,
) (string, error) {
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
tempDir, err := os.MkdirTemp(tempFolder, "mycnf_"+uuid.New().String())
// Credential files use OS temp dir (/tmp) because some filesystems
// (e.g. ZFS on TrueNAS) ignore chmod, causing "group or world access" errors.
tempDir, err := os.MkdirTemp(os.TempDir(), "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}

View File

@@ -300,15 +300,9 @@ func (uc *CreateMysqlBackupUsecase) createTempMyCnfFile(
myConfig *mysqltypes.MysqlDatabase,
password string,
) (string, error) {
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
tempDir, err := os.MkdirTemp(tempFolder, "mycnf_"+uuid.New().String())
// Credential files use OS temp dir (/tmp) because some filesystems
// (e.g. ZFS on TrueNAS) ignore chmod, causing "group or world access" errors.
tempDir, err := os.MkdirTemp(os.TempDir(), "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}

View File

@@ -747,15 +747,9 @@ func (uc *CreatePostgresqlBackupUsecase) createTempPgpassFile(
escapedPassword,
)
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
tempDir, err := os.MkdirTemp(tempFolder, "pgpass_"+uuid.New().String())
// Credential files use OS temp dir (/tmp) because some filesystems
// (e.g. ZFS on TrueNAS) ignore chmod, causing "group or world access" errors.
tempDir, err := os.MkdirTemp(os.TempDir(), "pgpass_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}

View File

@@ -62,6 +62,14 @@ func (r *BackupConfigRepository) FindByDatabaseID(databaseID uuid.UUID) (*Backup
GetDb().
Preload("BackupInterval").
Preload("Storage").
Preload("Storage.LocalStorage").
Preload("Storage.S3Storage").
Preload("Storage.GoogleDriveStorage").
Preload("Storage.NASStorage").
Preload("Storage.AzureBlobStorage").
Preload("Storage.FTPStorage").
Preload("Storage.SFTPStorage").
Preload("Storage.RcloneStorage").
Where("database_id = ?", databaseID).
First(&backupConfig).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
@@ -81,6 +89,14 @@ func (r *BackupConfigRepository) GetWithEnabledBackups() ([]*BackupConfig, error
GetDb().
Preload("BackupInterval").
Preload("Storage").
Preload("Storage.LocalStorage").
Preload("Storage.S3Storage").
Preload("Storage.GoogleDriveStorage").
Preload("Storage.NASStorage").
Preload("Storage.AzureBlobStorage").
Preload("Storage.FTPStorage").
Preload("Storage.SFTPStorage").
Preload("Storage.RcloneStorage").
Where("is_backups_enabled = ?", true).
Find(&backupConfigs).Error; err != nil {
return nil, err

View File

@@ -1 +0,0 @@
package secrets

View File

@@ -70,6 +70,14 @@ func (uc *RestoreMariadbBackupUsecase) Execute(
"--verbose",
}
// Disable Galera Cluster replication for the restore session to prevent
// "Maximum writeset size exceeded" errors on large restores.
// wsrep_on is available in MariaDB 10.1+ (all builds with Galera support).
// On non-Galera instances the variable still exists but is a no-op.
if mdb.Version != tools.MariadbVersion55 {
args = append(args, "--init-command=SET SESSION wsrep_on=OFF")
}
if !config.GetEnv().IsCloud {
args = append(args, "--max-allowed-packet=1G")
}
@@ -287,15 +295,9 @@ func (uc *RestoreMariadbBackupUsecase) createTempMyCnfFile(
mdbConfig *mariadbtypes.MariadbDatabase,
password string,
) (string, error) {
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
tempDir, err := os.MkdirTemp(tempFolder, "mycnf_"+uuid.New().String())
// Credential files use OS temp dir (/tmp) because some filesystems
// (e.g. ZFS on TrueNAS) ignore chmod, causing "group or world access" errors.
tempDir, err := os.MkdirTemp(os.TempDir(), "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}
@@ -385,6 +387,13 @@ func (uc *RestoreMariadbBackupUsecase) handleMariadbRestoreError(
)
}
if containsIgnoreCase(stderrStr, "writeset size exceeded") {
return fmt.Errorf(
"MariaDB Galera Cluster writeset size limit exceeded. Try increasing wsrep_max_ws_size on your cluster nodes. stderr: %s",
stderrStr,
)
}
return errors.New(errorMsg)
}

View File

@@ -278,15 +278,9 @@ func (uc *RestoreMysqlBackupUsecase) createTempMyCnfFile(
myConfig *mysqltypes.MysqlDatabase,
password string,
) (string, error) {
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
tempDir, err := os.MkdirTemp(tempFolder, "mycnf_"+uuid.New().String())
// Credential files use OS temp dir (/tmp) because some filesystems
// (e.g. ZFS on TrueNAS) ignore chmod, causing "group or world access" errors.
tempDir, err := os.MkdirTemp(os.TempDir(), "mycnf_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %w", err)
}

View File

@@ -995,15 +995,9 @@ func (uc *RestorePostgresqlBackupUsecase) createTempPgpassFile(
escapedPassword,
)
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
tempDir, err := os.MkdirTemp(tempFolder, "pgpass_"+uuid.New().String())
// Credential files use OS temp dir (/tmp) because some filesystems
// (e.g. ZFS on TrueNAS) ignore chmod, causing "group or world access" errors.
tempDir, err := os.MkdirTemp(os.TempDir(), "pgpass_"+uuid.New().String())
if err != nil {
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}

View File

@@ -110,6 +110,18 @@ func Test_Storage_BasicOperations(t *testing.T) {
S3Endpoint: "http://" + s3Container.endpoint,
},
},
{
name: "S3Storage_WithStorageClass",
storage: &s3_storage.S3Storage{
StorageID: uuid.New(),
S3Bucket: s3Container.bucketName,
S3Region: s3Container.region,
S3AccessKey: s3Container.accessKey,
S3SecretKey: s3Container.secretKey,
S3Endpoint: "http://" + s3Container.endpoint,
S3StorageClass: s3_storage.S3StorageClassStandard,
},
},
{
name: "NASStorage",
storage: &nas_storage.NASStorage{

View File

@@ -144,6 +144,27 @@ func (r *RcloneStorage) Validate(encryptor encryption.FieldEncryptor) error {
return errors.New("rclone config content is required")
}
configContent, err := encryptor.Decrypt(r.StorageID, r.ConfigContent)
if err != nil {
return fmt.Errorf("failed to decrypt rclone config content: %w", err)
}
parsedConfig, err := parseConfigContent(configContent)
if err != nil {
return fmt.Errorf("failed to parse rclone config: %w", err)
}
if len(parsedConfig) == 0 {
return errors.New("rclone config must contain at least one remote section")
}
if len(parsedConfig) > 1 {
return fmt.Errorf(
"rclone config must contain exactly one remote section, but found %d; create a separate storage for each remote",
len(parsedConfig),
)
}
return nil
}
@@ -230,6 +251,13 @@ func (r *RcloneStorage) getFs(
return nil, errors.New("rclone config must contain at least one remote section")
}
if len(parsedConfig) > 1 {
return nil, fmt.Errorf(
"rclone config must contain exactly one remote section, but found %d; create a separate storage for each remote",
len(parsedConfig),
)
}
var remoteName string
for section, values := range parsedConfig {
remoteName = section

View File

@@ -0,0 +1,118 @@
package rclone_storage
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test_ParseConfigContent_SingleRemote_ParsedCorrectly parses a single-remote
// rclone INI config and checks every key/value lands under the section name.
func Test_ParseConfigContent_SingleRemote_ParsedCorrectly(t *testing.T) {
content := `[myremote]
type = s3
provider = AWS
access_key_id = AKIAIOSFODNN7EXAMPLE
secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
region = us-east-1`
sections, err := parseConfigContent(content)
require.NoError(t, err)
require.Len(t, sections, 1)
assert.Equal(t, "s3", sections["myremote"]["type"])
assert.Equal(t, "AWS", sections["myremote"]["provider"])
assert.Equal(t, "AKIAIOSFODNN7EXAMPLE", sections["myremote"]["access_key_id"])
assert.Equal(t, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", sections["myremote"]["secret_access_key"])
assert.Equal(t, "us-east-1", sections["myremote"]["region"])
}
// Test_ParseConfigContent_MultipleRemotes_AllParsed verifies a config with two
// sections yields two entries, each with its own key/value pairs.
func Test_ParseConfigContent_MultipleRemotes_AllParsed(t *testing.T) {
content := `[remote1]
type = s3
region = us-east-1
[remote2]
type = drive
client_id = abc123`
sections, err := parseConfigContent(content)
require.NoError(t, err)
assert.Len(t, sections, 2)
assert.Equal(t, "s3", sections["remote1"]["type"])
assert.Equal(t, "us-east-1", sections["remote1"]["region"])
assert.Equal(t, "drive", sections["remote2"]["type"])
assert.Equal(t, "abc123", sections["remote2"]["client_id"])
}
// Test_ParseConfigContent_EmptyContent_ReturnsEmptyMap verifies an empty
// string parses without error into an empty (not nil-error) section map.
func Test_ParseConfigContent_EmptyContent_ReturnsEmptyMap(t *testing.T) {
sections, err := parseConfigContent("")
require.NoError(t, err)
assert.Empty(t, sections)
}
// Test_ParseConfigContent_CommentsAndBlankLines_Ignored verifies both `#` and
// `;` comment lines are skipped and do not become keys.
func Test_ParseConfigContent_CommentsAndBlankLines_Ignored(t *testing.T) {
content := `# This is a comment
; Another comment
[myremote]
type = s3
# inline comment line
region = eu-west-1`
sections, err := parseConfigContent(content)
require.NoError(t, err)
require.Len(t, sections, 1)
assert.Equal(t, "s3", sections["myremote"]["type"])
assert.Equal(t, "eu-west-1", sections["myremote"]["region"])
}
// Test_ParseConfigContent_ValueWithEqualsSign_PreservesFullValue verifies only
// the FIRST `=` splits key from value; later `=` stay part of the value
// (important for secrets that contain `=`).
func Test_ParseConfigContent_ValueWithEqualsSign_PreservesFullValue(t *testing.T) {
content := `[myremote]
type = s3
secret_access_key = abc=def=ghi`
sections, err := parseConfigContent(content)
require.NoError(t, err)
assert.Equal(t, "abc=def=ghi", sections["myremote"]["secret_access_key"])
}
// Test_ParseConfigContent_KeyWithoutValue_EmptyString verifies a key with
// nothing after `=` is kept with an empty-string value rather than dropped.
func Test_ParseConfigContent_KeyWithoutValue_EmptyString(t *testing.T) {
content := `[myremote]
type =
provider = AWS`
sections, err := parseConfigContent(content)
require.NoError(t, err)
assert.Equal(t, "", sections["myremote"]["type"])
assert.Equal(t, "AWS", sections["myremote"]["provider"])
}
// Test_ParseConfigContent_KeyValueOutsideSection_Ignored verifies key/value
// lines appearing before any `[section]` header are silently dropped.
func Test_ParseConfigContent_KeyValueOutsideSection_Ignored(t *testing.T) {
content := `orphan_key = orphan_value
[myremote]
type = s3`
sections, err := parseConfigContent(content)
require.NoError(t, err)
assert.Len(t, sections, 1)
assert.Equal(t, "s3", sections["myremote"]["type"])
}
// Verifies that surrounding whitespace is stripped from parsed values.
// NOTE(review): the fixture's leading whitespace may have been lost in
// transit; the trailing space on the region line still exercises trimming.
func Test_ParseConfigContent_WhitespaceAroundKeysAndValues_Trimmed(t *testing.T) {
	input := `[myremote]
type = s3
region = us-west-2 `

	parsed, err := parseConfigContent(input)
	require.NoError(t, err)

	assert.Equal(t, "s3", parsed["myremote"]["type"])
	assert.Equal(t, "us-west-2", parsed["myremote"]["region"])
}

View File

@@ -0,0 +1,13 @@
package s3_storage

// S3StorageClass identifies the S3 storage class applied to objects uploaded
// to an S3-compatible storage backend.
type S3StorageClass string

// Supported storage class values. The zero value (empty string) omits the
// storage class from upload requests so the bucket/provider default applies.
const (
	// S3StorageClassDefault leaves the storage class unset (provider default).
	S3StorageClassDefault S3StorageClass = ""
	// S3StorageClassStandard is the general-purpose STANDARD class.
	S3StorageClassStandard S3StorageClass = "STANDARD"
	// S3StorageClassStandardIA targets infrequently accessed data.
	S3StorageClassStandardIA S3StorageClass = "STANDARD_IA"
	// S3StorageClassOnezoneIA stores infrequently accessed data in one zone.
	S3StorageClassOnezoneIA S3StorageClass = "ONEZONE_IA"
	// S3StorageClassIntelligentTiering lets the provider move data between tiers.
	S3StorageClassIntelligentTiering S3StorageClass = "INTELLIGENT_TIERING"
	// S3StorageClassReducedRedundancy is the legacy reduced-redundancy class.
	S3StorageClassReducedRedundancy S3StorageClass = "REDUCED_REDUNDANCY"
	// S3StorageClassGlacierIR is Glacier Instant Retrieval.
	S3StorageClassGlacierIR S3StorageClass = "GLACIER_IR"
)

View File

@@ -43,9 +43,10 @@ type S3Storage struct {
S3SecretKey string `json:"s3SecretKey" gorm:"not null;type:text;column:s3_secret_key"`
S3Endpoint string `json:"s3Endpoint" gorm:"type:text;column:s3_endpoint"`
S3Prefix string `json:"s3Prefix" gorm:"type:text;column:s3_prefix"`
S3UseVirtualHostedStyle bool `json:"s3UseVirtualHostedStyle" gorm:"default:false;column:s3_use_virtual_hosted_style"`
SkipTLSVerify bool `json:"skipTLSVerify" gorm:"default:false;column:skip_tls_verify"`
S3Prefix string `json:"s3Prefix" gorm:"type:text;column:s3_prefix"`
S3UseVirtualHostedStyle bool `json:"s3UseVirtualHostedStyle" gorm:"default:false;column:s3_use_virtual_hosted_style"`
SkipTLSVerify bool `json:"skipTLSVerify" gorm:"default:false;column:skip_tls_verify"`
S3StorageClass S3StorageClass `json:"s3StorageClass" gorm:"type:text;column:s3_storage_class;default:''"`
}
func (s *S3Storage) TableName() string {
@@ -76,7 +77,7 @@ func (s *S3Storage) SaveFile(
ctx,
s.S3Bucket,
objectKey,
minio.PutObjectOptions{},
s.putObjectOptions(),
)
if err != nil {
return fmt.Errorf("failed to initiate multipart upload: %w", err)
@@ -151,15 +152,16 @@ func (s *S3Storage) SaveFile(
if err != nil {
return err
}
opts := s.putObjectOptions()
opts.SendContentMd5 = true
_, err = client.PutObject(
ctx,
s.S3Bucket,
objectKey,
bytes.NewReader([]byte{}),
0,
minio.PutObjectOptions{
SendContentMd5: true,
},
opts,
)
if err != nil {
return fmt.Errorf("failed to upload empty file: %w", err)
@@ -173,7 +175,7 @@ func (s *S3Storage) SaveFile(
objectKey,
uploadID,
parts,
minio.PutObjectOptions{},
s.putObjectOptions(),
)
if err != nil {
_ = coreClient.AbortMultipartUpload(ctx, s.S3Bucket, objectKey, uploadID)
@@ -350,6 +352,7 @@ func (s *S3Storage) Update(incoming *S3Storage) {
s.S3Endpoint = incoming.S3Endpoint
s.S3UseVirtualHostedStyle = incoming.S3UseVirtualHostedStyle
s.SkipTLSVerify = incoming.SkipTLSVerify
s.S3StorageClass = incoming.S3StorageClass
if incoming.S3AccessKey != "" {
s.S3AccessKey = incoming.S3AccessKey
@@ -363,6 +366,12 @@ func (s *S3Storage) Update(incoming *S3Storage) {
// otherwise we will have to transfer all the data to the new prefix
}
// putObjectOptions builds the minio upload options shared by the storage's
// PutObject calls, propagating the configured storage class. An empty
// S3StorageClass leaves the field blank so the bucket default is used.
func (s *S3Storage) putObjectOptions() minio.PutObjectOptions {
	return minio.PutObjectOptions{
		StorageClass: string(s.S3StorageClass),
	}
}
func (s *S3Storage) buildObjectKey(fileName string) string {
if s.S3Prefix == "" {
return fileName

View File

@@ -23,7 +23,7 @@ func (c *VersionController) RegisterRoutes(router *gin.RouterGroup) {
func (c *VersionController) GetVersion(ctx *gin.Context) {
version := os.Getenv("APP_VERSION")
if version == "" {
version = "dev"
version = "3.26.0"
}
ctx.JSON(http.StatusOK, VersionResponse{Version: version})

View File

@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
-- Add a per-storage S3 storage class (e.g. STANDARD_IA, ONEZONE_IA).
-- Empty string means "use the provider/bucket default".
ALTER TABLE s3_storages
ADD COLUMN s3_storage_class TEXT NOT NULL DEFAULT '';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
-- Revert: dropping the column loses any configured storage class.
ALTER TABLE s3_storages
DROP COLUMN s3_storage_class;
-- +goose StatementEnd

View File

@@ -3,12 +3,30 @@ import RequestOptions from '../../../shared/api/RequestOptions';
import { apiHelper } from '../../../shared/api/apiHelper';
import type { GetBackupsResponse } from '../model/GetBackupsResponse';
export interface BackupsFilters {
statuses?: string[];
beforeDate?: string;
pgWalBackupType?: string;
}
export const backupsApi = {
async getBackups(databaseId: string, limit?: number, offset?: number) {
async getBackups(databaseId: string, limit?: number, offset?: number, filters?: BackupsFilters) {
const params = new URLSearchParams({ database_id: databaseId });
if (limit !== undefined) params.append('limit', limit.toString());
if (offset !== undefined) params.append('offset', offset.toString());
if (filters?.statuses) {
for (const status of filters.statuses) {
params.append('status', status);
}
}
if (filters?.beforeDate) {
params.append('beforeDate', filters.beforeDate);
}
if (filters?.pgWalBackupType) {
params.append('pgWalBackupType', filters.pgWalBackupType);
}
return apiHelper.fetchGetJson<GetBackupsResponse>(
`${getApplicationServer()}/api/v1/backups?${params.toString()}`,
undefined,

View File

@@ -3,6 +3,7 @@ export { type Storage } from './models/Storage';
export { StorageType } from './models/StorageType';
export { type LocalStorage } from './models/LocalStorage';
export { type S3Storage } from './models/S3Storage';
export { S3StorageClass, S3StorageClassLabels } from './models/S3StorageClass';
export { type NASStorage } from './models/NASStorage';
export { getStorageLogoFromType } from './models/getStorageLogoFromType';
export { getStorageNameFromType } from './models/getStorageNameFromType';

View File

@@ -7,4 +7,5 @@ export interface S3Storage {
s3Prefix?: string;
s3UseVirtualHostedStyle?: boolean;
skipTLSVerify?: boolean;
s3StorageClass?: string;
}

View File

@@ -0,0 +1,19 @@
/**
 * S3 storage classes selectable for an S3 storage backend.
 * Values mirror the backend's S3StorageClass string constants; the empty
 * string means "use the provider/bucket default".
 */
export enum S3StorageClass {
  DEFAULT = '',
  STANDARD = 'STANDARD',
  STANDARD_IA = 'STANDARD_IA',
  ONEZONE_IA = 'ONEZONE_IA',
  INTELLIGENT_TIERING = 'INTELLIGENT_TIERING',
  REDUCED_REDUNDANCY = 'REDUCED_REDUNDANCY',
  GLACIER_IR = 'GLACIER_IR',
}

/** Human-readable labels for each storage class, keyed by enum value. */
export const S3StorageClassLabels: Record<S3StorageClass, string> = {
  [S3StorageClass.DEFAULT]: 'Default (Standard)',
  [S3StorageClass.STANDARD]: 'Standard',
  [S3StorageClass.STANDARD_IA]: 'Standard - Infrequent Access',
  [S3StorageClass.ONEZONE_IA]: 'One Zone - Infrequent Access',
  [S3StorageClass.INTELLIGENT_TIERING]: 'Intelligent Tiering',
  [S3StorageClass.REDUCED_REDUNDANCY]: 'Reduced Redundancy',
  [S3StorageClass.GLACIER_IR]: 'Glacier Instant Retrieval',
};

View File

@@ -6,6 +6,7 @@ import { useState } from 'react';
import { getApplicationServer } from '../../../constants';
import { type Backup, PgWalBackupType } from '../../../entity/backups';
import { type Database } from '../../../entity/databases';
import { ClipboardHelper } from '../../../shared/lib/ClipboardHelper';
import { getUserTimeFormat } from '../../../shared/time';
interface Props {
@@ -26,7 +27,7 @@ export const AgentRestoreComponent = ({ database, backup }: Props) => {
const copyToClipboard = async (text: string) => {
try {
await navigator.clipboard.writeText(text);
await ClipboardHelper.copyToClipboard(text);
message.success('Copied to clipboard');
} catch {
message.error('Failed to copy');

View File

@@ -5,6 +5,8 @@ import {
DeleteOutlined,
DownloadOutlined,
ExclamationCircleOutlined,
FilterFilled,
FilterOutlined,
InfoCircleOutlined,
LockOutlined,
SyncOutlined,
@@ -24,12 +26,14 @@ import {
backupConfigApi,
backupsApi,
} from '../../../entity/backups';
import type { BackupsFilters } from '../../../entity/backups/api/backupsApi';
import { type Database, DatabaseType, PostgresBackupType } from '../../../entity/databases';
import { getUserTimeFormat } from '../../../shared/time';
import { ConfirmationComponent } from '../../../shared/ui';
import { RestoresComponent } from '../../restores';
import { AgentRestoreComponent } from './AgentRestoreComponent';
import { BackupsBillingBannerComponent } from './BackupsBillingBannerComponent';
import { BackupsFiltersPanelComponent } from './BackupsFiltersPanelComponent';
const BACKUPS_PAGE_SIZE = 50;
@@ -74,6 +78,9 @@ export const BackupsComponent = ({
const [downloadingBackupId, setDownloadingBackupId] = useState<string | undefined>();
const [cancellingBackupId, setCancellingBackupId] = useState<string | undefined>();
const [isFilterPanelVisible, setIsFilterPanelVisible] = useState(false);
const [filters, setFilters] = useState<BackupsFilters>({});
const downloadBackup = async (backupId: string) => {
try {
await backupsApi.downloadBackup(backupId);
@@ -84,7 +91,7 @@ export const BackupsComponent = ({
}
};
const loadBackups = async (limit?: number) => {
const loadBackups = async (limit?: number, filtersOverride?: BackupsFilters) => {
if (isBackupsRequestInFlightRef.current) return;
isBackupsRequestInFlightRef.current = true;
@@ -92,9 +99,10 @@ export const BackupsComponent = ({
lastRequestTimeRef.current = requestTime;
const loadLimit = limit ?? currentLimit;
const activeFilters = filtersOverride ?? filters;
try {
const response = await backupsApi.getBackups(database.id, loadLimit, 0);
const response = await backupsApi.getBackups(database.id, loadLimit, 0, activeFilters);
if (lastRequestTimeRef.current !== requestTime) return;
@@ -124,7 +132,7 @@ export const BackupsComponent = ({
lastRequestTimeRef.current = requestTime;
try {
const response = await backupsApi.getBackups(database.id, newLimit, 0);
const response = await backupsApi.getBackups(database.id, newLimit, 0, filters);
if (lastRequestTimeRef.current !== requestTime) return;
@@ -206,13 +214,20 @@ export const BackupsComponent = ({
return () => {};
}, [database]);
useEffect(() => {
setCurrentLimit(BACKUPS_PAGE_SIZE);
setHasMore(true);
setIsBackupsLoading(true);
loadBackups(BACKUPS_PAGE_SIZE, filters).then(() => setIsBackupsLoading(false));
}, [filters]);
useEffect(() => {
const intervalId = setInterval(() => {
loadBackups();
}, 1_000);
return () => clearInterval(intervalId);
}, [currentLimit]);
}, [currentLimit, filters]);
useEffect(() => {
if (downloadingBackupId) {
@@ -432,29 +447,6 @@ export const BackupsComponent = ({
dataIndex: 'status',
key: 'status',
render: (status: BackupStatus, record: Backup) => renderStatus(status, record),
filters: [
{
value: BackupStatus.IN_PROGRESS,
text: 'In progress',
},
{
value: BackupStatus.FAILED,
text: 'Failed',
},
{
value: BackupStatus.COMPLETED,
text: 'Successful',
},
{
value: BackupStatus.DELETED,
text: 'Deleted',
},
{
value: BackupStatus.CANCELED,
text: 'Canceled',
},
],
onFilter: (value, record) => record.status === value,
},
{
title: (
@@ -502,6 +494,11 @@ export const BackupsComponent = ({
},
];
const isAnyFilterApplied =
(filters.statuses && filters.statuses.length > 0) ||
filters.beforeDate !== undefined ||
filters.pgWalBackupType !== undefined;
if (isBackupConfigLoading) {
return (
<div className="mb-5 flex items-center">
@@ -514,7 +511,35 @@ export const BackupsComponent = ({
<div
className={`w-full bg-white p-3 shadow md:p-5 dark:bg-gray-800 ${isDirectlyUnderTab ? 'rounded-tr-md rounded-br-md rounded-bl-md' : 'rounded-md'}`}
>
<h2 className="text-lg font-bold md:text-xl dark:text-white">Backups</h2>
<div className="flex items-center gap-2">
<h2 className="text-lg font-bold md:text-xl dark:text-white">Backups</h2>
<div className="relative">
{isFilterPanelVisible ? (
<FilterFilled
className="cursor-pointer text-blue-600"
onClick={() => setIsFilterPanelVisible(false)}
/>
) : (
<FilterOutlined
className="cursor-pointer"
onClick={() => setIsFilterPanelVisible(true)}
/>
)}
{!isFilterPanelVisible && isAnyFilterApplied && (
<span className="absolute -top-1 -right-1 h-2 w-2 rounded-full bg-blue-600" />
)}
</div>
</div>
{isFilterPanelVisible && (
<div className="mt-3">
<BackupsFiltersPanelComponent
filters={filters}
onFiltersChange={setFilters}
isWalDatabase={database.postgresql?.backupType === PostgresBackupType.WAL_V1}
/>
</div>
)}
{IS_CLOUD && (
<BackupsBillingBannerComponent

View File

@@ -0,0 +1,92 @@
import { DatePicker, Select } from 'antd';
import type { Dayjs } from 'dayjs';
import dayjs from 'dayjs';
import { BackupStatus, PgWalBackupType } from '../../../entity/backups';
import type { BackupsFilters } from '../../../entity/backups/api/backupsApi';
interface Props {
  /** Currently applied backup filters. */
  filters: BackupsFilters;
  /** Invoked with the complete, updated filter set whenever a control changes. */
  onFiltersChange: (filters: BackupsFilters) => void;
  /** When true, shows the WAL backup-type filter in addition to status/date. */
  isWalDatabase: boolean;
}
// Backup statuses offered in the multi-select status filter.
const statusOptions = [
  { label: 'In progress', value: BackupStatus.IN_PROGRESS },
  { label: 'Successful', value: BackupStatus.COMPLETED },
  { label: 'Failed', value: BackupStatus.FAILED },
  { label: 'Canceled', value: BackupStatus.CANCELED },
];

// WAL-specific backup kinds; only shown for WAL-enabled databases.
const pgWalBackupTypeOptions = [
  { label: 'Full backup', value: PgWalBackupType.PG_FULL_BACKUP },
  { label: 'WAL segment', value: PgWalBackupType.PG_WAL_SEGMENT },
];
/**
 * Filter panel for the backups list: status multi-select, a "before" date
 * picker, and (for WAL databases) a backup-type select. Stateless — every
 * change is reported upward via onFiltersChange with the full filter set.
 */
export const BackupsFiltersPanelComponent = ({
  filters,
  onFiltersChange,
  isWalDatabase,
}: Props) => {
  // Merge a partial change into the current filters and report the result.
  // Cleared controls set their field to undefined so "no filter" is explicit.
  const patchFilters = (patch: Partial<BackupsFilters>) => {
    onFiltersChange({ ...filters, ...patch });
  };

  const onStatusesChange = (selected: string[]) => {
    patchFilters({ statuses: selected.length > 0 ? selected : undefined });
  };

  const onBeforeDateChange = (picked: Dayjs | null) => {
    patchFilters({ beforeDate: picked ? picked.toISOString() : undefined });
  };

  const onWalTypeChange = (selected: string | undefined) => {
    patchFilters({ pgWalBackupType: selected });
  };

  return (
    <div className="flex flex-col gap-3">
      <div className="flex items-center gap-2">
        <span className="min-w-[90px] text-sm text-gray-500 dark:text-gray-400">Status</span>
        <Select
          mode="multiple"
          value={filters.statuses ?? []}
          onChange={onStatusesChange}
          options={statusOptions}
          placeholder="All statuses"
          size="small"
          variant="filled"
          className="w-[200px] [&_.ant-select-selector]:!rounded-md"
          allowClear
        />
      </div>
      <div className="flex items-center gap-2">
        <span className="min-w-[90px] text-sm text-gray-500 dark:text-gray-400">Before</span>
        <DatePicker
          value={filters.beforeDate ? dayjs(filters.beforeDate) : null}
          onChange={onBeforeDateChange}
          size="small"
          variant="filled"
          className="w-[200px] !rounded-md"
          allowClear
        />
      </div>
      {isWalDatabase && (
        <div className="flex items-center gap-2">
          <span className="min-w-[90px] text-sm text-gray-500 dark:text-gray-400">Backup type</span>
          <Select
            value={filters.pgWalBackupType}
            onChange={onWalTypeChange}
            options={pgWalBackupTypeOptions}
            placeholder="All types"
            size="small"
            variant="filled"
            className="w-[200px] [&_.ant-select-selector]:!rounded-md"
            allowClear
          />
        </div>
      )}
    </div>
  );
};

View File

@@ -1,5 +1,7 @@
import { useState } from 'react';
import { ClipboardHelper } from '../../../shared/lib/ClipboardHelper';
interface DbSizeCommand {
label: string;
code: string;
@@ -44,7 +46,7 @@ export function DbSizeCommands({ commands }: Props) {
<button
onClick={async () => {
try {
await navigator.clipboard.writeText(cmd.code);
await ClipboardHelper.copyToClipboard(cmd.code);
setCopiedIndex(index);
setTimeout(() => setCopiedIndex(null), 2000);
} catch {

View File

@@ -4,6 +4,7 @@ import { useState } from 'react';
import { getApplicationServer } from '../../../constants';
import { type Database, databaseApi } from '../../../entity/databases';
import { ClipboardHelper } from '../../../shared/lib/ClipboardHelper';
type Architecture = 'amd64' | 'arm64';
type PgDeploymentType = 'system' | 'folder' | 'docker';
@@ -42,7 +43,7 @@ export const AgentInstallationComponent = ({ database, onTokenGenerated }: Props
const copyToClipboard = async (text: string) => {
try {
await navigator.clipboard.writeText(text);
await ClipboardHelper.copyToClipboard(text);
message.success('Copied to clipboard');
} catch {
message.error('Failed to copy');
@@ -72,71 +73,281 @@ export const AgentInstallationComponent = ({ database, onTokenGenerated }: Props
);
const downloadCommand = `curl -L -o databasus-agent "${databasusHost}/api/v1/system/agent?arch=${selectedArch}" && chmod +x databasus-agent`;
const walQueuePath = pgDeploymentType === 'docker' ? '/wal-queue' : '/opt/databasus/wal-queue';
const postgresqlConfSettings = `wal_level = replica
archive_mode = on
archive_command = 'cp %p ${walQueuePath}/%f.tmp && mv ${walQueuePath}/%f.tmp ${walQueuePath}/%f'`;
const pgHbaEntry = `host replication all 127.0.0.1/32 md5`;
const grantReplicationSql = `ALTER ROLE <YOUR_PG_USER> WITH REPLICATION;`;
const createWalDirCommand = `mkdir -p /opt/databasus/wal-queue`;
// -- Step 2: Configure postgresql.conf --
const walDirPermissionsCommand = `chown postgres:postgres /opt/databasus/wal-queue
chmod 755 /opt/databasus/wal-queue`;
const renderStep2System = () => (
<div>
<div className="font-semibold dark:text-white">Step 2 Configure postgresql.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add or update these settings in your <code>postgresql.conf</code>.
</p>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Typical location Debian/Ubuntu:{' '}
<code>/etc/postgresql/&lt;version&gt;/main/postgresql.conf</code>, RHEL/CentOS:{' '}
<code>/var/lib/pgsql/&lt;version&gt;/data/postgresql.conf</code>
</p>
{renderCodeBlock(`wal_level = replica
archive_mode = on
archive_command = 'cp %p /opt/databasus/wal-queue/%f.tmp && mv /opt/databasus/wal-queue/%f.tmp /opt/databasus/wal-queue/%f'`)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Restart PostgreSQL to apply the changes:
</p>
{renderCodeBlock('sudo systemctl restart postgresql')}
</div>
);
const dockerWalDirPermissionsCommand = `# Inside the container (or via docker exec):
chown postgres:postgres /wal-queue`;
const renderStep2Folder = () => (
<div>
<div className="font-semibold dark:text-white">Step 2 Configure postgresql.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add or update these settings in the <code>postgresql.conf</code> inside your PostgreSQL data
directory.
</p>
{renderCodeBlock(`wal_level = replica
archive_mode = on
archive_command = 'cp %p /opt/databasus/wal-queue/%f.tmp && mv /opt/databasus/wal-queue/%f.tmp /opt/databasus/wal-queue/%f'`)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Restart PostgreSQL to apply the changes:
</p>
{renderCodeBlock('pg_ctl -D <YOUR_PG_DATA_DIR> restart')}
</div>
);
const dockerVolumeExample = `# In your docker run command:
const renderStep2Docker = () => (
<div>
<div className="font-semibold dark:text-white">Step 2 Configure postgresql.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add or update these settings in your <code>postgresql.conf</code> inside the container. The{' '}
<code>/wal-queue</code> path in <code>archive_command</code> is the path{' '}
<strong>inside the container</strong> it must match the volume mount target configured in
Step 5.
</p>
{renderCodeBlock(`wal_level = replica
archive_mode = on
archive_command = 'cp %p /wal-queue/%f.tmp && mv /wal-queue/%f.tmp /wal-queue/%f'`)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Restart the container to apply the changes:
</p>
{renderCodeBlock('docker restart <CONTAINER_NAME>')}
</div>
);
// -- Step 3: Configure pg_hba.conf --
const renderStep3System = () => (
<div>
<div className="font-semibold dark:text-white">Step 3 Configure pg_hba.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add this line to <code>pg_hba.conf</code> to allow <code>pg_basebackup</code> to take full
backups via a local replication connection. Adjust the address and auth method as needed.
</p>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Typical location Debian/Ubuntu:{' '}
<code>/etc/postgresql/&lt;version&gt;/main/pg_hba.conf</code>, RHEL/CentOS:{' '}
<code>/var/lib/pgsql/&lt;version&gt;/data/pg_hba.conf</code>
</p>
{renderCodeBlock(pgHbaEntry)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Restart PostgreSQL to apply the changes:
</p>
{renderCodeBlock('sudo systemctl restart postgresql')}
</div>
);
const renderStep3Folder = () => (
<div>
<div className="font-semibold dark:text-white">Step 3 Configure pg_hba.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add this line to <code>pg_hba.conf</code> in your PostgreSQL data directory to allow{' '}
<code>pg_basebackup</code> to take full backups via a local replication connection. Adjust
the address and auth method as needed.
</p>
{renderCodeBlock(pgHbaEntry)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Restart PostgreSQL to apply the changes:
</p>
{renderCodeBlock('pg_ctl -D <YOUR_PG_DATA_DIR> restart')}
</div>
);
const renderStep3Docker = () => (
<div>
<div className="font-semibold dark:text-white">Step 3 Configure pg_hba.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add this line to <code>pg_hba.conf</code> inside the container to allow{' '}
<code>pg_basebackup</code> to take full backups via a replication connection on the
container&apos;s loopback interface. Adjust the address and auth method as needed.
</p>
{renderCodeBlock(pgHbaEntry)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Restart the container to apply the changes:
</p>
{renderCodeBlock('docker restart <CONTAINER_NAME>')}
</div>
);
// -- Step 5: WAL queue directory --
const renderStep5System = () => (
<div>
<div className="font-semibold dark:text-white">Step 5 Create WAL queue directory</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
PostgreSQL will place WAL archive files here for the agent to upload.
</p>
{renderCodeBlock('mkdir -p /opt/databasus/wal-queue')}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Ensure the directory is writable by PostgreSQL and readable by the agent:
</p>
{renderCodeBlock(`chown postgres:postgres /opt/databasus/wal-queue
chmod 755 /opt/databasus/wal-queue`)}
</div>
);
const renderStep5Folder = () => (
<div>
<div className="font-semibold dark:text-white">Step 5 Create WAL queue directory</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
PostgreSQL will place WAL archive files here for the agent to upload.
</p>
{renderCodeBlock('mkdir -p /opt/databasus/wal-queue')}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Ensure the directory is writable by PostgreSQL and readable by the agent:
</p>
{renderCodeBlock(`chown postgres:postgres /opt/databasus/wal-queue
chmod 755 /opt/databasus/wal-queue`)}
</div>
);
const renderStep5Docker = () => (
<div>
<div className="font-semibold dark:text-white">Step 5 Set up WAL queue volume</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
The WAL queue directory must be a <strong>volume mount</strong> shared between the
PostgreSQL container and the host. The agent reads WAL files from the host path, while
PostgreSQL writes to the container path via <code>archive_command</code>.
</p>
{renderCodeBlock('mkdir -p /opt/databasus/wal-queue')}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Then mount it as a volume so both the container and the agent can access it:
</p>
{renderCodeBlock(`# In your docker run command:
docker run ... -v /opt/databasus/wal-queue:/wal-queue ...
# Or in docker-compose.yml:
volumes:
- /opt/databasus/wal-queue:/wal-queue`;
- /opt/databasus/wal-queue:/wal-queue`)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Ensure the directory inside the container is owned by the <code>postgres</code> user:
</p>
{renderCodeBlock(`# Inside the container (or via docker exec):
chown postgres:postgres /wal-queue`)}
</div>
);
const buildStartCommand = () => {
const baseFlags = [
` --databasus-host=${databasusHost}`,
` --db-id=${database.id}`,
` --token=<YOUR_AGENT_TOKEN>`,
` --pg-host=localhost`,
` --pg-port=5432`,
` --pg-user=<YOUR_PG_USER>`,
` --pg-password=<YOUR_PG_PASSWORD>`,
];
// -- Step 6: Start the agent --
const baseFlagsWithContinuation = baseFlags.map((f) => f + ' \\');
const buildBaseFlags = () => [
` --databasus-host=${databasusHost} \\`,
` --db-id=${database.id} \\`,
` --token=<YOUR_AGENT_TOKEN> \\`,
` --pg-host=localhost \\`,
` --pg-port=5432 \\`,
` --pg-user=<YOUR_PG_USER> \\`,
` --pg-password=<YOUR_PG_PASSWORD> \\`,
];
if (pgDeploymentType === 'system') {
return [
'./databasus-agent start \\',
...baseFlagsWithContinuation,
` --pg-type=host \\`,
` --pg-wal-dir=/opt/databasus/wal-queue`,
].join('\n');
}
if (pgDeploymentType === 'folder') {
return [
'./databasus-agent start \\',
...baseFlagsWithContinuation,
` --pg-type=host \\`,
` --pg-host-bin-dir=<PATH_TO_PG_BIN_DIR> \\`,
` --pg-wal-dir=/opt/databasus/wal-queue`,
].join('\n');
}
return [
const renderStep6System = () => {
const startCommand = [
'./databasus-agent start \\',
...baseFlagsWithContinuation,
...buildBaseFlags(),
` --pg-type=host \\`,
` --pg-wal-dir=/opt/databasus/wal-queue`,
].join('\n');
return (
<div>
<div className="font-semibold dark:text-white">Step 6 Start the agent</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.
</p>
{renderCodeBlock(startCommand)}
</div>
);
};
const renderStep6Folder = () => {
const startCommand = [
'./databasus-agent start \\',
...buildBaseFlags(),
` --pg-type=host \\`,
` --pg-host-bin-dir=<PATH_TO_PG_BIN_DIR> \\`,
` --pg-wal-dir=/opt/databasus/wal-queue`,
].join('\n');
return (
<div>
<div className="font-semibold dark:text-white">Step 6 Start the agent</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.{' '}
<code>--pg-host-bin-dir</code> should point to the directory containing{' '}
<code>pg_basebackup</code> (e.g. <code>/usr/lib/postgresql/17/bin</code>).
</p>
{renderCodeBlock(startCommand)}
</div>
);
};
const renderStep6Docker = () => {
const startCommand = [
'./databasus-agent start \\',
...buildBaseFlags(),
` --pg-type=docker \\`,
` --pg-docker-container-name=<CONTAINER_NAME> \\`,
` --pg-wal-dir=/opt/databasus/wal-queue`,
].join('\n');
return (
<div>
<div className="font-semibold dark:text-white">Step 6 Start the agent</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.
</p>
<p className="mt-1 text-sm text-amber-600 dark:text-amber-400">
Use the PostgreSQL port <strong>inside the container</strong> (usually 5432), not the
host-mapped port.
</p>
{renderCodeBlock(startCommand)}
</div>
);
};
// -- Dispatch helpers --
const renderStep2 = () => {
if (pgDeploymentType === 'system') return renderStep2System();
if (pgDeploymentType === 'folder') return renderStep2Folder();
return renderStep2Docker();
};
const renderStep3 = () => {
if (pgDeploymentType === 'system') return renderStep3System();
if (pgDeploymentType === 'folder') return renderStep3Folder();
return renderStep3Docker();
};
const renderStep5 = () => {
if (pgDeploymentType === 'system') return renderStep5System();
if (pgDeploymentType === 'folder') return renderStep5Folder();
return renderStep5Docker();
};
const renderStep6 = () => {
if (pgDeploymentType === 'system') return renderStep6System();
if (pgDeploymentType === 'folder') return renderStep6Folder();
return renderStep6Docker();
};
return (
@@ -247,31 +458,8 @@ volumes:
{renderCodeBlock(downloadCommand)}
</div>
<div>
<div className="font-semibold dark:text-white">Step 2 Configure postgresql.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add or update these settings in your <code>postgresql.conf</code>, then{' '}
<strong>restart PostgreSQL</strong>.
</p>
{pgDeploymentType === 'docker' && (
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
The <code>archive_command</code> path (<code>/wal-queue</code>) is the path{' '}
<strong>inside the container</strong>. It must match the volume mount target see
Step 5.
</p>
)}
{renderCodeBlock(postgresqlConfSettings)}
</div>
<div>
<div className="font-semibold dark:text-white">Step 3 Configure pg_hba.conf</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Add this line to <code>pg_hba.conf</code>. This is required for{' '}
<code>pg_basebackup</code> to take full backups not for streaming replication. Adjust
the address and auth method as needed, then reload PostgreSQL.
</p>
{renderCodeBlock(pgHbaEntry)}
</div>
{renderStep2()}
{renderStep3()}
<div>
<div className="font-semibold dark:text-white">Step 4 Grant replication privilege</div>
@@ -282,58 +470,8 @@ volumes:
{renderCodeBlock(grantReplicationSql)}
</div>
<div>
<div className="font-semibold dark:text-white">
Step 5 {' '}
{pgDeploymentType === 'docker'
? 'Set up WAL queue volume'
: 'Create WAL queue directory'}
</div>
{pgDeploymentType === 'docker' ? (
<>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
The WAL queue directory must be a <strong>volume mount</strong> shared between the
PostgreSQL container and the host. The agent reads WAL files from the host path,
while PostgreSQL writes to the container path via <code>archive_command</code>.
</p>
{renderCodeBlock(createWalDirCommand)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Then mount it as a volume so both the container and the agent can access it:
</p>
{renderCodeBlock(dockerVolumeExample)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Ensure the directory inside the container is owned by the <code>postgres</code>{' '}
user:
</p>
{renderCodeBlock(dockerWalDirPermissionsCommand)}
</>
) : (
<>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
PostgreSQL will place WAL archive files here for the agent to upload.
</p>
{renderCodeBlock(createWalDirCommand)}
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
Ensure the directory is writable by PostgreSQL and readable by the agent:
</p>
{renderCodeBlock(walDirPermissionsCommand)}
</>
)}
</div>
<div>
<div className="font-semibold dark:text-white">Step 6 Start the agent</div>
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.
</p>
{pgDeploymentType === 'docker' && (
<p className="mt-1 text-sm text-amber-600 dark:text-amber-400">
Use the PostgreSQL port <strong>inside the container</strong> (usually 5432), not the
host-mapped port.
</p>
)}
{renderCodeBlock(buildStartCommand())}
</div>
{renderStep5()}
{renderStep6()}
<div>
<div className="font-semibold dark:text-white">After installation</div>

View File

@@ -5,7 +5,9 @@ import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MariadbConnectionStringParser } from '../../../../entity/databases/model/mariadb/MariadbConnectionStringParser';
import { ClipboardHelper } from '../../../../shared/lib/ClipboardHelper';
import { ToastHelper } from '../../../../shared/toast';
import { ClipboardPasteModalComponent } from '../../../../shared/ui';
interface Props {
database: Database;
@@ -49,41 +51,52 @@ export const EditMariaDbSpecificDataComponent = ({
const hasAdvancedValues = !!database.mariadb?.isExcludeEvents;
const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
const [isShowPasteModal, setIsShowPasteModal] = useState(false);
const applyConnectionString = (text: string) => {
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = MariadbConnectionStringParser.parse(trimmedText);
if ('error' in result) {
message.error(result.error);
return;
}
if (!editingDatabase?.mariadb) return;
const updatedDatabase: Database = {
...editingDatabase,
mariadb: {
...editingDatabase.mariadb,
host: result.host,
port: result.port,
username: result.username,
password: result.password,
database: result.database,
isHttps: result.isHttps,
},
};
setEditingDatabase(updatedDatabase);
setIsConnectionTested(false);
message.success('Connection string parsed successfully');
};
const parseFromClipboard = async () => {
if (!ClipboardHelper.isClipboardApiAvailable()) {
setIsShowPasteModal(true);
return;
}
try {
const text = await navigator.clipboard.readText();
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = MariadbConnectionStringParser.parse(trimmedText);
if ('error' in result) {
message.error(result.error);
return;
}
if (!editingDatabase?.mariadb) return;
const updatedDatabase: Database = {
...editingDatabase,
mariadb: {
...editingDatabase.mariadb,
host: result.host,
port: result.port,
username: result.username,
password: result.password,
database: result.database,
isHttps: result.isHttps,
},
};
setEditingDatabase(updatedDatabase);
setIsConnectionTested(false);
message.success('Connection string parsed successfully');
const text = await ClipboardHelper.readFromClipboard();
applyConnectionString(text);
} catch {
message.error('Failed to read clipboard. Please check browser permissions.');
}
@@ -408,6 +421,15 @@ export const EditMariaDbSpecificDataComponent = ({
list.
</div>
)}
<ClipboardPasteModalComponent
open={isShowPasteModal}
onSubmit={(text) => {
setIsShowPasteModal(false);
applyConnectionString(text);
}}
onCancel={() => setIsShowPasteModal(false)}
/>
</div>
);
};

View File

@@ -5,7 +5,9 @@ import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MongodbConnectionStringParser } from '../../../../entity/databases/model/mongodb/MongodbConnectionStringParser';
import { ClipboardHelper } from '../../../../shared/lib/ClipboardHelper';
import { ToastHelper } from '../../../../shared/toast';
import { ClipboardPasteModalComponent } from '../../../../shared/ui';
interface Props {
database: Database;
@@ -52,56 +54,65 @@ export const EditMongoDbSpecificDataComponent = ({
!!database.mongodb?.isDirectConnection;
const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
const [isShowPasteModal, setIsShowPasteModal] = useState(false);
// Parses a MongoDB connection string and copies its fields into the database
// currently being edited. Shared by the clipboard shortcut and the
// manual-paste modal, so `text` may come from either source.
const applyConnectionString = (text: string) => {
  const trimmedText = text.trim();
  if (!trimmedText) {
    message.error('Clipboard is empty');
    return;
  }
  // Parser returns either the parsed fields or an object with an `error` key.
  const result = MongodbConnectionStringParser.parse(trimmedText);
  if ('error' in result) {
    message.error(result.error);
    return;
  }
  // Nothing to update if no MongoDB-specific config is loaded yet.
  if (!editingDatabase?.mongodb) return;
  const updatedDatabase: Database = {
    ...editingDatabase,
    mongodb: {
      ...editingDatabase.mongodb,
      host: result.host,
      port: result.port,
      username: result.username,
      // mongodb:// URIs may omit the password entirely.
      password: result.password || '',
      database: result.database,
      authDatabase: result.authDatabase,
      isHttps: result.useTls,
      isSrv: result.isSrv,
      isDirectConnection: result.isDirectConnection,
      cpuCount: 1,
    },
  };
  // SRV / direct-connection flags live in the advanced section — reveal it so
  // the user can see what the parser just set.
  if (result.isSrv || result.isDirectConnection) {
    setShowAdvanced(true);
  }
  setEditingDatabase(updatedDatabase);
  // New connection details invalidate any previously passed connection test.
  setIsConnectionTested(false);
  if (!result.password) {
    message.warning('Connection string parsed successfully. Please enter the password manually.');
  } else {
    message.success('Connection string parsed successfully');
  }
};
const parseFromClipboard = async () => {
if (!ClipboardHelper.isClipboardApiAvailable()) {
setIsShowPasteModal(true);
return;
}
try {
const text = await navigator.clipboard.readText();
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = MongodbConnectionStringParser.parse(trimmedText);
if ('error' in result) {
message.error(result.error);
return;
}
if (!editingDatabase?.mongodb) return;
const updatedDatabase: Database = {
...editingDatabase,
mongodb: {
...editingDatabase.mongodb,
host: result.host,
port: result.port,
username: result.username,
password: result.password || '',
database: result.database,
authDatabase: result.authDatabase,
isHttps: result.useTls,
isSrv: result.isSrv,
isDirectConnection: result.isDirectConnection,
cpuCount: 1,
},
};
if (result.isSrv || result.isDirectConnection) {
setShowAdvanced(true);
}
setEditingDatabase(updatedDatabase);
setIsConnectionTested(false);
if (!result.password) {
message.warning(
'Connection string parsed successfully. Please enter the password manually.',
);
} else {
message.success('Connection string parsed successfully');
}
const text = await ClipboardHelper.readFromClipboard();
applyConnectionString(text);
} catch {
message.error('Failed to read clipboard. Please check browser permissions.');
}
@@ -501,6 +512,15 @@ export const EditMongoDbSpecificDataComponent = ({
list.
</div>
)}
<ClipboardPasteModalComponent
open={isShowPasteModal}
onSubmit={(text) => {
setIsShowPasteModal(false);
applyConnectionString(text);
}}
onCancel={() => setIsShowPasteModal(false)}
/>
</div>
);
};

View File

@@ -5,7 +5,9 @@ import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MySqlConnectionStringParser } from '../../../../entity/databases/model/mysql/MySqlConnectionStringParser';
import { ClipboardHelper } from '../../../../shared/lib/ClipboardHelper';
import { ToastHelper } from '../../../../shared/toast';
import { ClipboardPasteModalComponent } from '../../../../shared/ui';
interface Props {
database: Database;
@@ -46,41 +48,52 @@ export const EditMySqlSpecificDataComponent = ({
const [isTestingConnection, setIsTestingConnection] = useState(false);
const [isConnectionFailed, setIsConnectionFailed] = useState(false);
const [isShowPasteModal, setIsShowPasteModal] = useState(false);
// Parses a MySQL connection string and copies its fields (host, port,
// credentials, database, TLS flag) into the database currently being edited.
// Shared by the clipboard shortcut and the manual-paste modal, so `text` may
// come from either source.
const applyConnectionString = (text: string) => {
  const trimmedText = text.trim();
  if (!trimmedText) {
    message.error('Clipboard is empty');
    return;
  }
  // Parser returns either the parsed fields or an object with an `error` key.
  const result = MySqlConnectionStringParser.parse(trimmedText);
  if ('error' in result) {
    message.error(result.error);
    return;
  }
  // Nothing to update if no MySQL-specific config is loaded yet.
  if (!editingDatabase?.mysql) return;
  const updatedDatabase: Database = {
    ...editingDatabase,
    mysql: {
      ...editingDatabase.mysql,
      host: result.host,
      port: result.port,
      username: result.username,
      password: result.password,
      database: result.database,
      isHttps: result.isHttps,
    },
  };
  setEditingDatabase(updatedDatabase);
  // New connection details invalidate any previously passed connection test.
  setIsConnectionTested(false);
  message.success('Connection string parsed successfully');
};
const parseFromClipboard = async () => {
if (!ClipboardHelper.isClipboardApiAvailable()) {
setIsShowPasteModal(true);
return;
}
try {
const text = await navigator.clipboard.readText();
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = MySqlConnectionStringParser.parse(trimmedText);
if ('error' in result) {
message.error(result.error);
return;
}
if (!editingDatabase?.mysql) return;
const updatedDatabase: Database = {
...editingDatabase,
mysql: {
...editingDatabase.mysql,
host: result.host,
port: result.port,
username: result.username,
password: result.password,
database: result.database,
isHttps: result.isHttps,
},
};
setEditingDatabase(updatedDatabase);
setIsConnectionTested(false);
message.success('Connection string parsed successfully');
const text = await ClipboardHelper.readFromClipboard();
applyConnectionString(text);
} catch {
message.error('Failed to read clipboard. Please check browser permissions.');
}
@@ -359,6 +372,15 @@ export const EditMySqlSpecificDataComponent = ({
list.
</div>
)}
<ClipboardPasteModalComponent
open={isShowPasteModal}
onSubmit={(text) => {
setIsShowPasteModal(false);
applyConnectionString(text);
}}
onCancel={() => setIsShowPasteModal(false)}
/>
</div>
);
};

View File

@@ -5,7 +5,9 @@ import { useEffect, useState } from 'react';
import { IS_CLOUD } from '../../../../constants';
import { type Database, PostgresBackupType, databaseApi } from '../../../../entity/databases';
import { ConnectionStringParser } from '../../../../entity/databases/model/postgresql/ConnectionStringParser';
import { ClipboardHelper } from '../../../../shared/lib/ClipboardHelper';
import { ToastHelper } from '../../../../shared/toast';
import { ClipboardPasteModalComponent } from '../../../../shared/ui';
interface Props {
database: Database;
@@ -54,42 +56,53 @@ export const EditPostgreSqlSpecificDataComponent = ({
const [hasAutoAddedPublicSchema, setHasAutoAddedPublicSchema] = useState(false);
const [isShowPasteModal, setIsShowPasteModal] = useState(false);
// Parses a PostgreSQL connection string and copies its fields into the
// database currently being edited. Shared by the clipboard shortcut and the
// manual-paste modal, so `text` may come from either source.
const applyConnectionString = (text: string) => {
  const trimmedText = text.trim();
  if (!trimmedText) {
    message.error('Clipboard is empty');
    return;
  }
  // Parser returns either the parsed fields or an object with an `error` key.
  const result = ConnectionStringParser.parse(trimmedText);
  if ('error' in result) {
    message.error(result.error);
    return;
  }
  // Nothing to update if no PostgreSQL-specific config is loaded yet.
  if (!editingDatabase?.postgresql) return;
  const updatedDatabase: Database = {
    ...editingDatabase,
    postgresql: {
      ...editingDatabase.postgresql,
      host: result.host,
      port: result.port,
      username: result.username,
      password: result.password,
      database: result.database,
      isHttps: result.isHttps,
      cpuCount: 1,
    },
  };
  // Supabase hosts need the `public` schema pre-selected — helper handles that.
  setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
  // New connection details invalidate any previously passed connection test.
  setIsConnectionTested(false);
  message.success('Connection string parsed successfully');
};
const parseFromClipboard = async () => {
if (!ClipboardHelper.isClipboardApiAvailable()) {
setIsShowPasteModal(true);
return;
}
try {
const text = await navigator.clipboard.readText();
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = ConnectionStringParser.parse(trimmedText);
if ('error' in result) {
message.error(result.error);
return;
}
if (!editingDatabase?.postgresql) return;
const updatedDatabase: Database = {
...editingDatabase,
postgresql: {
...editingDatabase.postgresql,
host: result.host,
port: result.port,
username: result.username,
password: result.password,
database: result.database,
isHttps: result.isHttps,
cpuCount: 1,
},
};
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
setIsConnectionTested(false);
message.success('Connection string parsed successfully');
const text = await ClipboardHelper.readFromClipboard();
applyConnectionString(text);
} catch {
message.error('Failed to read clipboard. Please check browser permissions.');
}
@@ -603,6 +616,15 @@ export const EditPostgreSqlSpecificDataComponent = ({
<div>
{renderBackupTypeSelector()}
{renderFormContent()}
<ClipboardPasteModalComponent
open={isShowPasteModal}
onSubmit={(text) => {
setIsShowPasteModal(false);
applyConnectionString(text);
}}
onCancel={() => setIsShowPasteModal(false)}
/>
</div>
);
};

View File

@@ -12,6 +12,7 @@ import { useEffect, useRef, useState } from 'react';
import type { Backup } from '../../../entity/backups';
import { type Database, DatabaseType } from '../../../entity/databases';
import { type Restore, RestoreStatus, restoreApi } from '../../../entity/restores';
import { ClipboardHelper } from '../../../shared/lib/ClipboardHelper';
import { getUserTimeFormat } from '../../../shared/time';
import { ConfirmationComponent } from '../../../shared/ui';
import { EditDatabaseSpecificDataComponent } from '../../databases/ui/edit/EditDatabaseSpecificDataComponent';
@@ -328,7 +329,7 @@ export const RestoresComponent = ({ database, backup }: Props) => {
<Button
icon={<CopyOutlined />}
onClick={() => {
navigator.clipboard.writeText(showingRestoreError.failMessage || '');
ClipboardHelper.copyToClipboard(showingRestoreError.failMessage || '');
message.success('Error message copied to clipboard');
}}
>

View File

@@ -5,6 +5,7 @@ import { useEffect, useRef, useState } from 'react';
import { IS_CLOUD, getApplicationServer } from '../../../constants';
import { settingsApi } from '../../../entity/users/api/settingsApi';
import type { UsersSettings } from '../../../entity/users/model/UsersSettings';
import { ClipboardHelper } from '../../../shared/lib/ClipboardHelper';
import { AuditLogsComponent } from './AuditLogsComponent';
interface Props {
@@ -247,7 +248,9 @@ export function SettingsComponent({ contentHeight }: Props) {
size="small"
className="ml-2 opacity-0 transition-opacity group-hover:opacity-100"
onClick={() => {
navigator.clipboard.writeText(`${getApplicationServer()}/api/v1/system/health`);
ClipboardHelper.copyToClipboard(
`${getApplicationServer()}/api/v1/system/health`,
);
message.success('Health-check endpoint copied to clipboard');
}}
>

View File

@@ -1,8 +1,8 @@
import { DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import { Checkbox, Input, Tooltip } from 'antd';
import { Checkbox, Input, Select, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import type { Storage } from '../../../../../entity/storages';
import { S3StorageClass, S3StorageClassLabels, type Storage } from '../../../../../entity/storages';
interface Props {
storage: Storage;
@@ -20,7 +20,8 @@ export function EditS3StorageComponent({
const hasAdvancedValues =
!!storage?.s3Storage?.s3Prefix ||
!!storage?.s3Storage?.s3UseVirtualHostedStyle ||
!!storage?.s3Storage?.skipTLSVerify;
!!storage?.s3Storage?.skipTLSVerify ||
!!storage?.s3Storage?.s3StorageClass;
const [showAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
useEffect(() => {
@@ -278,6 +279,40 @@ export function EditS3StorageComponent({
</Tooltip>
</div>
</div>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[110px] sm:mb-0">Storage class</div>
<div className="flex items-center">
<Select
value={storage?.s3Storage?.s3StorageClass || S3StorageClass.DEFAULT}
options={Object.entries(S3StorageClassLabels).map(([value, label]) => ({
value,
label,
}))}
onChange={(value) => {
if (!storage?.s3Storage) return;
setStorage({
...storage,
s3Storage: {
...storage.s3Storage,
s3StorageClass: value,
},
});
setUnsaved();
}}
size="small"
className="w-[250px] max-w-[250px]"
/>
<Tooltip
className="cursor-pointer"
title="S3 storage class for uploaded objects. Leave as default for Standard. Some providers offer cheaper classes like One Zone IA. Do not use Glacier/Deep Archive — files must be immediately accessible for restores."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
</>
)}

View File

@@ -1,4 +1,4 @@
import type { Storage } from '../../../../../entity/storages';
import { S3StorageClass, S3StorageClassLabels, type Storage } from '../../../../../entity/storages';
interface Props {
storage: Storage;
@@ -52,6 +52,14 @@ export function ShowS3StorageComponent({ storage }: Props) {
Enabled
</div>
)}
{storage?.s3Storage?.s3StorageClass && (
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Storage Class</div>
{S3StorageClassLabels[storage.s3Storage.s3StorageClass as S3StorageClass] ||
storage.s3Storage.s3StorageClass}
</div>
)}
</>
);
}

View File

@@ -34,7 +34,7 @@ export function AuthNavbarComponent() {
{!IS_CLOUD && (
<a
className="!text-black hover:opacity-80 dark:!text-gray-200"
className="!text-black !underline !decoration-blue-600 !decoration-2 underline-offset-2 hover:opacity-80 dark:!text-gray-200"
href="https://databasus.com/cloud"
target="_blank"
rel="noreferrer"

View File

@@ -0,0 +1,26 @@
/**
 * Thin wrapper around clipboard access that degrades gracefully when the
 * asynchronous Clipboard API is unavailable (e.g. plain-HTTP deployments).
 */
export class ClipboardHelper {
  /**
   * True when the asynchronous Clipboard API can be used. The API is only
   * exposed in secure contexts (HTTPS / localhost), so insecure deployments
   * must rely on the fallbacks below.
   */
  static isClipboardApiAvailable(): boolean {
    return !!(navigator.clipboard && window.isSecureContext);
  }

  /**
   * Copies `text` to the clipboard. Prefers the Clipboard API; falls back to
   * a hidden textarea + `document.execCommand('copy')` (deprecated, but the
   * only option in insecure contexts or when the API call is denied).
   */
  static async copyToClipboard(text: string): Promise<void> {
    if (this.isClipboardApiAvailable()) {
      try {
        await navigator.clipboard.writeText(text);
        return;
      } catch {
        // Permission can be denied even in secure contexts — fall through to
        // the legacy textarea approach instead of failing outright.
      }
    }
    const textarea = document.createElement('textarea');
    textarea.value = text;
    // `readonly` prevents the virtual keyboard from popping up on mobile;
    // fixed position + zero opacity keeps it invisible and out of layout flow.
    textarea.setAttribute('readonly', '');
    textarea.style.position = 'fixed';
    textarea.style.opacity = '0';
    document.body.appendChild(textarea);
    try {
      textarea.select();
      document.execCommand('copy');
    } finally {
      // Always clean up the temporary element, even if select/copy throws.
      document.body.removeChild(textarea);
    }
  }

  /**
   * Reads text from the clipboard via the Clipboard API. There is no DOM
   * fallback for reading — callers should check `isClipboardApiAvailable()`
   * first and offer a manual-paste UI otherwise.
   */
  static async readFromClipboard(): Promise<string> {
    return navigator.clipboard.readText();
  }
}

View File

@@ -0,0 +1,52 @@
import { Button, Input, Modal } from 'antd';
import { useState } from 'react';
interface Props {
open: boolean;
onSubmit(text: string): void;
onCancel(): void;
}
export function ClipboardPasteModalComponent({ open, onSubmit, onCancel }: Props) {
const [value, setValue] = useState('');
const handleSubmit = () => {
const trimmed = value.trim();
if (!trimmed) return;
onSubmit(trimmed);
setValue('');
};
const handleCancel = () => {
setValue('');
onCancel();
};
return (
<Modal
title="Paste from clipboard"
open={open}
onCancel={handleCancel}
footer={
<div className="flex justify-end gap-2">
<Button onClick={handleCancel}>Cancel</Button>
<Button type="primary" disabled={!value.trim()} onClick={handleSubmit}>
Submit
</Button>
</div>
}
>
<p className="mb-2 text-sm text-gray-500 dark:text-gray-400">
Automatic clipboard access is not available. Please paste your content below.
</p>
<Input.TextArea
value={value}
onChange={(e) => setValue(e.target.value)}
placeholder="Paste your connection string here..."
rows={4}
autoFocus
/>
</Modal>
);
}

View File

@@ -1,3 +1,4 @@
export { ClipboardPasteModalComponent } from './ClipboardPasteModalComponent';
export { CloudflareTurnstileWidget } from './CloudflareTurnstileWidget';
export { ConfirmationComponent } from './ConfirmationComponent';
export { StarButtonComponent } from './StarButtonComponent';

View File

@@ -232,7 +232,7 @@ export const MainScreenComponent = () => {
{!IS_CLOUD && (
<a
className="!text-black hover:opacity-80 dark:!text-gray-200"
className="!text-black !underline !decoration-blue-600 !decoration-2 underline-offset-2 hover:opacity-80 dark:!text-gray-200"
href="https://databasus.com/cloud"
target="_blank"
rel="noreferrer"