mirror of
https://github.com/databasus/databasus.git
synced 2026-04-06 00:32:03 +02:00
Compare commits
6 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
81f77760c9 | ||
|
|
63e23b2489 | ||
|
|
8c1b8ac00f | ||
|
|
1926096377 | ||
|
|
0a131511a8 | ||
|
|
aa01ce0b76 |
@@ -40,12 +40,15 @@ func (c *BackupController) RegisterPublicRoutes(router *gin.RouterGroup) {
|
||||
|
||||
// GetBackups
|
||||
// @Summary Get backups for a database
|
||||
// @Description Get paginated backups for the specified database
|
||||
// @Description Get paginated backups for the specified database with optional filters
|
||||
// @Tags backups
|
||||
// @Produce json
|
||||
// @Param database_id query string true "Database ID"
|
||||
// @Param limit query int false "Number of items per page" default(10)
|
||||
// @Param offset query int false "Offset for pagination" default(0)
|
||||
// @Param status query []string false "Filter by backup status (can be repeated)" Enums(IN_PROGRESS, COMPLETED, FAILED, CANCELED)
|
||||
// @Param beforeDate query string false "Filter backups created before this date (RFC3339)" format(date-time)
|
||||
// @Param pgWalBackupType query string false "Filter by WAL backup type" Enums(PG_FULL_BACKUP, PG_WAL_SEGMENT)
|
||||
// @Success 200 {object} backups_dto.GetBackupsResponse
|
||||
// @Failure 400
|
||||
// @Failure 401
|
||||
@@ -70,7 +73,9 @@ func (c *BackupController) GetBackups(ctx *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
response, err := c.backupService.GetBackups(user, databaseID, request.Limit, request.Offset)
|
||||
filters := c.buildBackupFilters(&request)
|
||||
|
||||
response, err := c.backupService.GetBackups(user, databaseID, request.Limit, request.Offset, filters)
|
||||
if err != nil {
|
||||
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
@@ -359,3 +364,35 @@ func (c *BackupController) startDownloadHeartbeat(ctx context.Context, userID uu
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BackupController) buildBackupFilters(
|
||||
request *backups_dto.GetBackupsRequest,
|
||||
) *backups_core.BackupFilters {
|
||||
isHasFilters := len(request.Statuses) > 0 ||
|
||||
request.BeforeDate != nil ||
|
||||
request.PgWalBackupType != nil
|
||||
|
||||
if !isHasFilters {
|
||||
return nil
|
||||
}
|
||||
|
||||
filters := &backups_core.BackupFilters{}
|
||||
|
||||
if len(request.Statuses) > 0 {
|
||||
statuses := make([]backups_core.BackupStatus, 0, len(request.Statuses))
|
||||
for _, statusStr := range request.Statuses {
|
||||
statuses = append(statuses, backups_core.BackupStatus(statusStr))
|
||||
}
|
||||
|
||||
filters.Statuses = statuses
|
||||
}
|
||||
|
||||
filters.BeforeDate = request.BeforeDate
|
||||
|
||||
if request.PgWalBackupType != nil {
|
||||
walType := backups_core.PgWalBackupType(*request.PgWalBackupType)
|
||||
filters.PgWalBackupType = &walType
|
||||
}
|
||||
|
||||
return filters
|
||||
}
|
||||
|
||||
@@ -140,6 +140,225 @@ func Test_GetBackups_PermissionsEnforced(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GetBackups_WithStatusFilter_ReturnsFilteredBackups(t *testing.T) {
|
||||
router := createTestRouter()
|
||||
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
|
||||
|
||||
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
|
||||
storage := createTestStorage(workspace.ID)
|
||||
|
||||
defer func() {
|
||||
databases.RemoveTestDatabase(database)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}()
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now.Add(-3 * time.Hour),
|
||||
})
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusFailed,
|
||||
CreatedAt: now.Add(-2 * time.Hour),
|
||||
})
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCanceled,
|
||||
CreatedAt: now.Add(-1 * time.Hour),
|
||||
})
|
||||
|
||||
// Single status filter
|
||||
var singleResponse backups_dto.GetBackupsResponse
|
||||
test_utils.MakeGetRequestAndUnmarshal(
|
||||
t,
|
||||
router,
|
||||
fmt.Sprintf("/api/v1/backups?database_id=%s&status=COMPLETED", database.ID.String()),
|
||||
"Bearer "+owner.Token,
|
||||
http.StatusOK,
|
||||
&singleResponse,
|
||||
)
|
||||
|
||||
assert.Equal(t, int64(1), singleResponse.Total)
|
||||
assert.Len(t, singleResponse.Backups, 1)
|
||||
assert.Equal(t, backups_core.BackupStatusCompleted, singleResponse.Backups[0].Status)
|
||||
|
||||
// Multiple status filter
|
||||
var multiResponse backups_dto.GetBackupsResponse
|
||||
test_utils.MakeGetRequestAndUnmarshal(
|
||||
t,
|
||||
router,
|
||||
fmt.Sprintf(
|
||||
"/api/v1/backups?database_id=%s&status=COMPLETED&status=FAILED",
|
||||
database.ID.String(),
|
||||
),
|
||||
"Bearer "+owner.Token,
|
||||
http.StatusOK,
|
||||
&multiResponse,
|
||||
)
|
||||
|
||||
assert.Equal(t, int64(2), multiResponse.Total)
|
||||
assert.Len(t, multiResponse.Backups, 2)
|
||||
|
||||
for _, backup := range multiResponse.Backups {
|
||||
assert.True(
|
||||
t,
|
||||
backup.Status == backups_core.BackupStatusCompleted ||
|
||||
backup.Status == backups_core.BackupStatusFailed,
|
||||
"expected COMPLETED or FAILED, got %s", backup.Status,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GetBackups_WithBeforeDateFilter_ReturnsFilteredBackups(t *testing.T) {
|
||||
router := createTestRouter()
|
||||
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
|
||||
|
||||
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
|
||||
storage := createTestStorage(workspace.ID)
|
||||
|
||||
defer func() {
|
||||
databases.RemoveTestDatabase(database)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}()
|
||||
|
||||
now := time.Now().UTC()
|
||||
cutoff := now.Add(-1 * time.Hour)
|
||||
|
||||
olderBackup := CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now.Add(-3 * time.Hour),
|
||||
})
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now,
|
||||
})
|
||||
|
||||
var response backups_dto.GetBackupsResponse
|
||||
test_utils.MakeGetRequestAndUnmarshal(
|
||||
t,
|
||||
router,
|
||||
fmt.Sprintf(
|
||||
"/api/v1/backups?database_id=%s&beforeDate=%s",
|
||||
database.ID.String(),
|
||||
cutoff.Format(time.RFC3339),
|
||||
),
|
||||
"Bearer "+owner.Token,
|
||||
http.StatusOK,
|
||||
&response,
|
||||
)
|
||||
|
||||
assert.Equal(t, int64(1), response.Total)
|
||||
assert.Len(t, response.Backups, 1)
|
||||
assert.Equal(t, olderBackup.ID, response.Backups[0].ID)
|
||||
}
|
||||
|
||||
func Test_GetBackups_WithPgWalBackupTypeFilter_ReturnsFilteredBackups(t *testing.T) {
|
||||
router := createTestRouter()
|
||||
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
|
||||
|
||||
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
|
||||
storage := createTestStorage(workspace.ID)
|
||||
|
||||
defer func() {
|
||||
databases.RemoveTestDatabase(database)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}()
|
||||
|
||||
now := time.Now().UTC()
|
||||
fullBackupType := backups_core.PgWalBackupTypeFullBackup
|
||||
walSegmentType := backups_core.PgWalBackupTypeWalSegment
|
||||
|
||||
fullBackup := CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now.Add(-2 * time.Hour),
|
||||
PgWalBackupType: &fullBackupType,
|
||||
})
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now.Add(-1 * time.Hour),
|
||||
PgWalBackupType: &walSegmentType,
|
||||
})
|
||||
|
||||
var response backups_dto.GetBackupsResponse
|
||||
test_utils.MakeGetRequestAndUnmarshal(
|
||||
t,
|
||||
router,
|
||||
fmt.Sprintf(
|
||||
"/api/v1/backups?database_id=%s&pgWalBackupType=PG_FULL_BACKUP",
|
||||
database.ID.String(),
|
||||
),
|
||||
"Bearer "+owner.Token,
|
||||
http.StatusOK,
|
||||
&response,
|
||||
)
|
||||
|
||||
assert.Equal(t, int64(1), response.Total)
|
||||
assert.Len(t, response.Backups, 1)
|
||||
assert.Equal(t, fullBackup.ID, response.Backups[0].ID)
|
||||
}
|
||||
|
||||
func Test_GetBackups_WithCombinedFilters_ReturnsFilteredBackups(t *testing.T) {
|
||||
router := createTestRouter()
|
||||
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
|
||||
|
||||
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
|
||||
storage := createTestStorage(workspace.ID)
|
||||
|
||||
defer func() {
|
||||
databases.RemoveTestDatabase(database)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}()
|
||||
|
||||
now := time.Now().UTC()
|
||||
cutoff := now.Add(-1 * time.Hour)
|
||||
|
||||
// Old completed — should match
|
||||
oldCompleted := CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now.Add(-3 * time.Hour),
|
||||
})
|
||||
// Old failed — should NOT match (wrong status)
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusFailed,
|
||||
CreatedAt: now.Add(-2 * time.Hour),
|
||||
})
|
||||
// New completed — should NOT match (too recent)
|
||||
CreateTestBackupWithOptions(database.ID, storage.ID, TestBackupOptions{
|
||||
Status: backups_core.BackupStatusCompleted,
|
||||
CreatedAt: now,
|
||||
})
|
||||
|
||||
var response backups_dto.GetBackupsResponse
|
||||
test_utils.MakeGetRequestAndUnmarshal(
|
||||
t,
|
||||
router,
|
||||
fmt.Sprintf(
|
||||
"/api/v1/backups?database_id=%s&status=COMPLETED&beforeDate=%s",
|
||||
database.ID.String(),
|
||||
cutoff.Format(time.RFC3339),
|
||||
),
|
||||
"Bearer "+owner.Token,
|
||||
http.StatusOK,
|
||||
&response,
|
||||
)
|
||||
|
||||
assert.Equal(t, int64(1), response.Total)
|
||||
assert.Len(t, response.Backups, 1)
|
||||
assert.Equal(t, oldCompleted.ID, response.Backups[0].ID)
|
||||
}
|
||||
|
||||
func Test_CreateBackup_PermissionsEnforced(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -376,7 +595,7 @@ func Test_DeleteBackup_PermissionsEnforced(t *testing.T) {
|
||||
ownerUser, err := userService.GetUserFromToken(owner.Token)
|
||||
assert.NoError(t, err)
|
||||
|
||||
response, err := backups_services.GetBackupService().GetBackups(ownerUser, database.ID, 10, 0)
|
||||
response, err := backups_services.GetBackupService().GetBackups(ownerUser, database.ID, 10, 0, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(response.Backups))
|
||||
}
|
||||
|
||||
@@ -95,3 +95,33 @@ func CreateTestBackup(databaseID, storageID uuid.UUID) *backups_core.Backup {
|
||||
|
||||
return backup
|
||||
}
|
||||
|
||||
type TestBackupOptions struct {
|
||||
Status backups_core.BackupStatus
|
||||
CreatedAt time.Time
|
||||
PgWalBackupType *backups_core.PgWalBackupType
|
||||
}
|
||||
|
||||
// CreateTestBackupWithOptions creates a test backup with custom status, time, and WAL type
|
||||
func CreateTestBackupWithOptions(
|
||||
databaseID, storageID uuid.UUID,
|
||||
opts TestBackupOptions,
|
||||
) *backups_core.Backup {
|
||||
backup := &backups_core.Backup{
|
||||
ID: uuid.New(),
|
||||
DatabaseID: databaseID,
|
||||
StorageID: storageID,
|
||||
Status: opts.Status,
|
||||
BackupSizeMb: 10.5,
|
||||
BackupDurationMs: 1000,
|
||||
PgWalBackupType: opts.PgWalBackupType,
|
||||
CreatedAt: opts.CreatedAt,
|
||||
}
|
||||
|
||||
repo := &backups_core.BackupRepository{}
|
||||
if err := repo.Save(backup); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return backup
|
||||
}
|
||||
|
||||
9
backend/internal/features/backups/backups/core/dto.go
Normal file
9
backend/internal/features/backups/backups/core/dto.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package backups_core

import "time"

// BackupFilters holds optional criteria for narrowing backup queries.
// A nil *BackupFilters, or any unset field, means "no filtering" for
// the corresponding criterion.
type BackupFilters struct {
	// Statuses restricts results to backups whose status is in the list
	// (SQL IN). Empty means any status.
	Statuses []BackupStatus
	// BeforeDate restricts results to backups created strictly before
	// this instant. Nil means no time cutoff.
	BeforeDate *time.Time
	// PgWalBackupType restricts results to a single WAL backup type.
	// Nil means any type.
	PgWalBackupType *PgWalBackupType
}
|
||||
@@ -422,3 +422,67 @@ func (r *BackupRepository) FindLastWalSegmentAfter(
|
||||
|
||||
return &backup, nil
|
||||
}
|
||||
|
||||
func (r *BackupRepository) FindByDatabaseIDWithFiltersAndPagination(
|
||||
databaseID uuid.UUID,
|
||||
filters *BackupFilters,
|
||||
limit, offset int,
|
||||
) ([]*Backup, error) {
|
||||
var backups []*Backup
|
||||
|
||||
query := storage.
|
||||
GetDb().
|
||||
Where("database_id = ?", databaseID)
|
||||
|
||||
if filters != nil {
|
||||
query = filters.applyToQuery(query)
|
||||
}
|
||||
|
||||
if err := query.
|
||||
Order("created_at DESC").
|
||||
Limit(limit).
|
||||
Offset(offset).
|
||||
Find(&backups).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return backups, nil
|
||||
}
|
||||
|
||||
func (r *BackupRepository) CountByDatabaseIDWithFilters(
|
||||
databaseID uuid.UUID,
|
||||
filters *BackupFilters,
|
||||
) (int64, error) {
|
||||
var count int64
|
||||
|
||||
query := storage.
|
||||
GetDb().
|
||||
Model(&Backup{}).
|
||||
Where("database_id = ?", databaseID)
|
||||
|
||||
if filters != nil {
|
||||
query = filters.applyToQuery(query)
|
||||
}
|
||||
|
||||
if err := query.Count(&count).Error; err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func (f *BackupFilters) applyToQuery(query *gorm.DB) *gorm.DB {
|
||||
if len(f.Statuses) > 0 {
|
||||
query = query.Where("status IN ?", f.Statuses)
|
||||
}
|
||||
|
||||
if f.BeforeDate != nil {
|
||||
query = query.Where("created_at < ?", *f.BeforeDate)
|
||||
}
|
||||
|
||||
if f.PgWalBackupType != nil {
|
||||
query = query.Where("pg_wal_backup_type = ?", *f.PgWalBackupType)
|
||||
}
|
||||
|
||||
return query
|
||||
}
|
||||
|
||||
@@ -11,9 +11,12 @@ import (
|
||||
)
|
||||
|
||||
// GetBackupsRequest is the query-string payload for the backups list
// endpoint. (Fix: the struct contained duplicate DatabaseID/Limit/Offset
// declarations — an overlay of old and new versions — which is invalid Go;
// each field is now declared exactly once.)
type GetBackupsRequest struct {
	DatabaseID string `form:"database_id" binding:"required"`
	Limit      int    `form:"limit"`
	Offset     int    `form:"offset"`
	// Statuses filters by backup status; the `status` param may be repeated.
	Statuses []string `form:"status"`
	// BeforeDate keeps only backups created before this instant (RFC3339).
	BeforeDate *time.Time `form:"beforeDate"`
	// PgWalBackupType filters by PostgreSQL WAL backup type.
	PgWalBackupType *string `form:"pgWalBackupType"`
}
|
||||
|
||||
type GetBackupsResponse struct {
|
||||
|
||||
@@ -109,6 +109,7 @@ func (s *BackupService) GetBackups(
|
||||
user *users_models.User,
|
||||
databaseID uuid.UUID,
|
||||
limit, offset int,
|
||||
filters *backups_core.BackupFilters,
|
||||
) (*backups_dto.GetBackupsResponse, error) {
|
||||
database, err := s.databaseService.GetDatabaseByID(databaseID)
|
||||
if err != nil {
|
||||
@@ -134,12 +135,14 @@ func (s *BackupService) GetBackups(
|
||||
offset = 0
|
||||
}
|
||||
|
||||
backups, err := s.backupRepository.FindByDatabaseIDWithPagination(databaseID, limit, offset)
|
||||
backups, err := s.backupRepository.FindByDatabaseIDWithFiltersAndPagination(
|
||||
databaseID, filters, limit, offset,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
total, err := s.backupRepository.CountByDatabaseID(databaseID)
|
||||
total, err := s.backupRepository.CountByDatabaseIDWithFilters(databaseID, filters)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -62,6 +62,14 @@ func (r *BackupConfigRepository) FindByDatabaseID(databaseID uuid.UUID) (*Backup
|
||||
GetDb().
|
||||
Preload("BackupInterval").
|
||||
Preload("Storage").
|
||||
Preload("Storage.LocalStorage").
|
||||
Preload("Storage.S3Storage").
|
||||
Preload("Storage.GoogleDriveStorage").
|
||||
Preload("Storage.NASStorage").
|
||||
Preload("Storage.AzureBlobStorage").
|
||||
Preload("Storage.FTPStorage").
|
||||
Preload("Storage.SFTPStorage").
|
||||
Preload("Storage.RcloneStorage").
|
||||
Where("database_id = ?", databaseID).
|
||||
First(&backupConfig).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
@@ -81,6 +89,14 @@ func (r *BackupConfigRepository) GetWithEnabledBackups() ([]*BackupConfig, error
|
||||
GetDb().
|
||||
Preload("BackupInterval").
|
||||
Preload("Storage").
|
||||
Preload("Storage.LocalStorage").
|
||||
Preload("Storage.S3Storage").
|
||||
Preload("Storage.GoogleDriveStorage").
|
||||
Preload("Storage.NASStorage").
|
||||
Preload("Storage.AzureBlobStorage").
|
||||
Preload("Storage.FTPStorage").
|
||||
Preload("Storage.SFTPStorage").
|
||||
Preload("Storage.RcloneStorage").
|
||||
Where("is_backups_enabled = ?", true).
|
||||
Find(&backupConfigs).Error; err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -23,7 +23,7 @@ func (c *VersionController) RegisterRoutes(router *gin.RouterGroup) {
|
||||
func (c *VersionController) GetVersion(ctx *gin.Context) {
|
||||
version := os.Getenv("APP_VERSION")
|
||||
if version == "" {
|
||||
version = "dev"
|
||||
version = "3.26.0"
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, VersionResponse{Version: version})
|
||||
|
||||
@@ -3,12 +3,30 @@ import RequestOptions from '../../../shared/api/RequestOptions';
|
||||
import { apiHelper } from '../../../shared/api/apiHelper';
|
||||
import type { GetBackupsResponse } from '../model/GetBackupsResponse';
|
||||
|
||||
// Optional query filters accepted by the backups list endpoint.
export interface BackupsFilters {
  // Backup statuses to include; each entry is sent as a repeated `status`
  // query parameter.
  statuses?: string[];
  // ISO-8601 timestamp; only backups created before it are returned.
  beforeDate?: string;
  // PostgreSQL WAL backup type to filter by (sent as `pgWalBackupType`).
  pgWalBackupType?: string;
}
|
||||
|
||||
export const backupsApi = {
|
||||
async getBackups(databaseId: string, limit?: number, offset?: number) {
|
||||
async getBackups(databaseId: string, limit?: number, offset?: number, filters?: BackupsFilters) {
|
||||
const params = new URLSearchParams({ database_id: databaseId });
|
||||
if (limit !== undefined) params.append('limit', limit.toString());
|
||||
if (offset !== undefined) params.append('offset', offset.toString());
|
||||
|
||||
if (filters?.statuses) {
|
||||
for (const status of filters.statuses) {
|
||||
params.append('status', status);
|
||||
}
|
||||
}
|
||||
if (filters?.beforeDate) {
|
||||
params.append('beforeDate', filters.beforeDate);
|
||||
}
|
||||
if (filters?.pgWalBackupType) {
|
||||
params.append('pgWalBackupType', filters.pgWalBackupType);
|
||||
}
|
||||
|
||||
return apiHelper.fetchGetJson<GetBackupsResponse>(
|
||||
`${getApplicationServer()}/api/v1/backups?${params.toString()}`,
|
||||
undefined,
|
||||
|
||||
@@ -5,6 +5,8 @@ import {
|
||||
DeleteOutlined,
|
||||
DownloadOutlined,
|
||||
ExclamationCircleOutlined,
|
||||
FilterFilled,
|
||||
FilterOutlined,
|
||||
InfoCircleOutlined,
|
||||
LockOutlined,
|
||||
SyncOutlined,
|
||||
@@ -24,12 +26,14 @@ import {
|
||||
backupConfigApi,
|
||||
backupsApi,
|
||||
} from '../../../entity/backups';
|
||||
import type { BackupsFilters } from '../../../entity/backups/api/backupsApi';
|
||||
import { type Database, DatabaseType, PostgresBackupType } from '../../../entity/databases';
|
||||
import { getUserTimeFormat } from '../../../shared/time';
|
||||
import { ConfirmationComponent } from '../../../shared/ui';
|
||||
import { RestoresComponent } from '../../restores';
|
||||
import { AgentRestoreComponent } from './AgentRestoreComponent';
|
||||
import { BackupsBillingBannerComponent } from './BackupsBillingBannerComponent';
|
||||
import { BackupsFiltersPanelComponent } from './BackupsFiltersPanelComponent';
|
||||
|
||||
const BACKUPS_PAGE_SIZE = 50;
|
||||
|
||||
@@ -74,6 +78,9 @@ export const BackupsComponent = ({
|
||||
const [downloadingBackupId, setDownloadingBackupId] = useState<string | undefined>();
|
||||
const [cancellingBackupId, setCancellingBackupId] = useState<string | undefined>();
|
||||
|
||||
const [isFilterPanelVisible, setIsFilterPanelVisible] = useState(false);
|
||||
const [filters, setFilters] = useState<BackupsFilters>({});
|
||||
|
||||
const downloadBackup = async (backupId: string) => {
|
||||
try {
|
||||
await backupsApi.downloadBackup(backupId);
|
||||
@@ -84,7 +91,7 @@ export const BackupsComponent = ({
|
||||
}
|
||||
};
|
||||
|
||||
const loadBackups = async (limit?: number) => {
|
||||
const loadBackups = async (limit?: number, filtersOverride?: BackupsFilters) => {
|
||||
if (isBackupsRequestInFlightRef.current) return;
|
||||
isBackupsRequestInFlightRef.current = true;
|
||||
|
||||
@@ -92,9 +99,10 @@ export const BackupsComponent = ({
|
||||
lastRequestTimeRef.current = requestTime;
|
||||
|
||||
const loadLimit = limit ?? currentLimit;
|
||||
const activeFilters = filtersOverride ?? filters;
|
||||
|
||||
try {
|
||||
const response = await backupsApi.getBackups(database.id, loadLimit, 0);
|
||||
const response = await backupsApi.getBackups(database.id, loadLimit, 0, activeFilters);
|
||||
|
||||
if (lastRequestTimeRef.current !== requestTime) return;
|
||||
|
||||
@@ -124,7 +132,7 @@ export const BackupsComponent = ({
|
||||
lastRequestTimeRef.current = requestTime;
|
||||
|
||||
try {
|
||||
const response = await backupsApi.getBackups(database.id, newLimit, 0);
|
||||
const response = await backupsApi.getBackups(database.id, newLimit, 0, filters);
|
||||
|
||||
if (lastRequestTimeRef.current !== requestTime) return;
|
||||
|
||||
@@ -206,13 +214,20 @@ export const BackupsComponent = ({
|
||||
return () => {};
|
||||
}, [database]);
|
||||
|
||||
useEffect(() => {
|
||||
setCurrentLimit(BACKUPS_PAGE_SIZE);
|
||||
setHasMore(true);
|
||||
setIsBackupsLoading(true);
|
||||
loadBackups(BACKUPS_PAGE_SIZE, filters).then(() => setIsBackupsLoading(false));
|
||||
}, [filters]);
|
||||
|
||||
useEffect(() => {
|
||||
const intervalId = setInterval(() => {
|
||||
loadBackups();
|
||||
}, 1_000);
|
||||
|
||||
return () => clearInterval(intervalId);
|
||||
}, [currentLimit]);
|
||||
}, [currentLimit, filters]);
|
||||
|
||||
useEffect(() => {
|
||||
if (downloadingBackupId) {
|
||||
@@ -432,29 +447,6 @@ export const BackupsComponent = ({
|
||||
dataIndex: 'status',
|
||||
key: 'status',
|
||||
render: (status: BackupStatus, record: Backup) => renderStatus(status, record),
|
||||
filters: [
|
||||
{
|
||||
value: BackupStatus.IN_PROGRESS,
|
||||
text: 'In progress',
|
||||
},
|
||||
{
|
||||
value: BackupStatus.FAILED,
|
||||
text: 'Failed',
|
||||
},
|
||||
{
|
||||
value: BackupStatus.COMPLETED,
|
||||
text: 'Successful',
|
||||
},
|
||||
{
|
||||
value: BackupStatus.DELETED,
|
||||
text: 'Deleted',
|
||||
},
|
||||
{
|
||||
value: BackupStatus.CANCELED,
|
||||
text: 'Canceled',
|
||||
},
|
||||
],
|
||||
onFilter: (value, record) => record.status === value,
|
||||
},
|
||||
{
|
||||
title: (
|
||||
@@ -502,6 +494,11 @@ export const BackupsComponent = ({
|
||||
},
|
||||
];
|
||||
|
||||
const isAnyFilterApplied =
|
||||
(filters.statuses && filters.statuses.length > 0) ||
|
||||
filters.beforeDate !== undefined ||
|
||||
filters.pgWalBackupType !== undefined;
|
||||
|
||||
if (isBackupConfigLoading) {
|
||||
return (
|
||||
<div className="mb-5 flex items-center">
|
||||
@@ -514,7 +511,35 @@ export const BackupsComponent = ({
|
||||
<div
|
||||
className={`w-full bg-white p-3 shadow md:p-5 dark:bg-gray-800 ${isDirectlyUnderTab ? 'rounded-tr-md rounded-br-md rounded-bl-md' : 'rounded-md'}`}
|
||||
>
|
||||
<h2 className="text-lg font-bold md:text-xl dark:text-white">Backups</h2>
|
||||
<div className="flex items-center gap-2">
|
||||
<h2 className="text-lg font-bold md:text-xl dark:text-white">Backups</h2>
|
||||
<div className="relative">
|
||||
{isFilterPanelVisible ? (
|
||||
<FilterFilled
|
||||
className="cursor-pointer text-blue-600"
|
||||
onClick={() => setIsFilterPanelVisible(false)}
|
||||
/>
|
||||
) : (
|
||||
<FilterOutlined
|
||||
className="cursor-pointer"
|
||||
onClick={() => setIsFilterPanelVisible(true)}
|
||||
/>
|
||||
)}
|
||||
{!isFilterPanelVisible && isAnyFilterApplied && (
|
||||
<span className="absolute -top-1 -right-1 h-2 w-2 rounded-full bg-blue-600" />
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{isFilterPanelVisible && (
|
||||
<div className="mt-3">
|
||||
<BackupsFiltersPanelComponent
|
||||
filters={filters}
|
||||
onFiltersChange={setFilters}
|
||||
isWalDatabase={database.postgresql?.backupType === PostgresBackupType.WAL_V1}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{IS_CLOUD && (
|
||||
<BackupsBillingBannerComponent
|
||||
|
||||
@@ -0,0 +1,92 @@
|
||||
import { DatePicker, Select } from 'antd';
|
||||
import type { Dayjs } from 'dayjs';
|
||||
import dayjs from 'dayjs';
|
||||
|
||||
import { BackupStatus, PgWalBackupType } from '../../../entity/backups';
|
||||
import type { BackupsFilters } from '../../../entity/backups/api/backupsApi';
|
||||
|
||||
interface Props {
  // Current filter values; this component is fully controlled by the parent.
  filters: BackupsFilters;
  // Invoked with the complete, updated filters object on any change.
  onFiltersChange: (filters: BackupsFilters) => void;
  // When true, the PostgreSQL WAL backup-type filter row is rendered.
  isWalDatabase: boolean;
}
|
||||
|
||||
// Choices for the status multi-select; labels are the user-facing names of
// the corresponding BackupStatus values.
const statusOptions = [
  { label: 'In progress', value: BackupStatus.IN_PROGRESS },
  { label: 'Successful', value: BackupStatus.COMPLETED },
  { label: 'Failed', value: BackupStatus.FAILED },
  { label: 'Canceled', value: BackupStatus.CANCELED },
];

// Choices for the WAL backup-type select; only shown for WAL databases.
const pgWalBackupTypeOptions = [
  { label: 'Full backup', value: PgWalBackupType.PG_FULL_BACKUP },
  { label: 'WAL segment', value: PgWalBackupType.PG_WAL_SEGMENT },
];
|
||||
|
||||
// Renders the backups filter panel: a status multi-select, a "before" date
// picker, and — for WAL databases only — a backup-type select. The component
// holds no state of its own; every change is lifted via onFiltersChange.
export const BackupsFiltersPanelComponent = ({
  filters,
  onFiltersChange,
  isWalDatabase,
}: Props) => {
  // Merge a partial change into the current filters and notify the parent.
  const updateFilters = (patch: Partial<BackupsFilters>) => {
    onFiltersChange({ ...filters, ...patch });
  };

  const handleStatusChange = (statuses: string[]) => {
    // An empty selection clears the criterion entirely.
    updateFilters({ statuses: statuses.length > 0 ? statuses : undefined });
  };

  const handleBeforeDateChange = (date: Dayjs | null) => {
    updateFilters({ beforeDate: date ? date.toISOString() : undefined });
  };

  const handlePgWalBackupTypeChange = (value: string | undefined) => {
    updateFilters({ pgWalBackupType: value });
  };

  return (
    <div className="flex flex-col gap-3">
      <div className="flex items-center gap-2">
        <span className="min-w-[90px] text-sm text-gray-500 dark:text-gray-400">Status</span>
        <Select
          mode="multiple"
          value={filters.statuses ?? []}
          onChange={handleStatusChange}
          options={statusOptions}
          placeholder="All statuses"
          size="small"
          variant="filled"
          className="w-[200px] [&_.ant-select-selector]:!rounded-md"
          allowClear
        />
      </div>

      <div className="flex items-center gap-2">
        <span className="min-w-[90px] text-sm text-gray-500 dark:text-gray-400">Before</span>
        <DatePicker
          value={filters.beforeDate ? dayjs(filters.beforeDate) : null}
          onChange={handleBeforeDateChange}
          size="small"
          variant="filled"
          className="w-[200px] !rounded-md"
          allowClear
        />
      </div>

      {isWalDatabase && (
        <div className="flex items-center gap-2">
          <span className="min-w-[90px] text-sm text-gray-500 dark:text-gray-400">Backup type</span>
          <Select
            value={filters.pgWalBackupType}
            onChange={handlePgWalBackupTypeChange}
            options={pgWalBackupTypeOptions}
            placeholder="All types"
            size="small"
            variant="filled"
            className="w-[200px] [&_.ant-select-selector]:!rounded-md"
            allowClear
          />
        </div>
      )}
    </div>
  );
};
|
||||
@@ -72,71 +72,281 @@ export const AgentInstallationComponent = ({ database, onTokenGenerated }: Props
|
||||
);
|
||||
|
||||
const downloadCommand = `curl -L -o databasus-agent "${databasusHost}/api/v1/system/agent?arch=${selectedArch}" && chmod +x databasus-agent`;
|
||||
|
||||
const walQueuePath = pgDeploymentType === 'docker' ? '/wal-queue' : '/opt/databasus/wal-queue';
|
||||
|
||||
const postgresqlConfSettings = `wal_level = replica
|
||||
archive_mode = on
|
||||
archive_command = 'cp %p ${walQueuePath}/%f.tmp && mv ${walQueuePath}/%f.tmp ${walQueuePath}/%f'`;
|
||||
|
||||
const pgHbaEntry = `host replication all 127.0.0.1/32 md5`;
|
||||
|
||||
const grantReplicationSql = `ALTER ROLE <YOUR_PG_USER> WITH REPLICATION;`;
|
||||
|
||||
const createWalDirCommand = `mkdir -p /opt/databasus/wal-queue`;
|
||||
// -- Step 2: Configure postgresql.conf --
|
||||
|
||||
const walDirPermissionsCommand = `chown postgres:postgres /opt/databasus/wal-queue
|
||||
chmod 755 /opt/databasus/wal-queue`;
|
||||
const renderStep2System = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 2 — Configure postgresql.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add or update these settings in your <code>postgresql.conf</code>.
|
||||
</p>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Typical location — Debian/Ubuntu:{' '}
|
||||
<code>/etc/postgresql/<version>/main/postgresql.conf</code>, RHEL/CentOS:{' '}
|
||||
<code>/var/lib/pgsql/<version>/data/postgresql.conf</code>
|
||||
</p>
|
||||
{renderCodeBlock(`wal_level = replica
|
||||
archive_mode = on
|
||||
archive_command = 'cp %p /opt/databasus/wal-queue/%f.tmp && mv /opt/databasus/wal-queue/%f.tmp /opt/databasus/wal-queue/%f'`)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Restart PostgreSQL to apply the changes:
|
||||
</p>
|
||||
{renderCodeBlock('sudo systemctl restart postgresql')}
|
||||
</div>
|
||||
);
|
||||
|
||||
const dockerWalDirPermissionsCommand = `# Inside the container (or via docker exec):
|
||||
chown postgres:postgres /wal-queue`;
|
||||
const renderStep2Folder = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 2 — Configure postgresql.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add or update these settings in the <code>postgresql.conf</code> inside your PostgreSQL data
|
||||
directory.
|
||||
</p>
|
||||
{renderCodeBlock(`wal_level = replica
|
||||
archive_mode = on
|
||||
archive_command = 'cp %p /opt/databasus/wal-queue/%f.tmp && mv /opt/databasus/wal-queue/%f.tmp /opt/databasus/wal-queue/%f'`)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Restart PostgreSQL to apply the changes:
|
||||
</p>
|
||||
{renderCodeBlock('pg_ctl -D <YOUR_PG_DATA_DIR> restart')}
|
||||
</div>
|
||||
);
|
||||
|
||||
const dockerVolumeExample = `# In your docker run command:
|
||||
const renderStep2Docker = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 2 — Configure postgresql.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add or update these settings in your <code>postgresql.conf</code> inside the container. The{' '}
|
||||
<code>/wal-queue</code> path in <code>archive_command</code> is the path{' '}
|
||||
<strong>inside the container</strong> — it must match the volume mount target configured in
|
||||
Step 5.
|
||||
</p>
|
||||
{renderCodeBlock(`wal_level = replica
|
||||
archive_mode = on
|
||||
archive_command = 'cp %p /wal-queue/%f.tmp && mv /wal-queue/%f.tmp /wal-queue/%f'`)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Restart the container to apply the changes:
|
||||
</p>
|
||||
{renderCodeBlock('docker restart <CONTAINER_NAME>')}
|
||||
</div>
|
||||
);
|
||||
|
||||
// -- Step 3: Configure pg_hba.conf --
|
||||
|
||||
const renderStep3System = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 3 — Configure pg_hba.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add this line to <code>pg_hba.conf</code> to allow <code>pg_basebackup</code> to take full
|
||||
backups via a local replication connection. Adjust the address and auth method as needed.
|
||||
</p>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Typical location — Debian/Ubuntu:{' '}
|
||||
<code>/etc/postgresql/<version>/main/pg_hba.conf</code>, RHEL/CentOS:{' '}
|
||||
<code>/var/lib/pgsql/<version>/data/pg_hba.conf</code>
|
||||
</p>
|
||||
{renderCodeBlock(pgHbaEntry)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Restart PostgreSQL to apply the changes:
|
||||
</p>
|
||||
{renderCodeBlock('sudo systemctl restart postgresql')}
|
||||
</div>
|
||||
);
|
||||
|
||||
const renderStep3Folder = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 3 — Configure pg_hba.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add this line to <code>pg_hba.conf</code> in your PostgreSQL data directory to allow{' '}
|
||||
<code>pg_basebackup</code> to take full backups via a local replication connection. Adjust
|
||||
the address and auth method as needed.
|
||||
</p>
|
||||
{renderCodeBlock(pgHbaEntry)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Restart PostgreSQL to apply the changes:
|
||||
</p>
|
||||
{renderCodeBlock('pg_ctl -D <YOUR_PG_DATA_DIR> restart')}
|
||||
</div>
|
||||
);
|
||||
|
||||
const renderStep3Docker = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 3 — Configure pg_hba.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add this line to <code>pg_hba.conf</code> inside the container to allow{' '}
|
||||
<code>pg_basebackup</code> to take full backups via a replication connection on the
|
||||
container's loopback interface. Adjust the address and auth method as needed.
|
||||
</p>
|
||||
{renderCodeBlock(pgHbaEntry)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Restart the container to apply the changes:
|
||||
</p>
|
||||
{renderCodeBlock('docker restart <CONTAINER_NAME>')}
|
||||
</div>
|
||||
);
|
||||
|
||||
// -- Step 5: WAL queue directory --
|
||||
|
||||
const renderStep5System = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 5 — Create WAL queue directory</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
PostgreSQL will place WAL archive files here for the agent to upload.
|
||||
</p>
|
||||
{renderCodeBlock('mkdir -p /opt/databasus/wal-queue')}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Ensure the directory is writable by PostgreSQL and readable by the agent:
|
||||
</p>
|
||||
{renderCodeBlock(`chown postgres:postgres /opt/databasus/wal-queue
|
||||
chmod 755 /opt/databasus/wal-queue`)}
|
||||
</div>
|
||||
);
|
||||
|
||||
const renderStep5Folder = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 5 — Create WAL queue directory</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
PostgreSQL will place WAL archive files here for the agent to upload.
|
||||
</p>
|
||||
{renderCodeBlock('mkdir -p /opt/databasus/wal-queue')}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Ensure the directory is writable by PostgreSQL and readable by the agent:
|
||||
</p>
|
||||
{renderCodeBlock(`chown postgres:postgres /opt/databasus/wal-queue
|
||||
chmod 755 /opt/databasus/wal-queue`)}
|
||||
</div>
|
||||
);
|
||||
|
||||
const renderStep5Docker = () => (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 5 — Set up WAL queue volume</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
The WAL queue directory must be a <strong>volume mount</strong> shared between the
|
||||
PostgreSQL container and the host. The agent reads WAL files from the host path, while
|
||||
PostgreSQL writes to the container path via <code>archive_command</code>.
|
||||
</p>
|
||||
{renderCodeBlock('mkdir -p /opt/databasus/wal-queue')}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Then mount it as a volume so both the container and the agent can access it:
|
||||
</p>
|
||||
{renderCodeBlock(`# In your docker run command:
|
||||
docker run ... -v /opt/databasus/wal-queue:/wal-queue ...
|
||||
|
||||
# Or in docker-compose.yml:
|
||||
volumes:
|
||||
- /opt/databasus/wal-queue:/wal-queue`;
|
||||
- /opt/databasus/wal-queue:/wal-queue`)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Ensure the directory inside the container is owned by the <code>postgres</code> user:
|
||||
</p>
|
||||
{renderCodeBlock(`# Inside the container (or via docker exec):
|
||||
chown postgres:postgres /wal-queue`)}
|
||||
</div>
|
||||
);
|
||||
|
||||
const buildStartCommand = () => {
|
||||
const baseFlags = [
|
||||
` --databasus-host=${databasusHost}`,
|
||||
` --db-id=${database.id}`,
|
||||
` --token=<YOUR_AGENT_TOKEN>`,
|
||||
` --pg-host=localhost`,
|
||||
` --pg-port=5432`,
|
||||
` --pg-user=<YOUR_PG_USER>`,
|
||||
` --pg-password=<YOUR_PG_PASSWORD>`,
|
||||
];
|
||||
// -- Step 6: Start the agent --
|
||||
|
||||
const baseFlagsWithContinuation = baseFlags.map((f) => f + ' \\');
|
||||
const buildBaseFlags = () => [
|
||||
` --databasus-host=${databasusHost} \\`,
|
||||
` --db-id=${database.id} \\`,
|
||||
` --token=<YOUR_AGENT_TOKEN> \\`,
|
||||
` --pg-host=localhost \\`,
|
||||
` --pg-port=5432 \\`,
|
||||
` --pg-user=<YOUR_PG_USER> \\`,
|
||||
` --pg-password=<YOUR_PG_PASSWORD> \\`,
|
||||
];
|
||||
|
||||
if (pgDeploymentType === 'system') {
|
||||
return [
|
||||
'./databasus-agent start \\',
|
||||
...baseFlagsWithContinuation,
|
||||
` --pg-type=host \\`,
|
||||
` --pg-wal-dir=/opt/databasus/wal-queue`,
|
||||
].join('\n');
|
||||
}
|
||||
|
||||
if (pgDeploymentType === 'folder') {
|
||||
return [
|
||||
'./databasus-agent start \\',
|
||||
...baseFlagsWithContinuation,
|
||||
` --pg-type=host \\`,
|
||||
` --pg-host-bin-dir=<PATH_TO_PG_BIN_DIR> \\`,
|
||||
` --pg-wal-dir=/opt/databasus/wal-queue`,
|
||||
].join('\n');
|
||||
}
|
||||
|
||||
return [
|
||||
const renderStep6System = () => {
|
||||
const startCommand = [
|
||||
'./databasus-agent start \\',
|
||||
...baseFlagsWithContinuation,
|
||||
...buildBaseFlags(),
|
||||
` --pg-type=host \\`,
|
||||
` --pg-wal-dir=/opt/databasus/wal-queue`,
|
||||
].join('\n');
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 6 — Start the agent</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.
|
||||
</p>
|
||||
{renderCodeBlock(startCommand)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
const renderStep6Folder = () => {
|
||||
const startCommand = [
|
||||
'./databasus-agent start \\',
|
||||
...buildBaseFlags(),
|
||||
` --pg-type=host \\`,
|
||||
` --pg-host-bin-dir=<PATH_TO_PG_BIN_DIR> \\`,
|
||||
` --pg-wal-dir=/opt/databasus/wal-queue`,
|
||||
].join('\n');
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 6 — Start the agent</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.{' '}
|
||||
<code>--pg-host-bin-dir</code> should point to the directory containing{' '}
|
||||
<code>pg_basebackup</code> (e.g. <code>/usr/lib/postgresql/17/bin</code>).
|
||||
</p>
|
||||
{renderCodeBlock(startCommand)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
const renderStep6Docker = () => {
|
||||
const startCommand = [
|
||||
'./databasus-agent start \\',
|
||||
...buildBaseFlags(),
|
||||
` --pg-type=docker \\`,
|
||||
` --pg-docker-container-name=<CONTAINER_NAME> \\`,
|
||||
` --pg-wal-dir=/opt/databasus/wal-queue`,
|
||||
].join('\n');
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 6 — Start the agent</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.
|
||||
</p>
|
||||
<p className="mt-1 text-sm text-amber-600 dark:text-amber-400">
|
||||
Use the PostgreSQL port <strong>inside the container</strong> (usually 5432), not the
|
||||
host-mapped port.
|
||||
</p>
|
||||
{renderCodeBlock(startCommand)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
// -- Dispatch helpers --
|
||||
|
||||
const renderStep2 = () => {
|
||||
if (pgDeploymentType === 'system') return renderStep2System();
|
||||
if (pgDeploymentType === 'folder') return renderStep2Folder();
|
||||
return renderStep2Docker();
|
||||
};
|
||||
|
||||
const renderStep3 = () => {
|
||||
if (pgDeploymentType === 'system') return renderStep3System();
|
||||
if (pgDeploymentType === 'folder') return renderStep3Folder();
|
||||
return renderStep3Docker();
|
||||
};
|
||||
|
||||
const renderStep5 = () => {
|
||||
if (pgDeploymentType === 'system') return renderStep5System();
|
||||
if (pgDeploymentType === 'folder') return renderStep5Folder();
|
||||
return renderStep5Docker();
|
||||
};
|
||||
|
||||
const renderStep6 = () => {
|
||||
if (pgDeploymentType === 'system') return renderStep6System();
|
||||
if (pgDeploymentType === 'folder') return renderStep6Folder();
|
||||
return renderStep6Docker();
|
||||
};
|
||||
|
||||
return (
|
||||
@@ -247,31 +457,8 @@ volumes:
|
||||
{renderCodeBlock(downloadCommand)}
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 2 — Configure postgresql.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add or update these settings in your <code>postgresql.conf</code>, then{' '}
|
||||
<strong>restart PostgreSQL</strong>.
|
||||
</p>
|
||||
{pgDeploymentType === 'docker' && (
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
The <code>archive_command</code> path (<code>/wal-queue</code>) is the path{' '}
|
||||
<strong>inside the container</strong>. It must match the volume mount target — see
|
||||
Step 5.
|
||||
</p>
|
||||
)}
|
||||
{renderCodeBlock(postgresqlConfSettings)}
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 3 — Configure pg_hba.conf</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Add this line to <code>pg_hba.conf</code>. This is required for{' '}
|
||||
<code>pg_basebackup</code> to take full backups — not for streaming replication. Adjust
|
||||
the address and auth method as needed, then reload PostgreSQL.
|
||||
</p>
|
||||
{renderCodeBlock(pgHbaEntry)}
|
||||
</div>
|
||||
{renderStep2()}
|
||||
{renderStep3()}
|
||||
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 4 — Grant replication privilege</div>
|
||||
@@ -282,58 +469,8 @@ volumes:
|
||||
{renderCodeBlock(grantReplicationSql)}
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">
|
||||
Step 5 —{' '}
|
||||
{pgDeploymentType === 'docker'
|
||||
? 'Set up WAL queue volume'
|
||||
: 'Create WAL queue directory'}
|
||||
</div>
|
||||
{pgDeploymentType === 'docker' ? (
|
||||
<>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
The WAL queue directory must be a <strong>volume mount</strong> shared between the
|
||||
PostgreSQL container and the host. The agent reads WAL files from the host path,
|
||||
while PostgreSQL writes to the container path via <code>archive_command</code>.
|
||||
</p>
|
||||
{renderCodeBlock(createWalDirCommand)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Then mount it as a volume so both the container and the agent can access it:
|
||||
</p>
|
||||
{renderCodeBlock(dockerVolumeExample)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Ensure the directory inside the container is owned by the <code>postgres</code>{' '}
|
||||
user:
|
||||
</p>
|
||||
{renderCodeBlock(dockerWalDirPermissionsCommand)}
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
PostgreSQL will place WAL archive files here for the agent to upload.
|
||||
</p>
|
||||
{renderCodeBlock(createWalDirCommand)}
|
||||
<p className="mt-2 text-sm text-gray-600 dark:text-gray-400">
|
||||
Ensure the directory is writable by PostgreSQL and readable by the agent:
|
||||
</p>
|
||||
{renderCodeBlock(walDirPermissionsCommand)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">Step 6 — Start the agent</div>
|
||||
<p className="mt-1 text-sm text-gray-600 dark:text-gray-400">
|
||||
Replace placeholders in <code>{'<ANGLE_BRACKETS>'}</code> with your actual values.
|
||||
</p>
|
||||
{pgDeploymentType === 'docker' && (
|
||||
<p className="mt-1 text-sm text-amber-600 dark:text-amber-400">
|
||||
Use the PostgreSQL port <strong>inside the container</strong> (usually 5432), not the
|
||||
host-mapped port.
|
||||
</p>
|
||||
)}
|
||||
{renderCodeBlock(buildStartCommand())}
|
||||
</div>
|
||||
{renderStep5()}
|
||||
{renderStep6()}
|
||||
|
||||
<div>
|
||||
<div className="font-semibold dark:text-white">After installation</div>
|
||||
|
||||
@@ -34,7 +34,7 @@ export function AuthNavbarComponent() {
|
||||
|
||||
{!IS_CLOUD && (
|
||||
<a
|
||||
className="!text-black hover:opacity-80 dark:!text-gray-200"
|
||||
className="!text-black !underline !decoration-blue-600 !decoration-2 underline-offset-2 hover:opacity-80 dark:!text-gray-200"
|
||||
href="https://databasus.com/cloud"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
|
||||
@@ -232,7 +232,7 @@ export const MainScreenComponent = () => {
|
||||
|
||||
{!IS_CLOUD && (
|
||||
<a
|
||||
className="!text-black hover:opacity-80 dark:!text-gray-200"
|
||||
className="!text-black !underline !decoration-blue-600 !decoration-2 underline-offset-2 hover:opacity-80 dark:!text-gray-200"
|
||||
href="https://databasus.com/cloud"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
|
||||
Reference in New Issue
Block a user