Compare commits

...

6 Commits

Author SHA1 Message Date
Rostislav Dugin
1f1d80245f Merge pull request #368 from databasus/develop
FIX (restores): Increase restore timeout to 23 hours instead of 1 hour
2026-02-17 14:56:58 +03:00
Rostislav Dugin
16a29cf458 FIX (restores): Increase restore timeout to 23 hours instead of 1 hour 2026-02-17 14:56:25 +03:00
Rostislav Dugin
43e04500ac Merge pull request #367 from databasus/develop
FEATURE (backups): Add meaningful names for backups
2026-02-17 14:50:21 +03:00
Rostislav Dugin
cee3022f85 FEATURE (backups): Add meaningful names for backups 2026-02-17 14:49:33 +03:00
Rostislav Dugin
f46d92c480 Merge pull request #365 from databasus/develop
FIX (audit logs): Get rid of IDs in audit logs and improve naming log…
2026-02-15 01:10:54 +03:00
Rostislav Dugin
10677238d7 FIX (audit logs): Get rid of IDs in audit logs and improve naming logging 2026-02-15 01:06:39 +03:00
41 changed files with 725 additions and 336 deletions

View File

@@ -196,7 +196,7 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
backupMetadata, err := n.createBackupUseCase.Execute(
ctx,
backup.ID,
backup,
backupConfig,
database,
storage,
@@ -263,7 +263,7 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
// Delete partial backup from storage
storage, storageErr := n.storageService.GetStorageByID(backup.StorageID)
if storageErr == nil {
if deleteErr := storage.DeleteFile(n.fieldEncryptor, backup.ID); deleteErr != nil {
if deleteErr := storage.DeleteFile(n.fieldEncryptor, backup.ID.String()); deleteErr != nil {
n.logger.Error(
"Failed to delete partial backup file",
"backupId",

View File

@@ -79,7 +79,7 @@ func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
return err
}
err = storage.DeleteFile(c.fieldEncryptor, backup.ID)
err = storage.DeleteFile(c.fieldEncryptor, backup.ID.String())
if err != nil {
// we do not return error here, because sometimes clean up performed
// before unavailable storage removal or change - therefore we should

View File

@@ -25,24 +25,24 @@ var backupRepository = &backups_core.BackupRepository{}
var taskCancelManager = tasks_cancellation.GetTaskCancelManager()
var backupCleaner = &BackupCleaner{
backupRepository: backupRepository,
storageService: storages.GetStorageService(),
backupConfigService: backups_config.GetBackupConfigService(),
fieldEncryptor: encryption.GetFieldEncryptor(),
logger: logger.GetLogger(),
backupRemoveListeners: []backups_core.BackupRemoveListener{},
runOnce: sync.Once{},
hasRun: atomic.Bool{},
backupRepository,
storages.GetStorageService(),
backups_config.GetBackupConfigService(),
encryption.GetFieldEncryptor(),
logger.GetLogger(),
[]backups_core.BackupRemoveListener{},
sync.Once{},
atomic.Bool{},
}
var backupNodesRegistry = &BackupNodesRegistry{
client: cache_utils.GetValkeyClient(),
logger: logger.GetLogger(),
timeout: cache_utils.DefaultCacheTimeout,
pubsubBackups: cache_utils.NewPubSubManager(),
pubsubCompletions: cache_utils.NewPubSubManager(),
runOnce: sync.Once{},
hasRun: atomic.Bool{},
cache_utils.GetValkeyClient(),
logger.GetLogger(),
cache_utils.DefaultCacheTimeout,
cache_utils.NewPubSubManager(),
cache_utils.NewPubSubManager(),
sync.Once{},
atomic.Bool{},
}
func getNodeID() uuid.UUID {
@@ -50,34 +50,35 @@ func getNodeID() uuid.UUID {
}
var backuperNode = &BackuperNode{
databaseService: databases.GetDatabaseService(),
fieldEncryptor: encryption.GetFieldEncryptor(),
workspaceService: workspaces_services.GetWorkspaceService(),
backupRepository: backupRepository,
backupConfigService: backups_config.GetBackupConfigService(),
storageService: storages.GetStorageService(),
notificationSender: notifiers.GetNotifierService(),
backupCancelManager: taskCancelManager,
backupNodesRegistry: backupNodesRegistry,
logger: logger.GetLogger(),
createBackupUseCase: usecases.GetCreateBackupUsecase(),
nodeID: getNodeID(),
lastHeartbeat: time.Time{},
runOnce: sync.Once{},
hasRun: atomic.Bool{},
databases.GetDatabaseService(),
encryption.GetFieldEncryptor(),
workspaces_services.GetWorkspaceService(),
backupRepository,
backups_config.GetBackupConfigService(),
storages.GetStorageService(),
notifiers.GetNotifierService(),
taskCancelManager,
backupNodesRegistry,
logger.GetLogger(),
usecases.GetCreateBackupUsecase(),
getNodeID(),
time.Time{},
sync.Once{},
atomic.Bool{},
}
var backupsScheduler = &BackupsScheduler{
backupRepository: backupRepository,
backupConfigService: backups_config.GetBackupConfigService(),
taskCancelManager: taskCancelManager,
backupNodesRegistry: backupNodesRegistry,
lastBackupTime: time.Now().UTC(),
logger: logger.GetLogger(),
backupToNodeRelations: make(map[uuid.UUID]BackupToNodeRelation),
backuperNode: backuperNode,
runOnce: sync.Once{},
hasRun: atomic.Bool{},
backupRepository,
backups_config.GetBackupConfigService(),
taskCancelManager,
backupNodesRegistry,
databases.GetDatabaseService(),
time.Now().UTC(),
logger.GetLogger(),
make(map[uuid.UUID]BackupToNodeRelation),
backuperNode,
sync.Once{},
atomic.Bool{},
}
func GetBackupsScheduler() *BackupsScheduler {

View File

@@ -7,6 +7,7 @@ import (
"time"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"
@@ -32,7 +33,7 @@ type CreateFailedBackupUsecase struct{}
func (uc *CreateFailedBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -46,7 +47,7 @@ type CreateSuccessBackupUsecase struct{}
func (uc *CreateSuccessBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -65,7 +66,7 @@ type CreateLargeBackupUsecase struct{}
func (uc *CreateLargeBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -84,7 +85,7 @@ type CreateProgressiveBackupUsecase struct{}
func (uc *CreateProgressiveBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -124,7 +125,7 @@ type CreateMediumBackupUsecase struct{}
func (uc *CreateMediumBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -152,7 +153,7 @@ func NewMockTrackingBackupUsecase() *MockTrackingBackupUsecase {
func (m *MockTrackingBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -162,7 +163,7 @@ func (m *MockTrackingBackupUsecase) Execute(
// Send backup ID to channel (non-blocking)
select {
case m.calledBackupIDs <- backupID:
case m.calledBackupIDs <- backup.ID:
default:
}

View File

@@ -13,7 +13,9 @@ import (
"databasus-backend/internal/config"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
task_cancellation "databasus-backend/internal/features/tasks/cancellation"
files_utils "databasus-backend/internal/util/files"
)
const (
@@ -27,6 +29,7 @@ type BackupsScheduler struct {
backupConfigService *backups_config.BackupConfigService
taskCancelManager *task_cancellation.TaskCancelManager
backupNodesRegistry *BackupNodesRegistry
databaseService *databases.DatabaseService
lastBackupTime time.Time
logger *slog.Logger
@@ -113,28 +116,28 @@ func (s *BackupsScheduler) IsBackupNodesAvailable() bool {
return len(nodes) > 0
}
func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool) {
backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(databaseID)
func (s *BackupsScheduler) StartBackup(database *databases.Database, isCallNotifier bool) {
backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(database.ID)
if err != nil {
s.logger.Error("Failed to get backup config by database ID", "error", err)
return
}
if backupConfig.StorageID == nil {
s.logger.Error("Backup config storage ID is nil", "databaseId", databaseID)
s.logger.Error("Backup config storage ID is nil", "databaseId", database.ID)
return
}
// Check for existing in-progress backups
inProgressBackups, err := s.backupRepository.FindByDatabaseIdAndStatus(
databaseID,
database.ID,
backups_core.BackupStatusInProgress,
)
if err != nil {
s.logger.Error(
"Failed to check for in-progress backups",
"databaseId",
databaseID,
database.ID,
"error",
err,
)
@@ -145,7 +148,7 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
s.logger.Warn(
"Backup already in progress for database, skipping new backup",
"databaseId",
databaseID,
database.ID,
"existingBackupId",
inProgressBackups[0].ID,
)
@@ -164,13 +167,22 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
return
}
fmt.Println("make backup")
backupID := uuid.New()
timestamp := time.Now().UTC()
backup := &backups_core.Backup{
ID: backupID,
FileName: fmt.Sprintf(
"%s-%s-%s",
files_utils.SanitizeFilename(database.Name),
timestamp.Format("20060102-150405"),
backupID.String(),
),
DatabaseID: backupConfig.DatabaseID,
StorageID: *backupConfig.StorageID,
Status: backups_core.BackupStatusInProgress,
BackupSizeMb: 0,
CreatedAt: time.Now().UTC(),
CreatedAt: timestamp,
}
if err := s.backupRepository.Save(backup); err != nil {
@@ -224,8 +236,8 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
s.backupToNodeRelations[*leastBusyNodeID] = relation
} else {
s.backupToNodeRelations[*leastBusyNodeID] = BackupToNodeRelation{
NodeID: *leastBusyNodeID,
BackupsIDs: []uuid.UUID{backup.ID},
*leastBusyNodeID,
[]uuid.UUID{backup.ID},
}
}
@@ -329,7 +341,13 @@ func (s *BackupsScheduler) runPendingBackups() error {
backupConfig.BackupInterval.Interval,
)
s.StartBackup(backupConfig.DatabaseID, remainedBackupTryCount == 1)
database, err := s.databaseService.GetDatabaseByID(backupConfig.DatabaseID)
if err != nil {
s.logger.Error("Failed to get database by ID", "error", err)
continue
}
s.StartBackup(database, remainedBackupTryCount == 1)
continue
}
}

View File

@@ -492,7 +492,7 @@ func Test_CheckDeadNodesAndFailBackups_WhenNodeDies_FailsBackupAndCleansUpRegist
assert.NoError(t, err)
// Scheduler assigns backup to mock node
GetBackupsScheduler().StartBackup(database.ID, false)
GetBackupsScheduler().StartBackup(database, false)
time.Sleep(100 * time.Millisecond)
backups, err := backupRepository.FindByDatabaseID(database.ID)
@@ -595,7 +595,7 @@ func Test_OnBackupCompleted_WhenTaskIsNotBackup_SkipsProcessing(t *testing.T) {
assert.NoError(t, err)
// Start a backup and assign it to the node
GetBackupsScheduler().StartBackup(database.ID, false)
GetBackupsScheduler().StartBackup(database, false)
time.Sleep(100 * time.Millisecond)
backups, err := backupRepository.FindByDatabaseID(database.ID)
@@ -892,7 +892,7 @@ func Test_StartBackup_WhenBackupCompletes_DecrementsActiveTaskCount(t *testing.T
t.Logf("Initial active tasks: %d", initialActiveTasks)
// Start backup
scheduler.StartBackup(database.ID, false)
scheduler.StartBackup(database, false)
// Wait for backup to complete
WaitForBackupCompletion(t, database.ID, 0, 10*time.Second)
@@ -995,7 +995,7 @@ func Test_StartBackup_WhenBackupFails_DecrementsActiveTaskCount(t *testing.T) {
t.Logf("Initial active tasks: %d", initialActiveTasks)
// Start backup
scheduler.StartBackup(database.ID, false)
scheduler.StartBackup(database, false)
// Wait for backup to fail
WaitForBackupCompletion(t, database.ID, 0, 10*time.Second)
@@ -1088,7 +1088,7 @@ func Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup(t *testing.T) {
assert.NoError(t, err)
// Try to start a new backup - should be skipped
GetBackupsScheduler().StartBackup(database.ID, false)
GetBackupsScheduler().StartBackup(database, false)
time.Sleep(200 * time.Millisecond)
@@ -1268,10 +1268,10 @@ func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCa
// Start 2 backups simultaneously
t.Log("Starting backup for database1")
scheduler.StartBackup(database1.ID, false)
scheduler.StartBackup(database1, false)
t.Log("Starting backup for database2")
scheduler.StartBackup(database2.ID, false)
scheduler.StartBackup(database2, false)
// Wait up to 10 seconds for both backups to complete
t.Log("Waiting for both backups to complete...")

View File

@@ -6,6 +6,7 @@ import (
backups_download "databasus-backend/internal/features/backups/backups/download"
"databasus-backend/internal/features/databases"
users_middleware "databasus-backend/internal/features/users/middleware"
files_utils "databasus-backend/internal/util/files"
"fmt"
"io"
"net/http"
@@ -322,7 +323,7 @@ func (c *BackupController) generateBackupFilename(
timestamp := backup.CreatedAt.Format("2006-01-02_15-04-05")
// Sanitize database name for filename (replace spaces and special chars)
safeName := sanitizeFilename(database.Name)
safeName := files_utils.SanitizeFilename(database.Name)
// Determine extension based on database type
extension := c.getBackupExtension(database.Type)
@@ -346,33 +347,6 @@ func (c *BackupController) getBackupExtension(
}
}
// sanitizeFilename replaces characters that are invalid or awkward in
// filenames: spaces become underscores, and reserved path/shell characters
// (/ \ : * ? " < > |) become hyphens. All other runes are kept unchanged.
//
// A switch over a small fixed rune set is used instead of a map so the
// function performs no per-call map allocation.
func sanitizeFilename(name string) string {
	// Pre-size for the common case where every rune is a single byte.
	result := make([]rune, 0, len(name))
	for _, char := range name {
		switch char {
		case ' ':
			result = append(result, '_')
		case '/', '\\', ':', '*', '?', '"', '<', '>', '|':
			result = append(result, '-')
		default:
			result = append(result, char)
		}
	}
	return string(result)
}
func (c *BackupController) startDownloadHeartbeat(ctx context.Context, userID uuid.UUID) {
ticker := time.NewTicker(backups_download.GetDownloadHeartbeatInterval())
defer ticker.Stop()

View File

@@ -32,6 +32,7 @@ import (
workspaces_models "databasus-backend/internal/features/workspaces/models"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"databasus-backend/internal/util/encryption"
files_utils "databasus-backend/internal/util/files"
test_utils "databasus-backend/internal/util/testing"
"databasus-backend/internal/util/tools"
)
@@ -956,7 +957,7 @@ func Test_SanitizeFilename(t *testing.T) {
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
result := sanitizeFilename(tt.input)
result := files_utils.SanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
@@ -1407,7 +1408,7 @@ func createTestBackup(
context.Background(),
encryption.GetFieldEncryptor(),
logger,
backup.ID,
backup.ID.String(),
reader,
); err != nil {
panic(fmt.Sprintf("Failed to create test backup file: %v", err))

View File

@@ -8,8 +8,6 @@ import (
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"
"databasus-backend/internal/features/storages"
"github.com/google/uuid"
)
type NotificationSender interface {
@@ -23,7 +21,7 @@ type NotificationSender interface {
type CreateBackupUsecase interface {
Execute(
ctx context.Context,
backupID uuid.UUID,
backup *Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,

View File

@@ -8,7 +8,8 @@ import (
)
type Backup struct {
ID uuid.UUID `json:"id" gorm:"column:id;type:uuid;primaryKey"`
ID uuid.UUID `json:"id" gorm:"column:id;type:uuid;primaryKey"`
FileName string `json:"fileName" gorm:"column:file_name;type:text;not null"`
DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;type:uuid;not null"`
StorageID uuid.UUID `json:"storageId" gorm:"column:storage_id;type:uuid;not null"`

View File

@@ -21,6 +21,7 @@ import (
users_models "databasus-backend/internal/features/users/models"
workspaces_services "databasus-backend/internal/features/workspaces/services"
util_encryption "databasus-backend/internal/util/encryption"
files_utils "databasus-backend/internal/util/files"
"github.com/google/uuid"
)
@@ -92,7 +93,7 @@ func (s *BackupService) MakeBackupWithAuth(
return errors.New("insufficient permissions to create backup for this database")
}
s.backupSchedulerService.StartBackup(databaseID, true)
s.backupSchedulerService.StartBackup(database, true)
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Backup manually initiated for database: %s", database.Name),
@@ -181,11 +182,7 @@ func (s *BackupService) DeleteBackup(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup deleted for database: %s (ID: %s)",
database.Name,
backupID.String(),
),
fmt.Sprintf("Backup deleted for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
@@ -232,11 +229,7 @@ func (s *BackupService) CancelBackup(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup cancelled for database: %s (ID: %s)",
database.Name,
backupID.String(),
),
fmt.Sprintf("Backup cancelled for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
@@ -276,11 +269,7 @@ func (s *BackupService) GetBackupFile(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup file downloaded for database: %s (ID: %s)",
database.Name,
backupID.String(),
),
fmt.Sprintf("Backup file downloaded for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
@@ -336,7 +325,7 @@ func (s *BackupService) getBackupReader(backupID uuid.UUID) (io.ReadCloser, erro
return nil, fmt.Errorf("failed to get storage: %w", err)
}
fileReader, err := storage.GetFile(s.fieldEncryptor, backup.ID)
fileReader, err := storage.GetFile(s.fieldEncryptor, backup.ID.String())
if err != nil {
return nil, fmt.Errorf("failed to get backup file: %w", err)
}
@@ -490,11 +479,7 @@ func (s *BackupService) WriteAuditLogForDownload(
database *databases.Database,
) {
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup file downloaded for database: %s (ID: %s)",
database.Name,
backup.ID.String(),
),
fmt.Sprintf("Backup file downloaded for database: %s", database.Name),
&userID,
database.WorkspaceID,
)
@@ -521,7 +506,7 @@ func (s *BackupService) generateBackupFilename(
database *databases.Database,
) string {
timestamp := backup.CreatedAt.Format("2006-01-02_15-04-05")
safeName := sanitizeFilename(database.Name)
safeName := files_utils.SanitizeFilename(database.Name)
extension := s.getBackupExtension(database.Type)
return fmt.Sprintf("%s_backup_%s%s", safeName, timestamp, extension)
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
usecases_mariadb "databasus-backend/internal/features/backups/backups/usecases/mariadb"
usecases_mongodb "databasus-backend/internal/features/backups/backups/usecases/mongodb"
usecases_mysql "databasus-backend/internal/features/backups/backups/usecases/mysql"
@@ -12,8 +13,6 @@ import (
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/storages"
"github.com/google/uuid"
)
type CreateBackupUsecase struct {
@@ -25,7 +24,7 @@ type CreateBackupUsecase struct {
func (uc *CreateBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -35,7 +34,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypePostgres:
return uc.CreatePostgresqlBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,
@@ -45,7 +44,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypeMysql:
return uc.CreateMysqlBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,
@@ -55,7 +54,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypeMariadb:
return uc.CreateMariadbBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,
@@ -65,7 +64,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypeMongodb:
return uc.CreateMongodbBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,

View File

@@ -19,6 +19,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -52,7 +53,7 @@ type writeResult struct {
func (uc *CreateMariadbBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -82,7 +83,7 @@ func (uc *CreateMariadbBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetMariadbExecutable(
tools.MariadbExecutableMariadbDump,
@@ -136,7 +137,7 @@ func (uc *CreateMariadbBackupUsecase) buildMariadbDumpArgs(
func (uc *CreateMariadbBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
mariadbBin string,
args []string,
@@ -187,7 +188,7 @@ func (uc *CreateMariadbBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -204,7 +205,13 @@ func (uc *CreateMariadbBackupUsecase) streamToStorage(
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()

View File

@@ -16,6 +16,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -46,7 +47,7 @@ type writeResult struct {
func (uc *CreateMongodbBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -76,7 +77,7 @@ func (uc *CreateMongodbBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetMongodbExecutable(
tools.MongodbExecutableMongodump,
@@ -114,7 +115,7 @@ func (uc *CreateMongodbBackupUsecase) buildMongodumpArgs(
func (uc *CreateMongodbBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
mongodumpBin string,
args []string,
@@ -163,7 +164,7 @@ func (uc *CreateMongodbBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -175,7 +176,13 @@ func (uc *CreateMongodbBackupUsecase) streamToStorage(
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()

View File

@@ -19,6 +19,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -52,7 +53,7 @@ type writeResult struct {
func (uc *CreateMysqlBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -82,7 +83,7 @@ func (uc *CreateMysqlBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetMysqlExecutable(
my.Version,
@@ -149,7 +150,7 @@ func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.Mysq
func (uc *CreateMysqlBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
mysqlBin string,
args []string,
@@ -200,7 +201,7 @@ func (uc *CreateMysqlBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -217,7 +218,13 @@ func (uc *CreateMysqlBackupUsecase) streamToStorage(
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()

View File

@@ -16,6 +16,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -53,7 +54,7 @@ type writeResult struct {
func (uc *CreatePostgresqlBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -88,7 +89,7 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetPostgresqlExecutable(
pg.Version,
@@ -107,7 +108,7 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
// streamToStorage streams pg_dump output directly to storage
func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
pgBin string,
args []string,
@@ -166,7 +167,7 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -181,7 +182,13 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
// Start streaming into storage in its own goroutine
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()

View File

@@ -192,6 +192,8 @@ func (s *DatabaseService) UpdateDatabase(
}
}
oldName := existingDatabase.Name
if err := existingDatabase.EncryptSensitiveFields(s.fieldEncryptor); err != nil {
return fmt.Errorf("failed to encrypt sensitive fields: %w", err)
}
@@ -201,11 +203,23 @@ func (s *DatabaseService) UpdateDatabase(
return err
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Database updated: %s", existingDatabase.Name),
&user.ID,
existingDatabase.WorkspaceID,
)
if oldName != existingDatabase.Name {
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Database updated and renamed from '%s' to '%s'",
oldName,
existingDatabase.Name,
),
&user.ID,
existingDatabase.WorkspaceID,
)
} else {
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Database updated: %s", existingDatabase.Name),
&user.ID,
existingDatabase.WorkspaceID,
)
}
return nil
}
@@ -571,9 +585,19 @@ func (s *DatabaseService) TransferDatabaseToWorkspace(
return err
}
sourceWorkspace, err := s.workspaceService.GetWorkspaceByID(*sourceWorkspaceID)
if err != nil {
return fmt.Errorf("failed to get source workspace: %w", err)
}
targetWorkspace, err := s.workspaceService.GetWorkspaceByID(targetWorkspaceID)
if err != nil {
return fmt.Errorf("failed to get target workspace: %w", err)
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Database transferred: %s from workspace %s to workspace %s",
database.Name, sourceWorkspaceID, targetWorkspaceID),
fmt.Sprintf("Database transferred: %s from workspace '%s' to workspace '%s'",
database.Name, sourceWorkspace.Name, targetWorkspace.Name),
nil,
&targetWorkspaceID,
)

View File

@@ -58,6 +58,8 @@ func (s *NotifierService) SaveNotifier(
return err
}
oldName := existingNotifier.Name
if err := existingNotifier.Validate(s.fieldEncryptor); err != nil {
return err
}
@@ -67,11 +69,23 @@ func (s *NotifierService) SaveNotifier(
return err
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Notifier updated: %s", existingNotifier.Name),
&user.ID,
&workspaceID,
)
if oldName != existingNotifier.Name {
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Notifier updated and renamed from '%s' to '%s'",
oldName,
existingNotifier.Name,
),
&user.ID,
&workspaceID,
)
} else {
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Notifier updated: %s", existingNotifier.Name),
&user.ID,
&workspaceID,
)
}
} else {
notifier.WorkspaceID = workspaceID
@@ -343,9 +357,19 @@ func (s *NotifierService) TransferNotifierToWorkspace(
return err
}
sourceWorkspace, err := s.workspaceService.GetWorkspaceByID(sourceWorkspaceID)
if err != nil {
return fmt.Errorf("failed to get source workspace: %w", err)
}
targetWorkspace, err := s.workspaceService.GetWorkspaceByID(targetWorkspaceID)
if err != nil {
return fmt.Errorf("failed to get target workspace: %w", err)
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Notifier transferred: %s from workspace %s to workspace %s",
existingNotifier.Name, sourceWorkspaceID, targetWorkspaceID),
fmt.Sprintf("Notifier transferred: %s from workspace '%s' to workspace '%s'",
existingNotifier.Name, sourceWorkspace.Name, targetWorkspace.Name),
&user.ID,
&targetWorkspaceID,
)

View File

@@ -261,7 +261,7 @@ func Test_RestoreBackup_AuditLogWritten(t *testing.T) {
found := false
for _, log := range auditLogs.AuditLogs {
if strings.Contains(log.Message, "Database restored from backup") &&
if strings.Contains(log.Message, "Database restored for database") &&
strings.Contains(log.Message, database.Name) {
found = true
break
@@ -752,7 +752,7 @@ func createTestBackup(
context.Background(),
fieldEncryptor,
logger,
backup.ID,
backup.ID.String(),
reader,
); err != nil {
panic(fmt.Sprintf("Failed to create test backup file: %v", err))

View File

@@ -190,11 +190,7 @@ func (s *RestoreService) RestoreBackupWithAuth(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Database restored from backup %s for database: %s",
backupID.String(),
database.Name,
),
fmt.Sprintf("Database restored for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
@@ -412,11 +408,7 @@ func (s *RestoreService) CancelRestore(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Restore cancelled for database: %s (ID: %s)",
database.Name,
restoreID.String(),
),
fmt.Sprintf("Restore cancelled for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)

View File

@@ -106,7 +106,7 @@ func (uc *RestoreMariadbBackupUsecase) restoreFromStorage(
storage *storages.Storage,
mdbConfig *mariadbtypes.MariadbDatabase,
) error {
ctx, cancel := context.WithTimeout(parentCtx, 60*time.Minute)
ctx, cancel := context.WithTimeout(parentCtx, 23*time.Hour)
defer cancel()
go func() {
@@ -141,7 +141,7 @@ func (uc *RestoreMariadbBackupUsecase) restoreFromStorage(
defer func() { _ = os.RemoveAll(filepath.Dir(myCnfFile)) }()
// Stream backup directly from storage
rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
rawReader, err := storage.GetFile(fieldEncryptor, backup.FileName)
if err != nil {
return fmt.Errorf("failed to get backup file from storage: %w", err)
}

View File

@@ -154,7 +154,7 @@ func (uc *RestoreMongodbBackupUsecase) restoreFromStorage(
// Stream backup directly from storage
fieldEncryptor := util_encryption.GetFieldEncryptor()
rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
rawReader, err := storage.GetFile(fieldEncryptor, backup.FileName)
if err != nil {
return fmt.Errorf("failed to get backup file from storage: %w", err)
}

View File

@@ -105,7 +105,7 @@ func (uc *RestoreMysqlBackupUsecase) restoreFromStorage(
storage *storages.Storage,
myConfig *mysqltypes.MysqlDatabase,
) error {
ctx, cancel := context.WithTimeout(parentCtx, 60*time.Minute)
ctx, cancel := context.WithTimeout(parentCtx, 23*time.Hour)
defer cancel()
go func() {
@@ -140,7 +140,7 @@ func (uc *RestoreMysqlBackupUsecase) restoreFromStorage(
defer func() { _ = os.RemoveAll(filepath.Dir(myCnfFile)) }()
// Stream backup directly from storage
rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
rawReader, err := storage.GetFile(fieldEncryptor, backup.FileName)
if err != nil {
return fmt.Errorf("failed to get backup file from storage: %w", err)
}

View File

@@ -152,7 +152,7 @@ func (uc *RestorePostgresqlBackupUsecase) restoreViaStdin(
"--no-acl",
}
ctx, cancel := context.WithTimeout(parentCtx, 60*time.Minute)
ctx, cancel := context.WithTimeout(parentCtx, 23*time.Hour)
defer cancel()
// Monitor for shutdown and parent cancellation
@@ -209,7 +209,7 @@ func (uc *RestorePostgresqlBackupUsecase) restoreViaStdin(
}
// Get backup stream from storage
rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
rawReader, err := storage.GetFile(fieldEncryptor, backup.FileName)
if err != nil {
return fmt.Errorf("failed to get backup file from storage: %w", err)
}
@@ -429,7 +429,7 @@ func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
isExcludeExtensions,
)
ctx, cancel := context.WithTimeout(parentCtx, 60*time.Minute)
ctx, cancel := context.WithTimeout(parentCtx, 23*time.Hour)
defer cancel()
// Monitor for shutdown and parent cancellation
@@ -540,12 +540,14 @@ func (uc *RestorePostgresqlBackupUsecase) downloadBackupToTempFile(
"encrypted",
backup.Encryption == backups_config.BackupEncryptionEncrypted,
)
fieldEncryptor := util_encryption.GetFieldEncryptor()
rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
rawReader, err := storage.GetFile(fieldEncryptor, backup.FileName)
if err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to get backup file from storage: %w", err)
}
defer func() {
if err := rawReader.Close(); err != nil {
uc.logger.Error("Failed to close backup reader", "error", err)

View File

@@ -14,13 +14,13 @@ type StorageFileSaver interface {
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error
GetFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) (io.ReadCloser, error)
GetFile(encryptor encryption.FieldEncryptor, fileName string) (io.ReadCloser, error)
DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error
DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error
Validate(encryptor encryption.FieldEncryptor) error

View File

@@ -41,10 +41,10 @@ func (s *Storage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
err := s.getSpecificStorage().SaveFile(ctx, encryptor, logger, fileID, file)
err := s.getSpecificStorage().SaveFile(ctx, encryptor, logger, fileName, file)
if err != nil {
lastSaveError := err.Error()
s.LastSaveError = &lastSaveError
@@ -58,13 +58,13 @@ func (s *Storage) SaveFile(
func (s *Storage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
return s.getSpecificStorage().GetFile(encryptor, fileID)
return s.getSpecificStorage().GetFile(encryptor, fileName)
}
func (s *Storage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
return s.getSpecificStorage().DeleteFile(encryptor, fileID)
func (s *Storage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
return s.getSpecificStorage().DeleteFile(encryptor, fileName)
}
func (s *Storage) Validate(encryptor encryption.FieldEncryptor) error {

View File

@@ -229,12 +229,12 @@ acl = private`, s3Container.accessKey, s3Container.secretKey, s3Container.endpoi
context.Background(),
encryptor,
logger.GetLogger(),
fileID,
fileID.String(),
bytes.NewReader(fileData),
)
require.NoError(t, err, "SaveFile should succeed")
file, err := tc.storage.GetFile(encryptor, fileID)
file, err := tc.storage.GetFile(encryptor, fileID.String())
assert.NoError(t, err, "GetFile should succeed")
defer file.Close()
@@ -252,15 +252,15 @@ acl = private`, s3Container.accessKey, s3Container.secretKey, s3Container.endpoi
context.Background(),
encryptor,
logger.GetLogger(),
fileID,
fileID.String(),
bytes.NewReader(fileData),
)
require.NoError(t, err, "SaveFile should succeed")
err = tc.storage.DeleteFile(encryptor, fileID)
err = tc.storage.DeleteFile(encryptor, fileID.String())
assert.NoError(t, err, "DeleteFile should succeed")
file, err := tc.storage.GetFile(encryptor, fileID)
file, err := tc.storage.GetFile(encryptor, fileID.String())
assert.Error(t, err, "GetFile should fail for non-existent file")
if file != nil {
file.Close()
@@ -270,7 +270,7 @@ acl = private`, s3Container.accessKey, s3Container.secretKey, s3Container.endpoi
t.Run("Test_TestDeleteNonExistentFile_DoesNotError", func(t *testing.T) {
// Try to delete a non-existent file
nonExistentID := uuid.New()
err := tc.storage.DeleteFile(encryptor, nonExistentID)
err := tc.storage.DeleteFile(encryptor, nonExistentID.String())
assert.NoError(t, err, "DeleteFile should not error for non-existent file")
})
})

View File

@@ -68,7 +68,7 @@ func (s *AzureBlobStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -82,7 +82,7 @@ func (s *AzureBlobStorage) SaveFile(
return err
}
blobName := s.buildBlobName(fileID.String())
blobName := s.buildBlobName(fileName)
blockBlobClient := client.ServiceClient().
NewContainerClient(s.ContainerName).
NewBlockBlobClient(blobName)
@@ -157,14 +157,14 @@ func (s *AzureBlobStorage) SaveFile(
func (s *AzureBlobStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
client, err := s.getClient(encryptor)
if err != nil {
return nil, err
}
blobName := s.buildBlobName(fileID.String())
blobName := s.buildBlobName(fileName)
response, err := client.DownloadStream(
context.TODO(),
@@ -179,13 +179,13 @@ func (s *AzureBlobStorage) GetFile(
return response.Body, nil
}
func (s *AzureBlobStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
func (s *AzureBlobStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
client, err := s.getClient(encryptor)
if err != nil {
return err
}
blobName := s.buildBlobName(fileID.String())
blobName := s.buildBlobName(fileName)
ctx, cancel := context.WithTimeout(context.Background(), azureDeleteTimeout)
defer cancel()

View File

@@ -41,7 +41,7 @@ func (f *FTPStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -50,19 +50,19 @@ func (f *FTPStorage) SaveFile(
default:
}
logger.Info("Starting to save file to FTP storage", "fileId", fileID.String(), "host", f.Host)
logger.Info("Starting to save file to FTP storage", "fileName", fileName, "host", f.Host)
conn, err := f.connect(encryptor, ftpConnectTimeout)
if err != nil {
logger.Error("Failed to connect to FTP", "fileId", fileID.String(), "error", err)
logger.Error("Failed to connect to FTP", "fileName", fileName, "error", err)
return fmt.Errorf("failed to connect to FTP: %w", err)
}
defer func() {
if quitErr := conn.Quit(); quitErr != nil {
logger.Error(
"Failed to close FTP connection",
"fileId",
fileID.String(),
"fileName",
fileName,
"error",
quitErr,
)
@@ -73,8 +73,8 @@ func (f *FTPStorage) SaveFile(
if err := f.ensureDirectory(conn, f.Path); err != nil {
logger.Error(
"Failed to ensure directory",
"fileId",
fileID.String(),
"fileName",
fileName,
"path",
f.Path,
"error",
@@ -84,8 +84,8 @@ func (f *FTPStorage) SaveFile(
}
}
filePath := f.getFilePath(fileID.String())
logger.Debug("Uploading file to FTP", "fileId", fileID.String(), "filePath", filePath)
filePath := f.getFilePath(fileName)
logger.Debug("Uploading file to FTP", "fileName", fileName, "filePath", filePath)
ctxReader := &contextReader{ctx: ctx, reader: file}
@@ -93,18 +93,18 @@ func (f *FTPStorage) SaveFile(
if err != nil {
select {
case <-ctx.Done():
logger.Info("FTP upload cancelled", "fileId", fileID.String())
logger.Info("FTP upload cancelled", "fileName", fileName)
return ctx.Err()
default:
logger.Error("Failed to upload file to FTP", "fileId", fileID.String(), "error", err)
logger.Error("Failed to upload file to FTP", "fileName", fileName, "error", err)
return fmt.Errorf("failed to upload file to FTP: %w", err)
}
}
logger.Info(
"Successfully saved file to FTP storage",
"fileId",
fileID.String(),
"fileName",
fileName,
"filePath",
filePath,
)
@@ -113,14 +113,14 @@ func (f *FTPStorage) SaveFile(
func (f *FTPStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
conn, err := f.connect(encryptor, ftpConnectTimeout)
if err != nil {
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
}
filePath := f.getFilePath(fileID.String())
filePath := f.getFilePath(fileName)
resp, err := conn.Retr(filePath)
if err != nil {
@@ -134,7 +134,7 @@ func (f *FTPStorage) GetFile(
}, nil
}
func (f *FTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
func (f *FTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
ctx, cancel := context.WithTimeout(context.Background(), ftpDeleteTimeout)
defer cancel()
@@ -146,7 +146,7 @@ func (f *FTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid
_ = conn.Quit()
}()
filePath := f.getFilePath(fileID.String())
filePath := f.getFilePath(fileName)
_, err = conn.FileSize(filePath)
if err != nil {

View File

@@ -50,21 +50,19 @@ func (s *GoogleDriveStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
return s.withRetryOnAuth(ctx, encryptor, func(driveService *drive.Service) error {
filename := fileID.String()
folderID, err := s.ensureBackupsFolderExists(ctx, driveService)
if err != nil {
return fmt.Errorf("failed to create/find backups folder: %w", err)
}
_ = s.deleteByName(ctx, driveService, filename, folderID)
_ = s.deleteByName(ctx, driveService, fileName, folderID)
fileMeta := &drive.File{
Name: filename,
Name: fileName,
Parents: []string{folderID},
}
@@ -91,7 +89,7 @@ func (s *GoogleDriveStorage) SaveFile(
logger.Info(
"file uploaded to Google Drive",
"name",
filename,
fileName,
"folder",
"databasus_backups",
)
@@ -152,7 +150,7 @@ func (r *backpressureReader) Read(p []byte) (n int, err error) {
func (s *GoogleDriveStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
var result io.ReadCloser
err := s.withRetryOnAuth(
@@ -164,7 +162,7 @@ func (s *GoogleDriveStorage) GetFile(
return fmt.Errorf("failed to find backups folder: %w", err)
}
fileIDGoogle, err := s.lookupFileID(driveService, fileID.String(), folderID)
fileIDGoogle, err := s.lookupFileID(driveService, fileName, folderID)
if err != nil {
return err
}
@@ -184,7 +182,7 @@ func (s *GoogleDriveStorage) GetFile(
func (s *GoogleDriveStorage) DeleteFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) error {
ctx, cancel := context.WithTimeout(context.Background(), gdDeleteTimeout)
defer cancel()
@@ -195,7 +193,7 @@ func (s *GoogleDriveStorage) DeleteFile(
return fmt.Errorf("failed to find backups folder: %w", err)
}
return s.deleteByName(ctx, driveService, fileID.String(), folderID)
return s.deleteByName(ctx, driveService, fileName, folderID)
})
}

View File

@@ -36,7 +36,7 @@ func (l *LocalStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -45,7 +45,7 @@ func (l *LocalStorage) SaveFile(
default:
}
logger.Info("Starting to save file to local storage", "fileId", fileID.String())
logger.Info("Starting to save file to local storage", "fileName", fileName)
err := files_utils.EnsureDirectories([]string{
config.GetEnv().TempFolder,
@@ -54,15 +54,15 @@ func (l *LocalStorage) SaveFile(
return fmt.Errorf("failed to ensure directories: %w", err)
}
tempFilePath := filepath.Join(config.GetEnv().TempFolder, fileID.String())
logger.Debug("Creating temp file", "fileId", fileID.String(), "tempPath", tempFilePath)
tempFilePath := filepath.Join(config.GetEnv().TempFolder, fileName)
logger.Debug("Creating temp file", "fileName", fileName, "tempPath", tempFilePath)
tempFile, err := os.Create(tempFilePath)
if err != nil {
logger.Error(
"Failed to create temp file",
"fileId",
fileID.String(),
"fileName",
fileName,
"tempPath",
tempFilePath,
"error",
@@ -74,29 +74,29 @@ func (l *LocalStorage) SaveFile(
_ = tempFile.Close()
}()
logger.Debug("Copying file data to temp file", "fileId", fileID.String())
logger.Debug("Copying file data to temp file", "fileName", fileName)
_, err = copyWithContext(ctx, tempFile, file)
if err != nil {
logger.Error("Failed to write to temp file", "fileId", fileID.String(), "error", err)
logger.Error("Failed to write to temp file", "fileName", fileName, "error", err)
return fmt.Errorf("failed to write to temp file: %w", err)
}
if err = tempFile.Sync(); err != nil {
logger.Error("Failed to sync temp file", "fileId", fileID.String(), "error", err)
logger.Error("Failed to sync temp file", "fileName", fileName, "error", err)
return fmt.Errorf("failed to sync temp file: %w", err)
}
// Close the temp file explicitly before moving it (required on Windows)
if err = tempFile.Close(); err != nil {
logger.Error("Failed to close temp file", "fileId", fileID.String(), "error", err)
logger.Error("Failed to close temp file", "fileName", fileName, "error", err)
return fmt.Errorf("failed to close temp file: %w", err)
}
finalPath := filepath.Join(config.GetEnv().DataFolder, fileID.String())
finalPath := filepath.Join(config.GetEnv().DataFolder, fileName)
logger.Debug(
"Moving file from temp to final location",
"fileId",
fileID.String(),
"fileName",
fileName,
"finalPath",
finalPath,
)
@@ -105,8 +105,8 @@ func (l *LocalStorage) SaveFile(
if err = os.Rename(tempFilePath, finalPath); err != nil {
logger.Error(
"Failed to move file from temp to backups",
"fileId",
fileID.String(),
"fileName",
fileName,
"tempPath",
tempFilePath,
"finalPath",
@@ -119,8 +119,8 @@ func (l *LocalStorage) SaveFile(
logger.Info(
"Successfully saved file to local storage",
"fileId",
fileID.String(),
"fileName",
fileName,
"finalPath",
finalPath,
)
@@ -130,12 +130,12 @@ func (l *LocalStorage) SaveFile(
func (l *LocalStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
filePath := filepath.Join(config.GetEnv().DataFolder, fileID.String())
filePath := filepath.Join(config.GetEnv().DataFolder, fileName)
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return nil, fmt.Errorf("file not found: %s", fileID.String())
return nil, fmt.Errorf("file not found: %s", fileName)
}
file, err := os.Open(filePath)
@@ -146,8 +146,8 @@ func (l *LocalStorage) GetFile(
return file, nil
}
func (l *LocalStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
filePath := filepath.Join(config.GetEnv().DataFolder, fileID.String())
func (l *LocalStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
filePath := filepath.Join(config.GetEnv().DataFolder, fileName)
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return nil

View File

@@ -46,7 +46,7 @@ func (n *NASStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -55,19 +55,19 @@ func (n *NASStorage) SaveFile(
default:
}
logger.Info("Starting to save file to NAS storage", "fileId", fileID.String(), "host", n.Host)
logger.Info("Starting to save file to NAS storage", "fileName", fileName, "host", n.Host)
session, err := n.createSessionWithContext(ctx, encryptor)
if err != nil {
logger.Error("Failed to create NAS session", "fileId", fileID.String(), "error", err)
logger.Error("Failed to create NAS session", "fileName", fileName, "error", err)
return fmt.Errorf("failed to create NAS session: %w", err)
}
defer func() {
if logoffErr := session.Logoff(); logoffErr != nil {
logger.Error(
"Failed to logoff NAS session",
"fileId",
fileID.String(),
"fileName",
fileName,
"error",
logoffErr,
)
@@ -78,8 +78,8 @@ func (n *NASStorage) SaveFile(
if err != nil {
logger.Error(
"Failed to mount NAS share",
"fileId",
fileID.String(),
"fileName",
fileName,
"share",
n.Share,
"error",
@@ -91,8 +91,8 @@ func (n *NASStorage) SaveFile(
if umountErr := fs.Umount(); umountErr != nil {
logger.Error(
"Failed to unmount NAS share",
"fileId",
fileID.String(),
"fileName",
fileName,
"error",
umountErr,
)
@@ -104,8 +104,8 @@ func (n *NASStorage) SaveFile(
if err := n.ensureDirectory(fs, n.Path); err != nil {
logger.Error(
"Failed to ensure directory",
"fileId",
fileID.String(),
"fileName",
fileName,
"path",
n.Path,
"error",
@@ -115,15 +115,15 @@ func (n *NASStorage) SaveFile(
}
}
filePath := n.getFilePath(fileID.String())
logger.Debug("Creating file on NAS", "fileId", fileID.String(), "filePath", filePath)
filePath := n.getFilePath(fileName)
logger.Debug("Creating file on NAS", "fileName", fileName, "filePath", filePath)
nasFile, err := fs.Create(filePath)
if err != nil {
logger.Error(
"Failed to create file on NAS",
"fileId",
fileID.String(),
"fileName",
fileName,
"filePath",
filePath,
"error",
@@ -133,21 +133,21 @@ func (n *NASStorage) SaveFile(
}
defer func() {
if closeErr := nasFile.Close(); closeErr != nil {
logger.Error("Failed to close NAS file", "fileId", fileID.String(), "error", closeErr)
logger.Error("Failed to close NAS file", "fileName", fileName, "error", closeErr)
}
}()
logger.Debug("Copying file data to NAS", "fileId", fileID.String())
logger.Debug("Copying file data to NAS", "fileName", fileName)
_, err = copyWithContext(ctx, nasFile, file)
if err != nil {
logger.Error("Failed to write file to NAS", "fileId", fileID.String(), "error", err)
logger.Error("Failed to write file to NAS", "fileName", fileName, "error", err)
return fmt.Errorf("failed to write file to NAS: %w", err)
}
logger.Info(
"Successfully saved file to NAS storage",
"fileId",
fileID.String(),
"fileName",
fileName,
"filePath",
filePath,
)
@@ -156,7 +156,7 @@ func (n *NASStorage) SaveFile(
func (n *NASStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
session, err := n.createSession(encryptor)
if err != nil {
@@ -169,14 +169,14 @@ func (n *NASStorage) GetFile(
return nil, fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
}
filePath := n.getFilePath(fileID.String())
filePath := n.getFilePath(fileName)
// Check if file exists
_, err = fs.Stat(filePath)
if err != nil {
_ = fs.Umount()
_ = session.Logoff()
return nil, fmt.Errorf("file not found: %s", fileID.String())
return nil, fmt.Errorf("file not found: %s", fileName)
}
nasFile, err := fs.Open(filePath)
@@ -194,7 +194,7 @@ func (n *NASStorage) GetFile(
}, nil
}
func (n *NASStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
func (n *NASStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
ctx, cancel := context.WithTimeout(context.Background(), nasDeleteTimeout)
defer cancel()
@@ -214,7 +214,7 @@ func (n *NASStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid
_ = fs.Umount()
}()
filePath := n.getFilePath(fileID.String())
filePath := n.getFilePath(fileName)
_, err = fs.Stat(filePath)
if err != nil {

View File

@@ -41,7 +41,7 @@ func (r *RcloneStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -50,28 +50,28 @@ func (r *RcloneStorage) SaveFile(
default:
}
logger.Info("Starting to save file to rclone storage", "fileId", fileID.String())
logger.Info("Starting to save file to rclone storage", "fileName", fileName)
remoteFs, err := r.getFs(ctx, encryptor)
if err != nil {
logger.Error("Failed to create rclone filesystem", "fileId", fileID.String(), "error", err)
logger.Error("Failed to create rclone filesystem", "fileName", fileName, "error", err)
return fmt.Errorf("failed to create rclone filesystem: %w", err)
}
filePath := r.getFilePath(fileID.String())
logger.Debug("Uploading file via rclone", "fileId", fileID.String(), "filePath", filePath)
filePath := r.getFilePath(fileName)
logger.Debug("Uploading file via rclone", "fileName", fileName, "filePath", filePath)
_, err = operations.Rcat(ctx, remoteFs, filePath, io.NopCloser(file), time.Now().UTC(), nil)
if err != nil {
select {
case <-ctx.Done():
logger.Info("Rclone upload cancelled", "fileId", fileID.String())
logger.Info("Rclone upload cancelled", "fileName", fileName)
return ctx.Err()
default:
logger.Error(
"Failed to upload file via rclone",
"fileId",
fileID.String(),
"fileName",
fileName,
"error",
err,
)
@@ -81,8 +81,8 @@ func (r *RcloneStorage) SaveFile(
logger.Info(
"Successfully saved file to rclone storage",
"fileId",
fileID.String(),
"fileName",
fileName,
"filePath",
filePath,
)
@@ -91,7 +91,7 @@ func (r *RcloneStorage) SaveFile(
func (r *RcloneStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
ctx := context.Background()
@@ -100,7 +100,7 @@ func (r *RcloneStorage) GetFile(
return nil, fmt.Errorf("failed to create rclone filesystem: %w", err)
}
filePath := r.getFilePath(fileID.String())
filePath := r.getFilePath(fileName)
obj, err := remoteFs.NewObject(ctx, filePath)
if err != nil {
@@ -115,7 +115,7 @@ func (r *RcloneStorage) GetFile(
return reader, nil
}
func (r *RcloneStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
func (r *RcloneStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
ctx, cancel := context.WithTimeout(context.Background(), rcloneDeleteTimeout)
defer cancel()
@@ -124,7 +124,7 @@ func (r *RcloneStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID u
return fmt.Errorf("failed to create rclone filesystem: %w", err)
}
filePath := r.getFilePath(fileID.String())
filePath := r.getFilePath(fileName)
obj, err := remoteFs.NewObject(ctx, filePath)
if err != nil {

View File

@@ -55,7 +55,7 @@ func (s *S3Storage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -69,7 +69,7 @@ func (s *S3Storage) SaveFile(
return err
}
objectKey := s.buildObjectKey(fileID.String())
objectKey := s.buildObjectKey(fileName)
uploadID, err := coreClient.NewMultipartUpload(
ctx,
@@ -184,14 +184,14 @@ func (s *S3Storage) SaveFile(
func (s *S3Storage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
client, err := s.getClient(encryptor)
if err != nil {
return nil, err
}
objectKey := s.buildObjectKey(fileID.String())
objectKey := s.buildObjectKey(fileName)
object, err := client.GetObject(
context.TODO(),
@@ -221,13 +221,13 @@ func (s *S3Storage) GetFile(
return object, nil
}
func (s *S3Storage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
func (s *S3Storage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
client, err := s.getClient(encryptor)
if err != nil {
return err
}
objectKey := s.buildObjectKey(fileID.String())
objectKey := s.buildObjectKey(fileName)
ctx, cancel := context.WithTimeout(context.Background(), s3DeleteTimeout)
defer cancel()

View File

@@ -41,7 +41,7 @@ func (s *SFTPStorage) SaveFile(
ctx context.Context,
encryptor encryption.FieldEncryptor,
logger *slog.Logger,
fileID uuid.UUID,
fileName string,
file io.Reader,
) error {
select {
@@ -50,19 +50,19 @@ func (s *SFTPStorage) SaveFile(
default:
}
logger.Info("Starting to save file to SFTP storage", "fileId", fileID.String(), "host", s.Host)
logger.Info("Starting to save file to SFTP storage", "fileName", fileName, "host", s.Host)
client, sshConn, err := s.connect(encryptor, sftpConnectTimeout)
if err != nil {
logger.Error("Failed to connect to SFTP", "fileId", fileID.String(), "error", err)
logger.Error("Failed to connect to SFTP", "fileName", fileName, "error", err)
return fmt.Errorf("failed to connect to SFTP: %w", err)
}
defer func() {
if closeErr := client.Close(); closeErr != nil {
logger.Error(
"Failed to close SFTP client",
"fileId",
fileID.String(),
"fileName",
fileName,
"error",
closeErr,
)
@@ -70,8 +70,8 @@ func (s *SFTPStorage) SaveFile(
if closeErr := sshConn.Close(); closeErr != nil {
logger.Error(
"Failed to close SSH connection",
"fileId",
fileID.String(),
"fileName",
fileName,
"error",
closeErr,
)
@@ -82,8 +82,8 @@ func (s *SFTPStorage) SaveFile(
if err := s.ensureDirectory(client, s.Path); err != nil {
logger.Error(
"Failed to ensure directory",
"fileId",
fileID.String(),
"fileName",
fileName,
"path",
s.Path,
"error",
@@ -93,12 +93,12 @@ func (s *SFTPStorage) SaveFile(
}
}
filePath := s.getFilePath(fileID.String())
logger.Debug("Uploading file to SFTP", "fileId", fileID.String(), "filePath", filePath)
filePath := s.getFilePath(fileName)
logger.Debug("Uploading file to SFTP", "fileName", fileName, "filePath", filePath)
remoteFile, err := client.Create(filePath)
if err != nil {
logger.Error("Failed to create remote file", "fileId", fileID.String(), "error", err)
logger.Error("Failed to create remote file", "fileName", fileName, "error", err)
return fmt.Errorf("failed to create remote file: %w", err)
}
defer func() {
@@ -111,18 +111,18 @@ func (s *SFTPStorage) SaveFile(
if err != nil {
select {
case <-ctx.Done():
logger.Info("SFTP upload cancelled", "fileId", fileID.String())
logger.Info("SFTP upload cancelled", "fileName", fileName)
return ctx.Err()
default:
logger.Error("Failed to upload file to SFTP", "fileId", fileID.String(), "error", err)
logger.Error("Failed to upload file to SFTP", "fileName", fileName, "error", err)
return fmt.Errorf("failed to upload file to SFTP: %w", err)
}
}
logger.Info(
"Successfully saved file to SFTP storage",
"fileId",
fileID.String(),
"fileName",
fileName,
"filePath",
filePath,
)
@@ -131,14 +131,14 @@ func (s *SFTPStorage) SaveFile(
func (s *SFTPStorage) GetFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
fileName string,
) (io.ReadCloser, error) {
client, sshConn, err := s.connect(encryptor, sftpConnectTimeout)
if err != nil {
return nil, fmt.Errorf("failed to connect to SFTP: %w", err)
}
filePath := s.getFilePath(fileID.String())
filePath := s.getFilePath(fileName)
remoteFile, err := client.Open(filePath)
if err != nil {
@@ -154,7 +154,7 @@ func (s *SFTPStorage) GetFile(
}, nil
}
func (s *SFTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
func (s *SFTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileName string) error {
ctx, cancel := context.WithTimeout(context.Background(), sftpDeleteTimeout)
defer cancel()
@@ -167,7 +167,7 @@ func (s *SFTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uui
_ = sshConn.Close()
}()
filePath := s.getFilePath(fileID.String())
filePath := s.getFilePath(fileName)
_, err = client.Stat(filePath)
if err != nil {

View File

@@ -92,6 +92,8 @@ func (s *StorageService) SaveStorage(
existingStorage.Update(storage)
oldName := existingStorage.Name
if err := existingStorage.EncryptSensitiveData(s.fieldEncryptor); err != nil {
return err
}
@@ -105,11 +107,19 @@ func (s *StorageService) SaveStorage(
return err
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Storage updated: %s", existingStorage.Name),
&user.ID,
&workspaceID,
)
if oldName != existingStorage.Name {
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Storage renamed from '%s' to '%s'", oldName, existingStorage.Name),
&user.ID,
&workspaceID,
)
} else {
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Storage updated: %s", existingStorage.Name),
&user.ID,
&workspaceID,
)
}
} else {
storage.WorkspaceID = workspaceID
@@ -368,9 +378,26 @@ func (s *StorageService) TransferStorageToWorkspace(
return err
}
sourceWorkspace, err := s.workspaceService.GetWorkspaceByID(sourceWorkspaceID)
if err != nil {
return fmt.Errorf("failed to get source workspace: %w", err)
}
targetWorkspace, err := s.workspaceService.GetWorkspaceByID(targetWorkspaceID)
if err != nil {
return fmt.Errorf("failed to get target workspace: %w", err)
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Storage transferred: %s from workspace %s to workspace %s",
existingStorage.Name, sourceWorkspaceID, targetWorkspaceID),
fmt.Sprintf("Storage transferred out: %s to workspace '%s'",
existingStorage.Name, targetWorkspace.Name),
&user.ID,
&sourceWorkspaceID,
)
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Storage transferred in: %s from workspace '%s'",
existingStorage.Name, sourceWorkspace.Name),
&user.ID,
&targetWorkspaceID,
)

View File

@@ -390,7 +390,7 @@ func (s *UserService) InviteUser(
message := fmt.Sprintf("User invited: %s", request.Email)
if request.IntendedWorkspaceID != nil {
message += fmt.Sprintf(" for workspace %s", request.IntendedWorkspaceID.String())
message += " for workspace"
}
s.auditLogWriter.WriteAuditLog(
message,
@@ -437,6 +437,9 @@ func (s *UserService) UpdateUserInfo(
return fmt.Errorf("failed to get user: %w", err)
}
oldEmail := user.Email
oldName := user.Name
if user.Email == "admin" && request.Email != nil && *request.Email != user.Email {
return errors.New("admin email cannot be changed")
}
@@ -455,7 +458,28 @@ func (s *UserService) UpdateUserInfo(
return fmt.Errorf("failed to update user info: %w", err)
}
s.auditLogWriter.WriteAuditLog("User info updated", &userID, nil)
var auditMessages []string
if request.Email != nil && *request.Email != oldEmail {
auditMessages = append(
auditMessages,
fmt.Sprintf("Email changed from '%s' to '%s'", oldEmail, *request.Email),
)
}
if request.Name != nil && *request.Name != oldName {
auditMessages = append(
auditMessages,
fmt.Sprintf("Name changed from '%s' to '%s'", oldName, *request.Name),
)
}
if len(auditMessages) > 0 {
for _, message := range auditMessages {
s.auditLogWriter.WriteAuditLog(message, &userID, nil)
}
} else {
s.auditLogWriter.WriteAuditLog("User info updated", &userID, nil)
}
return nil
}

View File

@@ -129,6 +129,8 @@ func (s *WorkspaceService) UpdateWorkspace(
return nil, fmt.Errorf("failed to get workspace: %w", err)
}
oldName := existingWorkspace.Name
updateDTO.ID = workspaceID
updateDTO.CreatedAt = existingWorkspace.CreatedAt
@@ -138,11 +140,19 @@ func (s *WorkspaceService) UpdateWorkspace(
return nil, fmt.Errorf("failed to update workspace: %w", err)
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Workspace updated: %s", updateDTO.Name),
&user.ID,
&workspaceID,
)
if oldName != updateDTO.Name {
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Workspace updated and renamed from '%s' to '%s'", oldName, updateDTO.Name),
&user.ID,
&workspaceID,
)
} else {
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Workspace updated: %s", updateDTO.Name),
&user.ID,
&workspaceID,
)
}
return existingWorkspace, nil
}

View File

@@ -0,0 +1,48 @@
package files_utils
// SanitizeFilename replaces characters that are invalid or problematic in filenames
// across different operating systems (Windows, Linux, macOS) and storage systems
// (local filesystem, S3, FTP, SFTP, NAS, rclone, Azure Blob, Google Drive).
//
// The following characters are replaced:
//   - Space (' ') -> underscore ('_')
//   - Forward slash ('/'), backslash ('\'), colon (':'), asterisk ('*'),
//     question mark ('?'), double quote ('"'), less than ('<'),
//     greater than ('>'), and pipe ('|') -> hyphen ('-')
//
// All other characters (including multi-byte Unicode runes) pass through
// unchanged. This ensures filenames work correctly on:
//   - Windows (strict filename rules)
//   - Unix/Linux/macOS (forward slashes are path separators)
//   - All cloud storage providers (S3, Azure Blob, Google Drive)
//   - Network storage (FTP, SFTP, NAS, rclone)
func SanitizeFilename(name string) string {
	// len(name) counts bytes, which is an upper bound on the rune count,
	// so the result slice never reallocates while appending.
	result := make([]rune, 0, len(name))
	for _, char := range name {
		// A switch avoids allocating a replacement map on every call.
		switch char {
		case ' ':
			result = append(result, '_')
		case '/', '\\', ':', '*', '?', '"', '<', '>', '|':
			result = append(result, '-')
		default:
			result = append(result, char)
		}
	}
	return string(result)
}

View File

@@ -0,0 +1,217 @@
package files_utils
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_SanitizeFilename_ReplacesSpecialCharacters(t *testing.T) {
tests := []struct {
name string
input string
expected string
}{
{
name: "replaces spaces with underscores",
input: "my database name",
expected: "my_database_name",
},
{
name: "replaces forward slashes",
input: "db/prod/main",
expected: "db-prod-main",
},
{
name: "replaces backslashes",
input: "db\\prod\\main",
expected: "db-prod-main",
},
{
name: "replaces colons",
input: "db:production:main",
expected: "db-production-main",
},
{
name: "replaces asterisks",
input: "db*wildcard",
expected: "db-wildcard",
},
{
name: "replaces question marks",
input: "db?query",
expected: "db-query",
},
{
name: "replaces double quotes",
input: "db\"quoted\"name",
expected: "db-quoted-name",
},
{
name: "replaces less than signs",
input: "db<redirect",
expected: "db-redirect",
},
{
name: "replaces greater than signs",
input: "db>output",
expected: "db-output",
},
{
name: "replaces pipes",
input: "db|pipe",
expected: "db-pipe",
},
{
name: "replaces multiple different special characters",
input: "my db:/backup\\file*2024?",
expected: "my_db--backup-file-2024-",
},
{
name: "handles all special characters at once",
input: " /\\:*?\"<>|",
expected: "_---------",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := SanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func Test_SanitizeFilename_HandlesEdgeCases(t *testing.T) {
tests := []struct {
name string
input string
expected string
}{
{
name: "empty string returns empty string",
input: "",
expected: "",
},
{
name: "string with no special characters remains unchanged",
input: "simple_database_name",
expected: "simple_database_name",
},
{
name: "string with hyphens and underscores remains unchanged",
input: "my-database_name-123",
expected: "my-database_name-123",
},
{
name: "preserves alphanumeric characters",
input: "Database123ABC",
expected: "Database123ABC",
},
{
name: "preserves dots and parentheses",
input: "db.production.(v2)",
expected: "db.production.(v2)",
},
{
name: "handles unicode characters",
input: "базаанных_テスト",
expected: "базаанных_テスト",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := SanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func Test_SanitizeFilename_WindowsReservedNames(t *testing.T) {
// Windows reserved names are case-insensitive: CON, PRN, AUX, NUL, COM1-COM9, LPT1-LPT9
// Our function doesn't handle these specifically because:
// 1. Database names in our system are typically lowercase
// 2. These are combined with timestamps and UUIDs in filenames (e.g., "CON-20240102-150405-uuid")
// 3. The timestamp and UUID suffix make the final filename safe on Windows
tests := []struct {
name string
input string
expected string
}{
{
name: "CON remains as CON (will be safe with timestamp suffix)",
input: "CON",
expected: "CON",
},
{
name: "PRN remains as PRN (will be safe with timestamp suffix)",
input: "PRN",
expected: "PRN",
},
{
name: "COM1 remains as COM1 (will be safe with timestamp suffix)",
input: "COM1",
expected: "COM1",
},
{
name: "handles database name with reserved name as part",
input: "my:CON/database",
expected: "my-CON-database",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := SanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func Test_SanitizeFilename_RealWorldExamples(t *testing.T) {
tests := []struct {
name string
input string
expected string
}{
{
name: "production database with environment",
input: "prod:main/db",
expected: "prod-main-db",
},
{
name: "database with spaces and version",
input: "My App Database v2.0",
expected: "My_App_Database_v2.0",
},
{
name: "database with special query chars",
input: "analytics?region=us*",
expected: "analytics-region=us-",
},
{
name: "windows-style path in database name",
input: "C:\\databases\\prod",
expected: "C--databases-prod",
},
{
name: "unix-style path in database name",
input: "/var/lib/postgres/main",
expected: "-var-lib-postgres-main",
},
{
name: "database name with quotes",
input: "\"production\" database",
expected: "-production-_database",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := SanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}

View File

@@ -0,0 +1,17 @@
-- +goose Up
-- +goose StatementBegin
-- Step 1: add the column as nullable first, so existing rows do not
-- violate a NOT NULL constraint before the backfill below runs.
ALTER TABLE backups ADD COLUMN file_name TEXT;
-- +goose StatementEnd
-- +goose StatementBegin
-- Step 2: backfill existing rows. Pre-existing backups appear to have been
-- stored under their UUID (elsewhere the code deletes files by
-- backup.ID.String()), so the ID is the historical file name — confirm
-- against the storage layer if this migration is revisited.
UPDATE backups SET file_name = id::TEXT WHERE file_name IS NULL;
-- +goose StatementEnd
-- +goose StatementBegin
-- Step 3: every row now has a value, so the NOT NULL constraint is safe.
ALTER TABLE backups ALTER COLUMN file_name SET NOT NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE backups DROP COLUMN file_name;
-- +goose StatementEnd