Merge pull request #214 from databasus/develop

FIX (databases): Add timeout for deletion in case the storage backend is stuck
Rostislav Dugin
2026-01-04 22:54:13 +03:00
committed by GitHub
9 changed files with 104 additions and 31 deletions
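
Every storage backend now applies the same pattern: DeleteFile runs under a context with a 30-second deadline instead of context.Background() or context.TODO(), so a stuck storage endpoint fails the delete with a timeout rather than blocking forever. A minimal sketch of that pattern, with illustrative names rather than the project's actual types:

```go
package storage

import (
	"context"
	"fmt"
	"time"
)

// deleteTimeout mirrors the per-backend constants added in this PR
// (azureDeleteTimeout, s3DeleteTimeout, ftpDeleteTimeout, ...).
const deleteTimeout = 30 * time.Second

// remover is a hypothetical stand-in for any storage client whose delete
// call accepts a context.
type remover interface {
	Remove(ctx context.Context, key string) error
}

// deleteWithTimeout bounds the delete call: if the backend hangs, the call
// returns context.DeadlineExceeded after 30s instead of never returning.
func deleteWithTimeout(r remover, key string) error {
	ctx, cancel := context.WithTimeout(context.Background(), deleteTimeout)
	defer cancel()

	if err := r.Remove(ctx, key); err != nil {
		return fmt.Errorf("failed to delete %s: %w", key, err)
	}
	return nil
}
```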

View File

@@ -26,6 +26,7 @@ const (
azureResponseTimeout = 30 * time.Second
azureIdleConnTimeout = 90 * time.Second
azureTLSHandshakeTimeout = 30 * time.Second
+ azureDeleteTimeout = 30 * time.Second
// Chunk size for block blob uploads - 16MB provides good balance between
// memory usage and upload efficiency. This creates backpressure to pg_dump
@@ -186,8 +187,11 @@ func (s *AzureBlobStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileI
blobName := s.buildBlobName(fileID.String())
+ ctx, cancel := context.WithTimeout(context.Background(), azureDeleteTimeout)
+ defer cancel()
_, err = client.DeleteBlob(
- context.TODO(),
+ ctx,
s.ContainerName,
blobName,
nil,

View File

@@ -18,6 +18,7 @@ import (
const (
ftpConnectTimeout = 30 * time.Second
ftpTestConnectTimeout = 10 * time.Second
+ ftpDeleteTimeout = 30 * time.Second
ftpChunkSize = 16 * 1024 * 1024
)
@@ -134,7 +135,10 @@ func (f *FTPStorage) GetFile(
}
func (f *FTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
- conn, err := f.connect(encryptor, ftpConnectTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), ftpDeleteTimeout)
+ defer cancel()
+ conn, err := f.connectWithContext(ctx, encryptor, ftpDeleteTimeout)
if err != nil {
return fmt.Errorf("failed to connect to FTP: %w", err)
}
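
For the connection-oriented backends (FTP, NAS, SFTP), the timeout also has to cover connection setup, which is why the plain connect call is replaced with a context-aware variant. A rough sketch of what such a helper could look like, assuming a plain TCP dial; the actual connectWithContext implementation may differ:

```go
package storage

import (
	"context"
	"fmt"
	"net"
)

// dialWithContext is an assumed sketch of a context-aware connect: the same
// deadline that bounds DeleteFile also bounds dialing, so a stuck server
// fails fast instead of hanging the whole delete.
func dialWithContext(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	conn, err := d.DialContext(ctx, "tcp", addr) // aborts when ctx expires
	if err != nil {
		return nil, fmt.Errorf("failed to connect to %s: %w", addr, err)
	}
	// Carry the remaining deadline over to reads and writes as well.
	if deadline, ok := ctx.Deadline(); ok {
		_ = conn.SetDeadline(deadline)
	}
	return conn, nil
}
```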

View File

@@ -27,6 +27,7 @@ const (
gdResponseTimeout = 30 * time.Second
gdIdleConnTimeout = 90 * time.Second
gdTLSHandshakeTimeout = 30 * time.Second
+ gdDeleteTimeout = 30 * time.Second
// Chunk size for Google Drive resumable uploads - 16MB provides good balance
// between memory usage and upload efficiency. Google Drive requires chunks
@@ -185,7 +186,9 @@ func (s *GoogleDriveStorage) DeleteFile(
encryptor encryption.FieldEncryptor,
fileID uuid.UUID,
) error {
- ctx := context.Background()
+ ctx, cancel := context.WithTimeout(context.Background(), gdDeleteTimeout)
+ defer cancel()
return s.withRetryOnAuth(ctx, encryptor, func(driveService *drive.Service) error {
folderID, err := s.findBackupsFolder(driveService)
if err != nil {
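
For Google Drive, the deadline-bound context is passed into withRetryOnAuth, so the re-authentication retry is covered by the same 30-second budget as the Drive API calls themselves. A hypothetical sketch of that idea (not the project's actual retry helper):

```go
package storage

import "context"

// runWithRetry is a hypothetical sketch: one retry after a failed attempt,
// but both attempts share the caller's deadline-bound context, so the retry
// cannot outlive the delete timeout.
func runWithRetry(ctx context.Context, op func(ctx context.Context) error) error {
	if err := op(ctx); err != nil {
		// If the deadline already expired, report that instead of retrying.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		return op(ctx)
	}
	return nil
}
```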

View File

@@ -18,6 +18,8 @@ import (
)
const (
+ nasDeleteTimeout = 30 * time.Second
// Chunk size for NAS uploads - 16MB provides good balance between
// memory usage and upload efficiency. This creates backpressure to pg_dump
// by only reading one chunk at a time and waiting for NAS to confirm receipt.
@@ -193,7 +195,10 @@ func (n *NASStorage) GetFile(
}
func (n *NASStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
- session, err := n.createSession(encryptor)
+ ctx, cancel := context.WithTimeout(context.Background(), nasDeleteTimeout)
+ defer cancel()
+ session, err := n.createSessionWithContext(ctx, encryptor)
if err != nil {
return fmt.Errorf("failed to create NAS session: %w", err)
}
@@ -211,10 +216,8 @@ func (n *NASStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid
filePath := n.getFilePath(fileID.String())
// Check if file exists before trying to delete
_, err = fs.Stat(filePath)
if err != nil {
// File doesn't exist, consider it already deleted
return nil
}
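
The NAS delete path also stays idempotent: it stats the file first and treats a missing file as already deleted, as shown above. A small sketch of that behaviour using the standard library (illustrative only; the NAS backend goes through its SMB session rather than the local filesystem):

```go
package storage

import (
	"errors"
	"fmt"
	"os"
)

// deleteIfExists treats an already-missing file as success, mirroring the
// stat-before-delete check in the NAS backend.
func deleteIfExists(path string) error {
	if err := os.Remove(path); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil // already gone, nothing to do
		}
		return fmt.Errorf("failed to delete %s: %w", path, err)
	}
	return nil
}
```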

View File

@@ -22,6 +22,7 @@ import (
const (
rcloneOperationTimeout = 30 * time.Second
+ rcloneDeleteTimeout = 30 * time.Second
)
var rcloneConfigMu sync.Mutex
@@ -115,7 +116,8 @@ func (r *RcloneStorage) GetFile(
}
func (r *RcloneStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
- ctx := context.Background()
+ ctx, cancel := context.WithTimeout(context.Background(), rcloneDeleteTimeout)
+ defer cancel()
remoteFs, err := r.getFs(ctx, encryptor)
if err != nil {

View File

@@ -26,6 +26,7 @@ const (
s3ResponseTimeout = 30 * time.Second
s3IdleConnTimeout = 90 * time.Second
s3TLSHandshakeTimeout = 30 * time.Second
+ s3DeleteTimeout = 30 * time.Second
// Chunk size for multipart uploads - 16MB provides good balance between
// memory usage and upload efficiency. This creates backpressure to pg_dump
@@ -228,9 +229,11 @@ func (s *S3Storage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.
objectKey := s.buildObjectKey(fileID.String())
// Delete the object using MinIO client
+ ctx, cancel := context.WithTimeout(context.Background(), s3DeleteTimeout)
+ defer cancel()
err = client.RemoveObject(
- context.TODO(),
+ ctx,
s.S3Bucket,
objectKey,
minio.RemoveObjectOptions{},

View File

@@ -19,6 +19,7 @@ import (
const (
sftpConnectTimeout = 30 * time.Second
sftpTestConnectTimeout = 10 * time.Second
+ sftpDeleteTimeout = 30 * time.Second
)
type SFTPStorage struct {
@@ -154,7 +155,10 @@ func (s *SFTPStorage) GetFile(
}
func (s *SFTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
- client, sshConn, err := s.connect(encryptor, sftpConnectTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), sftpDeleteTimeout)
+ defer cancel()
+ client, sshConn, err := s.connectWithContext(ctx, encryptor, sftpDeleteTimeout)
if err != nil {
return fmt.Errorf("failed to connect to SFTP: %w", err)
}

View File

@@ -32,21 +32,23 @@ import (
test_utils "databasus-backend/internal/util/testing"
)
- const createAndFillTableQuery = `
- DROP TABLE IF EXISTS test_data;
+ func createAndFillTableQuery(tableName string) string {
+ return fmt.Sprintf(`
+ DROP TABLE IF EXISTS %s;
- CREATE TABLE test_data (
+ CREATE TABLE %s (
id SERIAL PRIMARY KEY,
name TEXT NOT NULL,
value INTEGER NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
- INSERT INTO test_data (name, value) VALUES
+ INSERT INTO %s (name, value) VALUES
('test1', 100),
('test2', 200),
('test3', 300);
- `
+ `, tableName, tableName, tableName)
+ }
type PostgresContainer struct {
Host string
@@ -378,9 +380,14 @@ func testBackupRestoreForVersion(t *testing.T, pgVersion string, port string, cp
}
}()
- _, err = container.DB.Exec(createAndFillTableQuery)
+ tableName := fmt.Sprintf("test_data_%s", uuid.New().String()[:8])
+ _, err = container.DB.Exec(createAndFillTableQuery(tableName))
assert.NoError(t, err)
+ defer func() {
+ _, _ = container.DB.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s;", tableName))
+ }()
router := createTestRouter()
user := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
@@ -436,12 +443,19 @@ func testBackupRestoreForVersion(t *testing.T, pgVersion string, port string, cp
var tableExists bool
err = newDB.Get(
&tableExists,
"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'test_data')",
fmt.Sprintf(
"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '%s')",
tableName,
),
)
assert.NoError(t, err)
- assert.True(t, tableExists, "Table 'test_data' should exist in restored database")
+ assert.True(
+ t,
+ tableExists,
+ fmt.Sprintf("Table '%s' should exist in restored database", tableName),
+ )
- verifyDataIntegrity(t, container.DB, newDB)
+ verifyDataIntegrity(t, container.DB, newDB, tableName)
err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
if err != nil {
@@ -875,9 +889,14 @@ func testBackupRestoreWithReadOnlyUserForVersion(t *testing.T, pgVersion string,
}
}()
- _, err = container.DB.Exec(createAndFillTableQuery)
+ tableName := fmt.Sprintf("test_data_%s", uuid.New().String()[:8])
+ _, err = container.DB.Exec(createAndFillTableQuery(tableName))
assert.NoError(t, err)
+ defer func() {
+ _, _ = container.DB.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s;", tableName))
+ }()
router := createTestRouter()
user := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("ReadOnly Test Workspace", user, router)
@@ -941,12 +960,19 @@ func testBackupRestoreWithReadOnlyUserForVersion(t *testing.T, pgVersion string,
var tableExists bool
err = newDB.Get(
&tableExists,
"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'test_data')",
fmt.Sprintf(
"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '%s')",
tableName,
),
)
assert.NoError(t, err)
- assert.True(t, tableExists, "Table 'test_data' should exist in restored database")
+ assert.True(
+ t,
+ tableExists,
+ fmt.Sprintf("Table '%s' should exist in restored database", tableName),
+ )
- verifyDataIntegrity(t, container.DB, newDB)
+ verifyDataIntegrity(t, container.DB, newDB, tableName)
err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
if err != nil {
@@ -1106,9 +1132,14 @@ func testBackupRestoreWithEncryptionForVersion(t *testing.T, pgVersion string, p
}
}()
- _, err = container.DB.Exec(createAndFillTableQuery)
+ tableName := fmt.Sprintf("test_data_%s", uuid.New().String()[:8])
+ _, err = container.DB.Exec(createAndFillTableQuery(tableName))
assert.NoError(t, err)
+ defer func() {
+ _, _ = container.DB.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s;", tableName))
+ }()
router := createTestRouter()
user := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router)
@@ -1163,12 +1194,19 @@ func testBackupRestoreWithEncryptionForVersion(t *testing.T, pgVersion string, p
var tableExists bool
err = newDB.Get(
&tableExists,
"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'test_data')",
fmt.Sprintf(
"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '%s')",
tableName,
),
)
assert.NoError(t, err)
- assert.True(t, tableExists, "Table 'test_data' should exist in restored database")
+ assert.True(
+ t,
+ tableExists,
+ fmt.Sprintf("Table '%s' should exist in restored database", tableName),
+ )
- verifyDataIntegrity(t, container.DB, newDB)
+ verifyDataIntegrity(t, container.DB, newDB, tableName)
err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
if err != nil {
@@ -1630,14 +1668,14 @@ func createSupabaseRestoreViaAPI(
)
}
- func verifyDataIntegrity(t *testing.T, originalDB *sqlx.DB, restoredDB *sqlx.DB) {
+ func verifyDataIntegrity(t *testing.T, originalDB *sqlx.DB, restoredDB *sqlx.DB, tableName string) {
var originalData []TestDataItem
var restoredData []TestDataItem
- err := originalDB.Select(&originalData, "SELECT * FROM test_data ORDER BY id")
+ err := originalDB.Select(&originalData, fmt.Sprintf("SELECT * FROM %s ORDER BY id", tableName))
assert.NoError(t, err)
- err = restoredDB.Select(&restoredData, "SELECT * FROM test_data ORDER BY id")
+ err = restoredDB.Select(&restoredData, fmt.Sprintf("SELECT * FROM %s ORDER BY id", tableName))
assert.NoError(t, err)
assert.Equal(t, len(originalData), len(restoredData), "Should have same number of rows")
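
The test changes replace the fixed test_data table with a per-run table name, so the backup/restore tests can run against a shared Postgres instance without stepping on each other. A minimal sketch of that isolation pattern, with assumed helper names:

```go
package tests

import (
	"fmt"
	"testing"

	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
)

// uniqueTableName mirrors the change above: an 8-character UUID suffix keeps
// concurrent or repeated test runs from colliding on a shared database.
func uniqueTableName(prefix string) string {
	return fmt.Sprintf("%s_%s", prefix, uuid.New().String()[:8])
}

// withTestTable creates an isolated table for one test and drops it on
// cleanup. (Assumed helper; the real suite uses createAndFillTableQuery.)
func withTestTable(t *testing.T, db *sqlx.DB) string {
	t.Helper()
	name := uniqueTableName("test_data")
	_, err := db.Exec(fmt.Sprintf(
		"CREATE TABLE %s (id SERIAL PRIMARY KEY, name TEXT NOT NULL, value INTEGER NOT NULL)", name))
	if err != nil {
		t.Fatalf("create table %s: %v", name, err)
	}
	t.Cleanup(func() {
		_, _ = db.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s;", name))
	})
	return name
}
```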

View File

@@ -121,6 +121,7 @@ export const DatabaseConfigComponent = ({
const remove = () => {
if (!database) return;
setIsShowRemoveConfirm(false);
+ setIsRemoving(true);
databaseApi
.deleteDatabase(database.id)
@@ -165,7 +166,18 @@ export const DatabaseConfigComponent = ({
};
return (
<div className="w-full rounded-tr-md rounded-br-md rounded-bl-md bg-white p-3 shadow sm:p-5 dark:bg-gray-800">
<div className="relative w-full rounded-tr-md rounded-br-md rounded-bl-md bg-white p-3 shadow sm:p-5 dark:bg-gray-800">
{isRemoving && (
<div className="absolute inset-0 z-10 flex items-center justify-center rounded-tr-md rounded-br-md rounded-bl-md bg-white/80 dark:bg-gray-800/80">
<div className="flex flex-col items-center gap-3">
<div className="h-8 w-8 animate-spin rounded-full border-4 border-gray-300 border-t-blue-500" />
<span className="text-sm font-medium text-gray-600 dark:text-gray-300">
Removing database...
</span>
</div>
</div>
)}
{!isEditName ? (
<div className="mb-5 flex items-center text-xl font-bold sm:text-2xl">
{database.name}