Merge pull request #491 from databasus/develop

Develop
This commit is contained in:
Rostislav Dugin
2026-04-02 09:05:03 +03:00
committed by GitHub
11 changed files with 142 additions and 15 deletions

View File

@@ -312,8 +312,6 @@ if [ "\$CURRENT_UID" != "\$PUID" ]; then
usermod -o -u "\$PUID" postgres
fi
chown -R postgres:postgres /var/run/postgresql
# PostgreSQL 17 binary paths
PG_BIN="/usr/lib/postgresql/17/bin"
@@ -426,7 +424,12 @@ fi
# Function to start PostgreSQL and wait for it to be ready
start_postgres() {
echo "Starting PostgreSQL..."
gosu postgres \$PG_BIN/postgres -D /databasus-data/pgdata -p 5437 &
# -k /tmp: create Unix socket and lock file in /tmp instead of /var/run/postgresql/.
# On NAS systems (e.g. TrueNAS Scale), the ZFS-backed Docker overlay filesystem
# ignores chown/chmod on directories from image layers, so PostgreSQL gets
# "Permission denied" when creating .s.PGSQL.5437.lock in /var/run/postgresql/.
# All internal connections use TCP (-h localhost), so the socket location does not matter.
gosu postgres \$PG_BIN/postgres -D /databasus-data/pgdata -p 5437 -k /tmp &
POSTGRES_PID=\$!
echo "Waiting for PostgreSQL to be ready..."

View File

@@ -70,6 +70,14 @@ func (uc *RestoreMariadbBackupUsecase) Execute(
"--verbose",
}
// Disable Galera Cluster replication for the restore session to prevent
// "Maximum writeset size exceeded" errors on large restores.
// wsrep_on is available in MariaDB 10.1+ (all builds with Galera support).
// On non-Galera instances the variable still exists but is a no-op.
if mdb.Version != tools.MariadbVersion55 {
args = append(args, "--init-command=SET SESSION wsrep_on=OFF")
}
if !config.GetEnv().IsCloud {
args = append(args, "--max-allowed-packet=1G")
}
@@ -379,6 +387,13 @@ func (uc *RestoreMariadbBackupUsecase) handleMariadbRestoreError(
)
}
if containsIgnoreCase(stderrStr, "writeset size exceeded") {
return fmt.Errorf(
"MariaDB Galera Cluster writeset size limit exceeded. Try increasing wsrep_max_ws_size on your cluster nodes. stderr: %s",
stderrStr,
)
}
return errors.New(errorMsg)
}

View File

@@ -110,6 +110,18 @@ func Test_Storage_BasicOperations(t *testing.T) {
S3Endpoint: "http://" + s3Container.endpoint,
},
},
{
name: "S3Storage_WithStorageClass",
storage: &s3_storage.S3Storage{
StorageID: uuid.New(),
S3Bucket: s3Container.bucketName,
S3Region: s3Container.region,
S3AccessKey: s3Container.accessKey,
S3SecretKey: s3Container.secretKey,
S3Endpoint: "http://" + s3Container.endpoint,
S3StorageClass: s3_storage.S3StorageClassStandard,
},
},
{
name: "NASStorage",
storage: &nas_storage.NASStorage{

View File

@@ -0,0 +1,13 @@
package s3_storage

// S3StorageClass is the S3 storage class applied to objects uploaded to an
// S3-compatible storage. The values mirror the AWS storage-class identifiers
// (e.g. the x-amz-storage-class header values) — presumably passed through
// verbatim to the S3 client; confirm against putObjectOptions usage.
type S3StorageClass string

const (
// S3StorageClassDefault leaves the storage class unset so the bucket/provider
// default applies; also keeps backward compatibility for existing storages.
S3StorageClassDefault S3StorageClass = ""
S3StorageClassStandard S3StorageClass = "STANDARD"
S3StorageClassStandardIA S3StorageClass = "STANDARD_IA"
S3StorageClassOnezoneIA S3StorageClass = "ONEZONE_IA"
S3StorageClassIntelligentTiering S3StorageClass = "INTELLIGENT_TIERING"
S3StorageClassReducedRedundancy S3StorageClass = "REDUCED_REDUNDANCY"
// S3StorageClassGlacierIR is Glacier Instant Retrieval. Note: archive classes
// requiring asynchronous restore (Glacier Flexible/Deep Archive) are deliberately
// absent — backup files must stay immediately readable for restores.
S3StorageClassGlacierIR S3StorageClass = "GLACIER_IR"
)

View File

@@ -43,9 +43,10 @@ type S3Storage struct {
S3SecretKey string `json:"s3SecretKey" gorm:"not null;type:text;column:s3_secret_key"`
S3Endpoint string `json:"s3Endpoint" gorm:"type:text;column:s3_endpoint"`
S3Prefix string `json:"s3Prefix" gorm:"type:text;column:s3_prefix"`
S3UseVirtualHostedStyle bool `json:"s3UseVirtualHostedStyle" gorm:"default:false;column:s3_use_virtual_hosted_style"`
SkipTLSVerify bool `json:"skipTLSVerify" gorm:"default:false;column:skip_tls_verify"`
S3Prefix string `json:"s3Prefix" gorm:"type:text;column:s3_prefix"`
S3UseVirtualHostedStyle bool `json:"s3UseVirtualHostedStyle" gorm:"default:false;column:s3_use_virtual_hosted_style"`
SkipTLSVerify bool `json:"skipTLSVerify" gorm:"default:false;column:skip_tls_verify"`
S3StorageClass S3StorageClass `json:"s3StorageClass" gorm:"type:text;column:s3_storage_class;default:''"`
}
func (s *S3Storage) TableName() string {
@@ -76,7 +77,7 @@ func (s *S3Storage) SaveFile(
ctx,
s.S3Bucket,
objectKey,
minio.PutObjectOptions{},
s.putObjectOptions(),
)
if err != nil {
return fmt.Errorf("failed to initiate multipart upload: %w", err)
@@ -151,15 +152,16 @@ func (s *S3Storage) SaveFile(
if err != nil {
return err
}
opts := s.putObjectOptions()
opts.SendContentMd5 = true
_, err = client.PutObject(
ctx,
s.S3Bucket,
objectKey,
bytes.NewReader([]byte{}),
0,
minio.PutObjectOptions{
SendContentMd5: true,
},
opts,
)
if err != nil {
return fmt.Errorf("failed to upload empty file: %w", err)
@@ -173,7 +175,7 @@ func (s *S3Storage) SaveFile(
objectKey,
uploadID,
parts,
minio.PutObjectOptions{},
s.putObjectOptions(),
)
if err != nil {
_ = coreClient.AbortMultipartUpload(ctx, s.S3Bucket, objectKey, uploadID)
@@ -350,6 +352,7 @@ func (s *S3Storage) Update(incoming *S3Storage) {
s.S3Endpoint = incoming.S3Endpoint
s.S3UseVirtualHostedStyle = incoming.S3UseVirtualHostedStyle
s.SkipTLSVerify = incoming.SkipTLSVerify
s.S3StorageClass = incoming.S3StorageClass
if incoming.S3AccessKey != "" {
s.S3AccessKey = incoming.S3AccessKey
@@ -363,6 +366,12 @@ func (s *S3Storage) Update(incoming *S3Storage) {
// otherwise we will have to transfer all the data to the new prefix
}
// putObjectOptions returns the minio upload options shared by every PutObject
// and multipart call on this storage, carrying the configured storage class.
// An empty S3StorageClass (the default) presumably leaves the storage class
// unset so the provider default applies — confirm against minio-go behavior.
func (s *S3Storage) putObjectOptions() minio.PutObjectOptions {
	var opts minio.PutObjectOptions
	opts.StorageClass = string(s.S3StorageClass)
	return opts
}
func (s *S3Storage) buildObjectKey(fileName string) string {
if s.S3Prefix == "" {
return fileName

View File

@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
-- Add the per-storage S3 storage class setting. NOT NULL DEFAULT '' keeps
-- existing rows valid and means "use the provider's default storage class".
ALTER TABLE s3_storages
ADD COLUMN s3_storage_class TEXT NOT NULL DEFAULT '';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
-- Rollback: drop the column added above.
ALTER TABLE s3_storages
DROP COLUMN s3_storage_class;
-- +goose StatementEnd

View File

@@ -3,6 +3,7 @@ export { type Storage } from './models/Storage';
export { StorageType } from './models/StorageType';
export { type LocalStorage } from './models/LocalStorage';
export { type S3Storage } from './models/S3Storage';
export { S3StorageClass, S3StorageClassLabels } from './models/S3StorageClass';
export { type NASStorage } from './models/NASStorage';
export { getStorageLogoFromType } from './models/getStorageLogoFromType';
export { getStorageNameFromType } from './models/getStorageNameFromType';

View File

@@ -7,4 +7,5 @@ export interface S3Storage {
s3Prefix?: string;
s3UseVirtualHostedStyle?: boolean;
skipTLSVerify?: boolean;
s3StorageClass?: string;
}

View File

@@ -0,0 +1,19 @@
/**
 * S3 storage classes selectable for a storage. Values mirror the backend's
 * Go S3StorageClass constants and are sent verbatim in `s3StorageClass`.
 * DEFAULT (empty string) means "use the provider's default class".
 * Archive classes requiring async restore (Glacier Flexible/Deep Archive)
 * are intentionally omitted — backups must be immediately restorable.
 */
export enum S3StorageClass {
DEFAULT = '',
STANDARD = 'STANDARD',
STANDARD_IA = 'STANDARD_IA',
ONEZONE_IA = 'ONEZONE_IA',
INTELLIGENT_TIERING = 'INTELLIGENT_TIERING',
REDUCED_REDUNDANCY = 'REDUCED_REDUNDANCY',
GLACIER_IR = 'GLACIER_IR',
}

/** Human-readable label for each storage class, used by the storage form UI. */
export const S3StorageClassLabels: Record<S3StorageClass, string> = {
[S3StorageClass.DEFAULT]: 'Default (Standard)',
[S3StorageClass.STANDARD]: 'Standard',
[S3StorageClass.STANDARD_IA]: 'Standard - Infrequent Access',
[S3StorageClass.ONEZONE_IA]: 'One Zone - Infrequent Access',
[S3StorageClass.INTELLIGENT_TIERING]: 'Intelligent Tiering',
[S3StorageClass.REDUCED_REDUNDANCY]: 'Reduced Redundancy',
[S3StorageClass.GLACIER_IR]: 'Glacier Instant Retrieval',
};

View File

@@ -1,8 +1,8 @@
import { DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import { Checkbox, Input, Tooltip } from 'antd';
import { Checkbox, Input, Select, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import type { Storage } from '../../../../../entity/storages';
import { S3StorageClass, S3StorageClassLabels, type Storage } from '../../../../../entity/storages';
interface Props {
storage: Storage;
@@ -20,7 +20,8 @@ export function EditS3StorageComponent({
const hasAdvancedValues =
!!storage?.s3Storage?.s3Prefix ||
!!storage?.s3Storage?.s3UseVirtualHostedStyle ||
!!storage?.s3Storage?.skipTLSVerify;
!!storage?.s3Storage?.skipTLSVerify ||
!!storage?.s3Storage?.s3StorageClass;
const [showAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
useEffect(() => {
@@ -278,6 +279,40 @@ export function EditS3StorageComponent({
</Tooltip>
</div>
</div>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[110px] sm:mb-0">Storage class</div>
<div className="flex items-center">
<Select
value={storage?.s3Storage?.s3StorageClass || S3StorageClass.DEFAULT}
options={Object.entries(S3StorageClassLabels).map(([value, label]) => ({
value,
label,
}))}
onChange={(value) => {
if (!storage?.s3Storage) return;
setStorage({
...storage,
s3Storage: {
...storage.s3Storage,
s3StorageClass: value,
},
});
setUnsaved();
}}
size="small"
className="w-[250px] max-w-[250px]"
/>
<Tooltip
className="cursor-pointer"
title="S3 storage class for uploaded objects. Leave as default for Standard. Some providers offer cheaper classes like One Zone IA. Do not use Glacier/Deep Archive — files must be immediately accessible for restores."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
</>
)}

View File

@@ -1,4 +1,4 @@
import type { Storage } from '../../../../../entity/storages';
import { S3StorageClass, S3StorageClassLabels, type Storage } from '../../../../../entity/storages';
interface Props {
storage: Storage;
@@ -52,6 +52,14 @@ export function ShowS3StorageComponent({ storage }: Props) {
Enabled
</div>
)}
{storage?.s3Storage?.s3StorageClass && (
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Storage Class</div>
{S3StorageClassLabels[storage.s3Storage.s3StorageClass as S3StorageClass] ||
storage.s3Storage.s3StorageClass}
</div>
)}
</>
);
}