Compare commits

...

11 Commits

Author SHA1 Message Date
Rostislav Dugin
a0f02b253e Merge pull request #427 from databasus/develop
FIX (retention): Fix GFS retention while hourly backups prevent daily…
2026-03-11 15:36:27 +03:00
Rostislav Dugin
812f11bc2f FIX (retention): Fix GFS retention while hourly backups prevent daily from cleanup 2026-03-11 15:35:53 +03:00
Rostislav Dugin
e796e3ddf0 Merge pull request #426 from databasus/develop
FIX (mysql): Detect supported compression levels
2026-03-11 12:53:35 +03:00
Rostislav Dugin
c96d3db337 FIX (mysql): Detect supported compression levels 2026-03-11 12:52:41 +03:00
Rostislav Dugin
ed6c3a2034 Merge pull request #425 from databasus/develop
Develop
2026-03-11 12:31:19 +03:00
Rostislav Dugin
05115047c3 FEATURE (version): Reload frontend if faced version mismatch with backend 2026-03-11 12:28:07 +03:00
Rostislav Dugin
446b96c6c0 FEATURE (arch): Add architecture to Databasus version in the bottom left of UI 2026-03-11 11:39:53 +03:00
Rostislav Dugin
36a0448da1 Merge pull request #420 from databasus/develop
FEATURE (email): Add skipping TLS for email notifier
2026-03-08 22:53:45 +03:00
Rostislav Dugin
8e392cfeab FEATURE (email): Add skipping TLS for email notifier 2026-03-08 22:48:28 +03:00
Rostislav Dugin
6683db1e52 Merge pull request #419 from databasus/develop
FIX (issues): Add DB version to issues template
2026-03-08 22:22:52 +03:00
Rostislav Dugin
703b883936 FIX (issues): Add DB version to issues template 2026-03-08 22:22:26 +03:00
26 changed files with 1547 additions and 582 deletions

View File

@@ -4,7 +4,9 @@ about: Report a bug or unexpected behavior in Databasus
labels: bug
---
## Databasus version
## Databasus version (screenshot)
It is displayed in the bottom left corner of the Databasus UI. Please attach screenshot, not just version text
<!-- e.g. 1.4.2 -->
@@ -12,6 +14,10 @@ labels: bug
<!-- e.g. Ubuntu 22.04 x64, macOS 14 ARM, Windows 11 x64 -->
## Database type and version (optional, for DB-related bugs)
<!-- e.g. PostgreSQL 16 in Docker, MySQL 8.0 installed on server, MariaDB 11.4 in AWS Cloud -->
## Describe the bug (please write manually, do not ask AI to summarize)
**What happened:**

View File

@@ -71,8 +71,10 @@ FROM debian:bookworm-slim
# Add version metadata to runtime image
ARG APP_VERSION=dev
ARG TARGETARCH
LABEL org.opencontainers.image.version=$APP_VERSION
ENV APP_VERSION=$APP_VERSION
ENV CONTAINER_ARCH=$TARGETARCH
# Set production mode for Docker containers
ENV ENV_MODE=production
@@ -269,7 +271,8 @@ window.__RUNTIME_CONFIG__ = {
GITHUB_CLIENT_ID: '\${GITHUB_CLIENT_ID:-}',
GOOGLE_CLIENT_ID: '\${GOOGLE_CLIENT_ID:-}',
IS_EMAIL_CONFIGURED: '\$IS_EMAIL_CONFIGURED',
CLOUDFLARE_TURNSTILE_SITE_KEY: '\${CLOUDFLARE_TURNSTILE_SITE_KEY:-}'
CLOUDFLARE_TURNSTILE_SITE_KEY: '\${CLOUDFLARE_TURNSTILE_SITE_KEY:-}',
CONTAINER_ARCH: '\${CONTAINER_ARCH:-unknown}'
};
JSEOF

View File

@@ -29,6 +29,7 @@ import (
"databasus-backend/internal/features/restores/restoring"
"databasus-backend/internal/features/storages"
system_healthcheck "databasus-backend/internal/features/system/healthcheck"
system_version "databasus-backend/internal/features/system/version"
task_cancellation "databasus-backend/internal/features/tasks/cancellation"
users_controllers "databasus-backend/internal/features/users/controllers"
users_middleware "databasus-backend/internal/features/users/middleware"
@@ -210,6 +211,7 @@ func setUpRoutes(r *gin.Engine) {
userController := users_controllers.GetUserController()
userController.RegisterRoutes(v1)
system_healthcheck.GetHealthcheckController().RegisterRoutes(v1)
system_version.GetVersionController().RegisterRoutes(v1)
backups_controllers.GetBackupController().RegisterPublicRoutes(v1)
backups_controllers.GetPostgresWalBackupController().RegisterRoutes(v1)
databases.GetDatabaseController().RegisterPublicRoutes(v1)

View File

@@ -80,8 +80,7 @@ func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
return err
}
err = storage.DeleteFile(c.fieldEncryptor, backup.FileName)
if err != nil {
if err := storage.DeleteFile(c.fieldEncryptor, backup.FileName); err != nil {
// we do not return error here, because sometimes clean up performed
// before unavailable storage removal or change - therefore we should
// proceed even in case of error. It's possible that some S3 or
@@ -408,6 +407,10 @@ func buildGFSKeepSet(
) map[uuid.UUID]bool {
keep := make(map[uuid.UUID]bool)
if len(backups) == 0 {
return keep
}
hoursSeen := make(map[string]bool)
daysSeen := make(map[string]bool)
weeksSeen := make(map[string]bool)
@@ -416,6 +419,52 @@ func buildGFSKeepSet(
hoursKept, daysKept, weeksKept, monthsKept, yearsKept := 0, 0, 0, 0, 0
// Compute per-level time-window cutoffs so higher-frequency slots
// cannot absorb backups that belong to lower-frequency levels.
ref := backups[0].CreatedAt
rawHourlyCutoff := ref.Add(-time.Duration(hours) * time.Hour)
rawDailyCutoff := ref.Add(-time.Duration(days) * 24 * time.Hour)
rawWeeklyCutoff := ref.Add(-time.Duration(weeks) * 7 * 24 * time.Hour)
rawMonthlyCutoff := ref.AddDate(0, -months, 0)
rawYearlyCutoff := ref.AddDate(-years, 0, 0)
// Hierarchical capping: each level's window cannot extend further back
// than the nearest active lower-frequency level's window.
yearlyCutoff := rawYearlyCutoff
monthlyCutoff := rawMonthlyCutoff
if years > 0 {
monthlyCutoff = laterOf(monthlyCutoff, yearlyCutoff)
}
weeklyCutoff := rawWeeklyCutoff
if months > 0 {
weeklyCutoff = laterOf(weeklyCutoff, monthlyCutoff)
} else if years > 0 {
weeklyCutoff = laterOf(weeklyCutoff, yearlyCutoff)
}
dailyCutoff := rawDailyCutoff
if weeks > 0 {
dailyCutoff = laterOf(dailyCutoff, weeklyCutoff)
} else if months > 0 {
dailyCutoff = laterOf(dailyCutoff, monthlyCutoff)
} else if years > 0 {
dailyCutoff = laterOf(dailyCutoff, yearlyCutoff)
}
hourlyCutoff := rawHourlyCutoff
if days > 0 {
hourlyCutoff = laterOf(hourlyCutoff, dailyCutoff)
} else if weeks > 0 {
hourlyCutoff = laterOf(hourlyCutoff, weeklyCutoff)
} else if months > 0 {
hourlyCutoff = laterOf(hourlyCutoff, monthlyCutoff)
} else if years > 0 {
hourlyCutoff = laterOf(hourlyCutoff, yearlyCutoff)
}
for _, backup := range backups {
t := backup.CreatedAt
@@ -426,31 +475,31 @@ func buildGFSKeepSet(
monthKey := t.Format("2006-01")
yearKey := t.Format("2006")
if hours > 0 && hoursKept < hours && !hoursSeen[hourKey] {
if hours > 0 && hoursKept < hours && !hoursSeen[hourKey] && t.After(hourlyCutoff) {
keep[backup.ID] = true
hoursSeen[hourKey] = true
hoursKept++
}
if days > 0 && daysKept < days && !daysSeen[dayKey] {
if days > 0 && daysKept < days && !daysSeen[dayKey] && t.After(dailyCutoff) {
keep[backup.ID] = true
daysSeen[dayKey] = true
daysKept++
}
if weeks > 0 && weeksKept < weeks && !weeksSeen[weekKey] {
if weeks > 0 && weeksKept < weeks && !weeksSeen[weekKey] && t.After(weeklyCutoff) {
keep[backup.ID] = true
weeksSeen[weekKey] = true
weeksKept++
}
if months > 0 && monthsKept < months && !monthsSeen[monthKey] {
if months > 0 && monthsKept < months && !monthsSeen[monthKey] && t.After(monthlyCutoff) {
keep[backup.ID] = true
monthsSeen[monthKey] = true
monthsKept++
}
if years > 0 && yearsKept < years && !yearsSeen[yearKey] {
if years > 0 && yearsKept < years && !yearsSeen[yearKey] && t.After(yearlyCutoff) {
keep[backup.ID] = true
yearsSeen[yearKey] = true
yearsKept++
@@ -459,3 +508,11 @@ func buildGFSKeepSet(
return keep
}
func laterOf(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}

File diff suppressed because it is too large Load Diff

View File

@@ -697,160 +697,6 @@ func Test_CleanByCount_DoesNotDeleteInProgressBackups(t *testing.T) {
assert.True(t, inProgressFound, "In-progress backup should not be deleted by count policy")
}
func Test_CleanByGFS_KeepsCorrectBackupsPerSlot(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsDays: 3,
RetentionGfsWeeks: 0,
RetentionGfsMonths: 0,
RetentionGfsYears: 0,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Create 5 backups on 5 different days; only the 3 newest days should be kept
var backupIDs []uuid.UUID
for i := 0; i < 5; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-time.Duration(4-i) * 24 * time.Hour).Truncate(24 * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backupIDs = append(backupIDs, backup.ID)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[backupIDs[0]], "Oldest daily backup should be deleted")
assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest daily backup should be deleted")
assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain")
assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain")
assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain")
}
func Test_CleanByGFS_WithWeeklyAndMonthlySlots_KeepsWiderSpread(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsDays: 2,
RetentionGfsWeeks: 2,
RetentionGfsMonths: 1,
RetentionGfsYears: 0,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Create one backup per week for 6 weeks (each on Monday of that week)
// GFS should keep: 2 daily (most recent 2 unique days) + 2 weekly + 1 monthly = up to 5 unique
var createdIDs []uuid.UUID
for i := 0; i < 6; i++ {
weekOffset := time.Duration(5-i) * 7 * 24 * time.Hour
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-weekOffset).Truncate(24 * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
createdIDs = append(createdIDs, backup.ID)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
// We should have at most 5 backups kept (2 daily + 2 weekly + 1 monthly, but with overlap possible)
// The exact count depends on how many unique periods are covered
assert.LessOrEqual(t, len(remainingBackups), 5)
assert.GreaterOrEqual(t, len(remainingBackups), 1)
// The two most recent backups should always be retained (daily slots)
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.True(t, remainingIDs[createdIDs[4]], "Second newest backup should be retained (daily)")
assert.True(t, remainingIDs[createdIDs[5]], "Newest backup should be retained (daily)")
}
// Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase verifies resilience
// when storage becomes unavailable. Even if storage.DeleteFile fails (e.g., storage is offline,
// credentials changed, or storage was deleted), the backup record should still be removed from
@@ -897,292 +743,6 @@ func Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase(t *
assert.Nil(t, deletedBackup)
}
func Test_CleanByGFS_WithHourlySlots_KeepsCorrectBackups(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
testStorage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, testStorage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(testStorage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsHours: 3,
StorageID: &testStorage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
// Create 5 backups spaced 1 hour apart; only the 3 newest hours should be kept
var backupIDs []uuid.UUID
for i := 0; i < 5; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: testStorage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-time.Duration(4-i) * time.Hour).Truncate(time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backupIDs = append(backupIDs, backup.ID)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[backupIDs[0]], "Oldest hourly backup should be deleted")
assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest hourly backup should be deleted")
assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain")
assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain")
assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain")
}
func Test_BuildGFSKeepSet(t *testing.T) {
// Fixed reference time: a Wednesday mid-month to avoid boundary edge cases in the default tests.
// Use time.Date for determinism across test runs.
ref := time.Date(2025, 6, 18, 12, 0, 0, 0, time.UTC) // Wednesday, 2025-06-18
day := 24 * time.Hour
week := 7 * day
newBackup := func(createdAt time.Time) *backups_core.Backup {
return &backups_core.Backup{ID: uuid.New(), CreatedAt: createdAt}
}
// backupsEveryDay returns n backups, newest-first, each 1 day apart.
backupsEveryDay := func(n int) []*backups_core.Backup {
bs := make([]*backups_core.Backup, n)
for i := 0; i < n; i++ {
bs[i] = newBackup(ref.Add(-time.Duration(i) * day))
}
return bs
}
// backupsEveryWeek returns n backups, newest-first, each 7 days apart.
backupsEveryWeek := func(n int) []*backups_core.Backup {
bs := make([]*backups_core.Backup, n)
for i := 0; i < n; i++ {
bs[i] = newBackup(ref.Add(-time.Duration(i) * week))
}
return bs
}
hour := time.Hour
// backupsEveryHour returns n backups, newest-first, each 1 hour apart.
backupsEveryHour := func(n int) []*backups_core.Backup {
bs := make([]*backups_core.Backup, n)
for i := 0; i < n; i++ {
bs[i] = newBackup(ref.Add(-time.Duration(i) * hour))
}
return bs
}
tests := []struct {
name string
backups []*backups_core.Backup
hours int
days int
weeks int
months int
years int
keptIndices []int // which indices in backups should be kept
deletedRange *[2]int // optional: all indices in [from, to) must be deleted
}{
{
name: "OnlyHourlySlots_KeepsNewest3Of5",
backups: backupsEveryHour(5),
hours: 3,
keptIndices: []int{0, 1, 2},
},
{
name: "SameHourDedup_OnlyNewestKeptForHourlySlot",
backups: []*backups_core.Backup{
newBackup(ref.Truncate(hour).Add(45 * time.Minute)),
newBackup(ref.Truncate(hour).Add(10 * time.Minute)),
},
hours: 1,
keptIndices: []int{0},
},
{
name: "OnlyDailySlots_KeepsNewest3Of5",
backups: backupsEveryDay(5),
days: 3,
keptIndices: []int{0, 1, 2},
},
{
name: "OnlyDailySlots_FewerBackupsThanSlots_KeepsAll",
backups: backupsEveryDay(2),
days: 5,
keptIndices: []int{0, 1},
},
{
name: "OnlyWeeklySlots_KeepsNewest2Weeks",
backups: backupsEveryWeek(4),
weeks: 2,
keptIndices: []int{0, 1},
},
{
name: "OnlyMonthlySlots_KeepsNewest2Months",
backups: []*backups_core.Backup{
newBackup(time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2025, 5, 1, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2025, 4, 1, 12, 0, 0, 0, time.UTC)),
},
months: 2,
keptIndices: []int{0, 1},
},
{
name: "OnlyYearlySlots_KeepsNewest2Years",
backups: []*backups_core.Backup{
newBackup(time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2023, 6, 1, 12, 0, 0, 0, time.UTC)),
},
years: 2,
keptIndices: []int{0, 1},
},
{
name: "SameDayDedup_OnlyNewestKeptForDailySlot",
backups: []*backups_core.Backup{
// Two backups on the same day; newest-first order
newBackup(ref.Truncate(day).Add(10 * time.Hour)),
newBackup(ref.Truncate(day).Add(2 * time.Hour)),
},
days: 1,
keptIndices: []int{0},
},
{
name: "SameWeekDedup_OnlyNewestKeptForWeeklySlot",
backups: []*backups_core.Backup{
// ref is Wednesday; add Thursday of same week
newBackup(ref.Add(1 * day)), // Thursday same week
newBackup(ref), // Wednesday same week
},
weeks: 1,
keptIndices: []int{0},
},
{
name: "AdditiveSlots_NewestFillsDailyAndWeeklyAndMonthly",
// Newest backup fills daily + weekly + monthly simultaneously
backups: []*backups_core.Backup{
newBackup(time.Date(2025, 6, 18, 12, 0, 0, 0, time.UTC)), // newest
newBackup(time.Date(2025, 6, 11, 12, 0, 0, 0, time.UTC)), // 1 week ago
newBackup(time.Date(2025, 5, 18, 12, 0, 0, 0, time.UTC)), // 1 month ago
newBackup(time.Date(2025, 4, 18, 12, 0, 0, 0, time.UTC)), // 2 months ago
},
days: 1,
weeks: 2,
months: 2,
keptIndices: []int{0, 1, 2},
},
{
name: "YearBoundary_CorrectlySplitsAcrossYears",
backups: []*backups_core.Backup{
newBackup(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2024, 12, 31, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)),
newBackup(time.Date(2023, 6, 1, 12, 0, 0, 0, time.UTC)),
},
years: 2,
keptIndices: []int{0, 1}, // 2025 and 2024 kept; 2024-06 and 2023 deleted
},
{
name: "ISOWeekBoundary_Jan1UsesCorrectISOWeek",
// 2025-01-01 is ISO week 1 of 2025; 2024-12-28 is ISO week 52 of 2024
backups: []*backups_core.Backup{
newBackup(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)), // ISO week 2025-W01
newBackup(time.Date(2024, 12, 28, 12, 0, 0, 0, time.UTC)), // ISO week 2024-W52
},
weeks: 2,
keptIndices: []int{0, 1}, // different ISO weeks → both kept
},
{
name: "EmptyBackups_ReturnsEmptyKeepSet",
backups: []*backups_core.Backup{},
hours: 3,
days: 3,
weeks: 2,
months: 1,
years: 1,
keptIndices: []int{},
},
{
name: "AllZeroSlots_KeepsNothing",
backups: backupsEveryDay(5),
hours: 0,
days: 0,
weeks: 0,
months: 0,
years: 0,
keptIndices: []int{},
},
{
name: "AllSlotsActive_FullCombination",
backups: backupsEveryWeek(12),
days: 2,
weeks: 3,
months: 2,
years: 1,
// 2 daily (indices 0,1) + 3rd weekly slot (index 2) + 2nd monthly slot (index 3 or later).
// Additive slots: newest fills daily+weekly+monthly+yearly; each subsequent week fills another weekly,
// and a backup ~4 weeks later fills the 2nd monthly slot.
keptIndices: []int{0, 1, 2, 3},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
keepSet := buildGFSKeepSet(tc.backups, tc.hours, tc.days, tc.weeks, tc.months, tc.years)
keptIndexSet := make(map[int]bool, len(tc.keptIndices))
for _, idx := range tc.keptIndices {
keptIndexSet[idx] = true
}
for i, backup := range tc.backups {
if keptIndexSet[i] {
assert.True(t, keepSet[backup.ID], "backup at index %d should be kept", i)
} else {
assert.False(t, keepSet[backup.ID], "backup at index %d should be deleted", i)
}
}
})
}
}
func Test_CleanByTimePeriod_SkipsRecentBackup_EvenIfOlderThanRetention(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
@@ -1354,114 +914,6 @@ func Test_CleanByCount_SkipsRecentBackup_EvenIfOverLimit(t *testing.T) {
assert.True(t, remainingIDs[newestBackup.ID], "Newest backup should be preserved")
}
func Test_CleanByGFS_SkipsRecentBackup_WhenNotInKeepSet(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// Keep only 1 daily slot. We create 2 old backups plus two recent backups on today.
// Backups are ordered newest-first, so the 15-min-old backup fills the single daily slot.
// The 30-min-old backup is the same day → not in the GFS keep-set, but it is still recent
// (within grace period) and must be preserved.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeGFS,
RetentionGfsDays: 1,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
oldBackup1 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-3 * 24 * time.Hour).Truncate(24 * time.Hour),
}
oldBackup2 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-2 * 24 * time.Hour).Truncate(24 * time.Hour),
}
// Newest backup today — will fill the single GFS daily slot.
newestTodayBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-15 * time.Minute),
}
// Slightly older backup, also today — NOT in GFS keep-set (duplicate day),
// but within the 60-minute grace period so it must survive.
recentNotInKeepSet := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-30 * time.Minute),
}
for _, b := range []*backups_core.Backup{oldBackup1, oldBackup2, newestTodayBackup, recentNotInKeepSet} {
err = backupRepository.Save(b)
assert.NoError(t, err)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[oldBackup1.ID], "Old backup 1 should be deleted by GFS")
assert.False(t, remainingIDs[oldBackup2.ID], "Old backup 2 should be deleted by GFS")
assert.True(
t,
remainingIDs[newestTodayBackup.ID],
"Newest backup fills GFS daily slot and must remain",
)
assert.True(
t,
remainingIDs[recentNotInKeepSet.ID],
"Recent backup not in keep-set must be preserved by grace period",
)
}
func Test_CleanExceededBackups_SkipsRecentBackup_WhenOverTotalSizeLimit(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)

View File

@@ -118,7 +118,7 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatab
args = append(args, "--events")
}
args = append(args, uc.getNetworkCompressionArgs(my.Version)...)
args = append(args, uc.getNetworkCompressionArgs(my)...)
if !config.GetEnv().IsCloud {
args = append(args, "--max-allowed-packet=1G")
@@ -135,15 +135,21 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatab
return args
}
func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.MysqlVersion) []string {
func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(
my *mysqltypes.MysqlDatabase,
) []string {
const zstdCompressionLevel = 5
switch version {
switch my.Version {
case tools.MysqlVersion80, tools.MysqlVersion84, tools.MysqlVersion9:
return []string{
"--compression-algorithms=zstd",
fmt.Sprintf("--zstd-compression-level=%d", zstdCompressionLevel),
if my.IsZstdSupported {
return []string{
"--compression-algorithms=zstd",
fmt.Sprintf("--zstd-compression-level=%d", zstdCompressionLevel),
}
}
return []string{"--compress"}
case tools.MysqlVersion57:
return []string{"--compress"}
default:
@@ -589,6 +595,15 @@ func (uc *CreateMysqlBackupUsecase) handleConnectionErrors(stderrStr string) err
)
}
if containsIgnoreCase(stderrStr, "compression algorithm") ||
containsIgnoreCase(stderrStr, "2066") {
return fmt.Errorf(
"MySQL connection failed due to unsupported compression algorithm. "+
"Try re-saving the database connection to re-detect compression support. stderr: %s",
stderrStr,
)
}
if containsIgnoreCase(stderrStr, "unknown database") {
return fmt.Errorf(
"MySQL database does not exist. stderr: %s",

View File

@@ -25,13 +25,14 @@ type MysqlDatabase struct {
Version tools.MysqlVersion `json:"version" gorm:"type:text;not null"`
Host string `json:"host" gorm:"type:text;not null"`
Port int `json:"port" gorm:"type:int;not null"`
Username string `json:"username" gorm:"type:text;not null"`
Password string `json:"password" gorm:"type:text;not null"`
Database *string `json:"database" gorm:"type:text"`
IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"`
Privileges string `json:"privileges" gorm:"column:privileges;type:text;not null;default:''"`
Host string `json:"host" gorm:"type:text;not null"`
Port int `json:"port" gorm:"type:int;not null"`
Username string `json:"username" gorm:"type:text;not null"`
Password string `json:"password" gorm:"type:text;not null"`
Database *string `json:"database" gorm:"type:text"`
IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"`
Privileges string `json:"privileges" gorm:"column:privileges;type:text;not null;default:''"`
IsZstdSupported bool `json:"isZstdSupported" gorm:"column:is_zstd_supported;type:boolean;not null;default:true"`
}
func (m *MysqlDatabase) TableName() string {
@@ -102,6 +103,7 @@ func (m *MysqlDatabase) TestConnection(
return err
}
m.Privileges = privileges
m.IsZstdSupported = detectZstdSupport(ctx, db)
if err := checkBackupPermissions(m.Privileges); err != nil {
return err
@@ -125,6 +127,7 @@ func (m *MysqlDatabase) Update(incoming *MysqlDatabase) {
m.Database = incoming.Database
m.IsHttps = incoming.IsHttps
m.Privileges = incoming.Privileges
m.IsZstdSupported = incoming.IsZstdSupported
if incoming.Password != "" {
m.Password = incoming.Password
@@ -185,6 +188,7 @@ func (m *MysqlDatabase) PopulateDbData(
return err
}
m.Privileges = privileges
m.IsZstdSupported = detectZstdSupport(ctx, db)
return nil
}
@@ -223,6 +227,7 @@ func (m *MysqlDatabase) PopulateVersion(
return err
}
m.Version = detectedVersion
m.IsZstdSupported = detectZstdSupport(ctx, db)
return nil
}
@@ -575,6 +580,22 @@ func checkBackupPermissions(privileges string) error {
return nil
}
// detectZstdSupport reports whether the MySQL server advertises zstd network
// compression via the protocol_compression_algorithms variable (introduced in
// MySQL 8.0.18). Managed MySQL providers (e.g. PlanetScale) may not support
// zstd even on 8.0+. Any query/scan error is treated as "not supported".
func detectZstdSupport(ctx context.Context, db *sql.DB) bool {
	row := db.QueryRowContext(ctx,
		"SHOW VARIABLES LIKE 'protocol_compression_algorithms'",
	)
	var name, algorithms string
	if err := row.Scan(&name, &algorithms); err != nil {
		return false
	}
	return strings.Contains(strings.ToLower(algorithms), "zstd")
}
func decryptPasswordIfNeeded(
password string,
encryptor encryption.FieldEncryptor,

View File

@@ -177,6 +177,38 @@ func Test_TestConnection_SufficientPermissions_Success(t *testing.T) {
}
}
// Test_TestConnection_DetectsZstdSupport verifies that TestConnection populates
// IsZstdSupported per server version: MySQL 5.7 lacks the
// protocol_compression_algorithms variable (expected false), while 8.0+ is
// expected to advertise zstd (expected true) against the test containers.
func Test_TestConnection_DetectsZstdSupport(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name         string
		version      tools.MysqlVersion
		port         string
		isExpectZstd bool
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port, false},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port, true},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port, true},
		{"MySQL 9", tools.MysqlVersion9, env.TestMysql90Port, true},
	}
	for _, tc := range cases {
		// Shadow the range variable: the subtests run in parallel, so without
		// this, pre-Go-1.22 loop semantics would make every subtest observe
		// the final case (MySQL 9) and silently skip 5.7/8.0/8.4 coverage.
		// Harmless on Go 1.22+ where each iteration already gets its own tc.
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			container := connectToMysqlContainer(t, tc.port, tc.version)
			defer container.DB.Close()
			mysqlModel := createMysqlModel(container)
			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
			err := mysqlModel.TestConnection(logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.Equal(t, tc.isExpectZstd, mysqlModel.IsZstdSupported,
				"IsZstdSupported mismatch for %s", tc.name)
		})
	}
}
func Test_IsUserReadOnly_AdminUser_ReturnsFalse(t *testing.T) {
env := config.GetEnv()
cases := []struct {

View File

@@ -23,13 +23,14 @@ const (
)
type EmailNotifier struct {
NotifierID uuid.UUID `json:"notifierId" gorm:"primaryKey;type:uuid;column:notifier_id"`
TargetEmail string `json:"targetEmail" gorm:"not null;type:varchar(255);column:target_email"`
SMTPHost string `json:"smtpHost" gorm:"not null;type:varchar(255);column:smtp_host"`
SMTPPort int `json:"smtpPort" gorm:"not null;column:smtp_port"`
SMTPUser string `json:"smtpUser" gorm:"type:varchar(255);column:smtp_user"`
SMTPPassword string `json:"smtpPassword" gorm:"type:varchar(255);column:smtp_password"`
From string `json:"from" gorm:"type:varchar(255);column:from_email"`
NotifierID uuid.UUID `json:"notifierId" gorm:"primaryKey;type:uuid;column:notifier_id"`
TargetEmail string `json:"targetEmail" gorm:"not null;type:varchar(255);column:target_email"`
SMTPHost string `json:"smtpHost" gorm:"not null;type:varchar(255);column:smtp_host"`
SMTPPort int `json:"smtpPort" gorm:"not null;column:smtp_port"`
SMTPUser string `json:"smtpUser" gorm:"type:varchar(255);column:smtp_user"`
SMTPPassword string `json:"smtpPassword" gorm:"type:varchar(255);column:smtp_password"`
From string `json:"from" gorm:"type:varchar(255);column:from_email"`
IsInsecureSkipVerify bool `json:"isInsecureSkipVerify" gorm:"default:false;column:is_insecure_skip_verify"`
}
func (e *EmailNotifier) TableName() string {
@@ -99,6 +100,7 @@ func (e *EmailNotifier) Update(incoming *EmailNotifier) {
e.SMTPPort = incoming.SMTPPort
e.SMTPUser = incoming.SMTPUser
e.From = incoming.From
e.IsInsecureSkipVerify = incoming.IsInsecureSkipVerify
if incoming.SMTPPassword != "" {
e.SMTPPassword = incoming.SMTPPassword
@@ -198,7 +200,10 @@ func (e *EmailNotifier) sendStartTLS(
func (e *EmailNotifier) createImplicitTLSClient() (*smtp.Client, func(), error) {
addr := net.JoinHostPort(e.SMTPHost, fmt.Sprintf("%d", e.SMTPPort))
tlsConfig := &tls.Config{ServerName: e.SMTPHost}
tlsConfig := &tls.Config{
ServerName: e.SMTPHost,
InsecureSkipVerify: e.IsInsecureSkipVerify,
}
dialer := &net.Dialer{Timeout: DefaultTimeout}
conn, err := tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
@@ -237,7 +242,10 @@ func (e *EmailNotifier) createStartTLSClient() (*smtp.Client, func(), error) {
}
if ok, _ := client.Extension("STARTTLS"); ok {
if err := client.StartTLS(&tls.Config{ServerName: e.SMTPHost}); err != nil {
if err := client.StartTLS(&tls.Config{
ServerName: e.SMTPHost,
InsecureSkipVerify: e.IsInsecureSkipVerify,
}); err != nil {
_ = client.Quit()
_ = conn.Close()
return nil, nil, fmt.Errorf("STARTTLS failed: %w", err)

View File

@@ -0,0 +1,30 @@
package system_version
import (
"net/http"
"os"
"github.com/gin-gonic/gin"
)
// VersionController exposes the running application's version over HTTP.
type VersionController struct{}
// RegisterRoutes attaches the version endpoint to the given router group.
func (c *VersionController) RegisterRoutes(router *gin.RouterGroup) {
	router.GET("/system/version", c.GetVersion)
}
// GetVersion
// @Summary Get application version
// @Description Returns the current application version
// @Tags system/version
// @Produce json
// @Success 200 {object} VersionResponse
// @Router /system/version [get]
func (c *VersionController) GetVersion(ctx *gin.Context) {
	// Fall back to "dev" when the build did not inject APP_VERSION.
	response := VersionResponse{Version: "dev"}
	if v := os.Getenv("APP_VERSION"); v != "" {
		response.Version = v
	}
	ctx.JSON(http.StatusOK, response)
}

View File

@@ -0,0 +1,7 @@
package system_version
// versionController is the package-level singleton shared by all callers.
var versionController = &VersionController{}

// GetVersionController returns the shared VersionController instance.
func GetVersionController() *VersionController {
	return versionController
}

View File

@@ -0,0 +1,5 @@
package system_version
// VersionResponse is the JSON payload returned by the /system/version endpoint.
type VersionResponse struct {
	Version string `json:"version"`
}

View File

@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
-- Lets an email notifier skip TLS certificate verification
-- (e.g. SMTP servers using self-signed certificates). Defaults to
-- FALSE so existing notifiers keep strict verification.
ALTER TABLE email_notifiers
ADD COLUMN is_insecure_skip_verify BOOLEAN NOT NULL DEFAULT FALSE;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE email_notifiers
DROP COLUMN is_insecure_skip_verify;
-- +goose StatementEnd

View File

@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
-- Records whether the target MySQL server supports zstd dump compression.
-- DEFAULT TRUE preserves the previously assumed behavior for existing rows;
-- the flag is presumably refreshed during connection testing — confirm in the
-- MySQL model.
ALTER TABLE mysql_databases
ADD COLUMN is_zstd_supported BOOLEAN NOT NULL DEFAULT TRUE;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE mysql_databases
DROP COLUMN is_zstd_supported;
-- +goose StatementEnd

View File

@@ -3,6 +3,8 @@ import { useEffect, useState } from 'react';
import { BrowserRouter, Route } from 'react-router';
import { Routes } from 'react-router';
import { useVersionCheck } from './shared/hooks/useVersionCheck';
import { userApi } from './entity/users';
import { AuthPageComponent } from './pages/AuthPageComponent';
import { OAuthCallbackPage } from './pages/OAuthCallbackPage';
@@ -14,6 +16,8 @@ function AppContent() {
const [isAuthorized, setIsAuthorized] = useState(false);
const { resolvedTheme } = useTheme();
useVersionCheck();
useEffect(() => {
const isAuthorized = userApi.isAuthorized();
setIsAuthorized(isAuthorized);

View File

@@ -4,6 +4,7 @@ interface RuntimeConfig {
GOOGLE_CLIENT_ID?: string;
IS_EMAIL_CONFIGURED?: string;
CLOUDFLARE_TURNSTILE_SITE_KEY?: string;
CONTAINER_ARCH?: string;
}
declare global {
@@ -45,6 +46,10 @@ export const CLOUDFLARE_TURNSTILE_SITE_KEY =
import.meta.env.VITE_CLOUDFLARE_TURNSTILE_SITE_KEY ||
'';
// Map container architecture identifiers to the labels shown in the UI;
// values missing from the map (and the 'unknown' fallback) are shown as-is.
const archMap: Record<string, string> = { amd64: 'x64', arm64: 'arm64' };
const rawArch = window.__RUNTIME_CONFIG__?.CONTAINER_ARCH || 'unknown';
export const CONTAINER_ARCH = archMap[rawArch] || rawArch;
// Builds the OAuth callback URI for the page's current origin.
export function getOAuthRedirectUri(): string {
  const { origin } = window.location;
  return `${origin}/auth/callback`;
}

View File

@@ -5,4 +5,5 @@ export interface EmailNotifier {
smtpUser: string;
smtpPassword: string;
from: string;
isInsecureSkipVerify: boolean;
}

View File

@@ -0,0 +1,14 @@
import { getApplicationServer } from '../../../constants';
import type { VersionResponse } from '../model/VersionResponse';
// API client for system-level endpoints.
export const systemApi = {
  // Fetches the backend's reported application version.
  async getVersion(): Promise<VersionResponse> {
    const url = `${getApplicationServer()}/api/v1/system/version`;
    const res = await fetch(url);
    if (res.ok) {
      return res.json();
    }
    throw new Error(`Failed to fetch version: ${res.status}`);
  },
};

View File

@@ -0,0 +1,2 @@
// Barrel file for the system entity: re-exports the API client and its types.
export { systemApi } from './api/systemApi';
export { type VersionResponse } from './model/VersionResponse';

View File

@@ -0,0 +1,3 @@
// Shape of the backend /system/version response payload.
export type VersionResponse = {
  version: string;
};

View File

@@ -112,6 +112,7 @@ export function EditNotifierComponent({
smtpUser: '',
smtpPassword: '',
from: '',
isInsecureSkipVerify: false,
};
}

View File

@@ -1,5 +1,6 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { Input, Tooltip } from 'antd';
import { DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import { Checkbox, Input, Tooltip } from 'antd';
import { useState } from 'react';
import type { Notifier } from '../../../../../entity/notifiers';
@@ -10,6 +11,9 @@ interface Props {
}
export function EditEmailNotifierComponent({ notifier, setNotifier, setUnsaved }: Props) {
const hasAdvancedValues = !!notifier?.emailNotifier?.isInsecureSkipVerify;
const [showAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
return (
<>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
@@ -163,6 +167,53 @@ export function EditEmailNotifierComponent({ notifier, setNotifier, setUnsaved }
</Tooltip>
</div>
</div>
<div className="mt-4 mb-3 flex items-center">
<div
className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
onClick={() => setShowAdvanced(!showAdvanced)}
>
<span className="mr-2">Advanced settings</span>
{showAdvanced ? (
<UpOutlined style={{ fontSize: '12px' }} />
) : (
<DownOutlined style={{ fontSize: '12px' }} />
)}
</div>
</div>
{showAdvanced && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Skip TLS verify</div>
<div className="flex items-center">
<Checkbox
checked={notifier?.emailNotifier?.isInsecureSkipVerify || false}
onChange={(e) => {
if (!notifier?.emailNotifier) return;
setNotifier({
...notifier,
emailNotifier: {
...notifier.emailNotifier,
isInsecureSkipVerify: e.target.checked,
},
});
setUnsaved();
}}
>
Skip TLS
</Checkbox>
<Tooltip
className="cursor-pointer"
title="Skip TLS certificate verification. Enable this if your SMTP server uses a self-signed certificate. Warning: this reduces security."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
)}
</>
);
}

View File

@@ -36,6 +36,13 @@ export function ShowEmailNotifierComponent({ notifier }: Props) {
<div className="min-w-[110px]">From</div>
{notifier?.emailNotifier?.from || '(auto)'}
</div>
{notifier?.emailNotifier?.isInsecureSkipVerify && (
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Skip TLS</div>
Enabled
</div>
)}
</>
);
}

View File

@@ -0,0 +1,40 @@
import { useEffect } from 'react';
import { APP_VERSION } from '../../constants';
import { systemApi } from '../../entity/system';
const VERSION_CHECK_INTERVAL_MS = 5 * 60 * 1000;
const RELOAD_COOLDOWN_MS = 10 * 1000;
const LAST_RELOAD_KEY = 'lastVersionReload';
// Polls the backend version and reloads the page when it no longer matches
// the version this frontend bundle was built with, so users pick up new
// assets after a deploy. A localStorage-backed cooldown prevents reload loops.
export function useVersionCheck() {
  useEffect(() => {
    // Dev builds have no meaningful version to compare against.
    if (APP_VERSION === 'dev') {
      return;
    }

    const compareWithBackend = async () => {
      try {
        const { version: backendVersion } = await systemApi.getVersion();
        if (!backendVersion || backendVersion === APP_VERSION) {
          return;
        }

        // Throttle: if we reloaded moments ago, don't do it again yet.
        const lastReload = Number(localStorage.getItem(LAST_RELOAD_KEY) || '0');
        if (Date.now() - lastReload < RELOAD_COOLDOWN_MS) {
          return;
        }

        localStorage.setItem(LAST_RELOAD_KEY, String(Date.now()));
        window.location.reload();
      } catch {
        // Silently ignore errors — network issues shouldn't break the app
      }
    };

    compareWithBackend();
    const timerId = setInterval(compareWithBackend, VERSION_CHECK_INTERVAL_MS);
    return () => clearInterval(timerId);
  }, []);
}

View File

@@ -2,7 +2,7 @@ import { LoadingOutlined, MenuOutlined } from '@ant-design/icons';
import { App, Button, Spin, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { APP_VERSION } from '../../constants';
import { APP_VERSION, CONTAINER_ARCH } from '../../constants';
import { type DiskUsage, diskApi } from '../../entity/disk';
import {
type UserProfile,
@@ -365,6 +365,8 @@ export const MainScreenComponent = () => {
<div className="absolute bottom-1 left-2 mb-[0px] hidden text-sm text-gray-400 md:block">
v{APP_VERSION}
<br />
{CONTAINER_ARCH}
</div>
</div>
)}