Compare commits

...

26 Commits

Author SHA1 Message Date
Rostislav Dugin
c325d42b89 Merge pull request #191 from databasus/develop
Develop
2025-12-30 15:46:51 +03:00
Rostislav Dugin
04a19cead1 FIX (readme): Update readme 2025-12-30 15:45:57 +03:00
Rostislav Dugin
648c315312 FIX (readme): Update README 2025-12-30 15:40:54 +03:00
github-actions[bot]
3a205c2f1d Update CITATION.cff to v2.18.3 2025-12-29 17:57:56 +00:00
Rostislav Dugin
49ebb01ffd Merge pull request #186 from databasus/develop
Develop
2025-12-29 20:36:39 +03:00
Rostislav Dugin
e957fb67dd FIX (s3): Include checksum over file upload 2025-12-29 20:35:10 +03:00
Rostislav Dugin
7cda83122a FIX (read-only): Use read-only user via frontend for MariaDB and MongoDB after creation 2025-12-29 20:31:27 +03:00
github-actions[bot]
11195d9078 Update CITATION.cff to v2.18.2 2025-12-29 15:27:04 +00:00
Rostislav Dugin
64d7a12f9f Merge pull request #184 from databasus/develop
Develop
2025-12-29 15:48:09 +03:00
Rostislav Dugin
9853ac425a FIX (sftp): Fix initial value in case of private key 2025-12-29 15:47:00 +03:00
Rostislav Dugin
6ad38228ce Merge pull request #182 from m4tt72/fix/sftp-storage-auth-method-radio-selection
fix(storages): SFTP auth method radio button now correctly switches to Private Key
2025-12-29 15:44:22 +03:00
Rostislav Dugin
7d576b50a9 Merge pull request #183 from databasus/main
Merge changes to develop
2025-12-29 15:43:01 +03:00
Rostislav Dugin
db3bd98425 FIX (readme): Fix installation methods 2025-12-29 15:41:22 +03:00
Yassine Fathi
7d8d0846cb fix(storages): SFTP auth method radio button now correctly switches to Private Key 2025-12-29 12:10:00 +01:00
github-actions[bot]
05540a8d8d Update CITATION.cff to v2.18.1 2025-12-28 17:14:24 +00:00
Rostislav Dugin
8250db9ce5 FIX (readme): Add AI disclaimer 2025-12-28 19:49:16 +03:00
github-actions[bot]
1e8cc46672 Update CITATION.cff to v2.18.0 2025-12-27 20:21:44 +00:00
Rostislav Dugin
9d30406d83 FEATURE (audit logs): Add retention for audit logs within 1 year 2025-12-27 22:58:35 +03:00
Rostislav Dugin
22e9c605da FEATURE (dockefile): Recover internal PostgreSQL in case of corruption 2025-12-27 22:39:39 +03:00
github-actions[bot]
60fe0322f1 Update CITATION.cff to v2.17.0 2025-12-27 18:15:43 +00:00
Rostislav Dugin
0ab734f947 FEATURE (cpu): Move CPU settings to DB level from backup config level 2025-12-27 20:49:43 +03:00
github-actions[bot]
908fe337d4 Update CITATION.cff to v2.16.3 2025-12-25 21:49:12 +00:00
Rostislav Dugin
2364b78e18 FIX (readme): Fix readme 2025-12-26 00:27:41 +03:00
github-actions[bot]
94fe41f66f Update CITATION.cff to v2.16.2 2025-12-25 20:29:37 +00:00
Rostislav Dugin
91ad57c003 FIX (migration): Add check for postgresus-data folder 2025-12-25 23:09:10 +03:00
github-actions[bot]
4e208a2586 Update CITATION.cff to v2.16.1 2025-12-25 19:21:39 +00:00
46 changed files with 576 additions and 124 deletions

View File

@@ -32,5 +32,5 @@ keywords:
- mongodb
- mariadb
license: Apache-2.0
version: 2.16.0
date-released: "2025-12-25"
version: 2.18.3
date-released: "2025-12-29"

View File

@@ -218,6 +218,23 @@ COPY <<EOF /app/start.sh
#!/bin/bash
set -e
# Check for legacy postgresus-data volume mount
if [ -d "/postgresus-data" ] && [ "\$(ls -A /postgresus-data 2>/dev/null)" ]; then
echo ""
echo "=========================================="
echo "ERROR: Legacy volume detected!"
echo "=========================================="
echo ""
echo "You are using the \`postgresus-data\` folder. It seems you changed the image name from Postgresus to Databasus without changing the volume."
echo ""
echo "Please either:"
echo " 1. Switch back to image rostislavdugin/postgresus:latest (supported until ~Dec 2026)"
echo " 2. Read the migration guide: https://databasus.com/installation/#postgresus-migration"
echo ""
echo "=========================================="
exit 1
fi
# PostgreSQL 17 binary paths
PG_BIN="/usr/lib/postgresql/17/bin"
@@ -240,24 +257,68 @@ if [ ! -s "/databasus-data/pgdata/PG_VERSION" ]; then
echo "max_connections = 100" >> /databasus-data/pgdata/postgresql.conf
fi
# Start PostgreSQL in background
echo "Starting PostgreSQL..."
gosu postgres \$PG_BIN/postgres -D /databasus-data/pgdata -p 5437 &
POSTGRES_PID=\$!
# Function to start PostgreSQL and wait for it to be ready
start_postgres() {
echo "Starting PostgreSQL..."
gosu postgres \$PG_BIN/postgres -D /databasus-data/pgdata -p 5437 &
POSTGRES_PID=\$!
echo "Waiting for PostgreSQL to be ready..."
for i in {1..30}; do
if gosu postgres \$PG_BIN/pg_isready -p 5437 -h localhost >/dev/null 2>&1; then
echo "PostgreSQL is ready!"
return 0
fi
sleep 1
done
return 1
}
# Wait for PostgreSQL to be ready
echo "Waiting for PostgreSQL to be ready..."
for i in {1..30}; do
if gosu postgres \$PG_BIN/pg_isready -p 5437 -h localhost >/dev/null 2>&1; then
echo "PostgreSQL is ready!"
break
fi
if [ \$i -eq 30 ]; then
echo "PostgreSQL failed to start"
# Try to start PostgreSQL
if ! start_postgres; then
echo ""
echo "=========================================="
echo "PostgreSQL failed to start. Attempting WAL reset recovery..."
echo "=========================================="
echo ""
# Kill any remaining postgres processes
pkill -9 postgres 2>/dev/null || true
sleep 2
# Attempt pg_resetwal to recover from WAL corruption
echo "Running pg_resetwal to reset WAL..."
if gosu postgres \$PG_BIN/pg_resetwal -f /databasus-data/pgdata; then
echo "WAL reset successful. Restarting PostgreSQL..."
# Try starting PostgreSQL again after WAL reset
if start_postgres; then
echo "PostgreSQL recovered successfully after WAL reset!"
else
echo ""
echo "=========================================="
echo "ERROR: PostgreSQL failed to start even after WAL reset."
echo "The database may be severely corrupted."
echo ""
echo "Options:"
echo " 1. Delete the volume and start fresh (data loss)"
echo " 2. Manually inspect /databasus-data/pgdata for issues"
echo "=========================================="
exit 1
fi
else
echo ""
echo "=========================================="
echo "ERROR: pg_resetwal failed."
echo "The database may be severely corrupted."
echo ""
echo "Options:"
echo " 1. Delete the volume and start fresh (data loss)"
echo " 2. Manually inspect /databasus-data/pgdata for issues"
echo "=========================================="
exit 1
fi
sleep 1
done
fi
# Create database and set password for postgres user
echo "Setting up database and user..."

View File

@@ -114,7 +114,7 @@ You have four ways to install Databasus:
## 📦 Installation
You have three ways to install Databasus: automated script (recommended), simple Docker run, or Docker Compose setup.
You have four ways to install Databasus: automated script (recommended), simple Docker run, or Docker Compose setup.
### Option 1: Automated installation script (recommended, Linux only)
@@ -245,6 +245,8 @@ This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENS
Contributions are welcome! Read the <a href="https://databasus.com/contribute">contributing guide</a> for more details, priorities and rules. If you want to contribute but don't know where to start, message me on Telegram [@rostislav_dugin](https://t.me/rostislav_dugin)
Also you can join our large community of developers, DBAs and DevOps engineers on Telegram [@databasus_community](https://t.me/databasus_community).
--
## 📖 Migration guide
@@ -271,10 +273,44 @@ Then manually move databases from Postgresus to Databasus.
### Why was Postgresus renamed to Databasus?
Databasus has been developed since 2023. It started as an internal tool to back up production and home-project databases. In early 2025 it was released as an open-source project on GitHub. By the end of 2025 it had become popular, and the time for renaming came in December 2025.
It was an important step for the project to grow. Actually, there are a couple of reasons:
1. Postgresus is no longer a little tool that just adds UI for pg_dump for little projects. It became a tool both for individual users, DevOps, DBAs,teams, companies and even large enterprises. Tens of thousands of users use Postgresus every day. Postgresus grew into a reliable backup management tool. Initial positioning is no longer suitable: the project is not just a UI wrapper, it's a solid backup management system now (despite it's still easy to use).
1. Postgresus is no longer a little tool that just adds UI for pg_dump for little projects. It became a tool both for individual users, DevOps, DBAs, teams, companies and even large enterprises. Tens of thousands of users use Postgresus every day. Postgresus grew into a reliable backup management tool. Initial positioning is no longer suitable: the project is not just a UI wrapper, it's a solid backup management system now (despite it's still easy to use).
2. New databases are supported: although the primary focus is PostgreSQL (with 100% support in the most efficient way) and always will be, Databasus added support for MySQL, MariaDB and MongoDB. Later more databases will be supported.
3. Trademark issue: "postgres" is a trademark of PostgreSQL Inc. and cannot be used in the project name. So for safety and legal reasons, we had to rename the project.
## AI disclaimer
There have been questions in issues and discussions about how AI is used in the project's development. As the project focuses on security, reliability and production usage, it's important to explain how AI is used in the development process.
AI is used as a helper for:
- verification of code quality and searching for vulnerabilities
- cleaning up and improving documentation, comments and code
- assistance during development
- double-checking PRs and commits after human review
AI is not used for:
- writing entire code
- "vibe code" approach
- code without line-by-line verification by a human
- code without tests
The project has:
- solid test coverage (both unit and integration tests)
- CI/CD pipeline automation with tests and linting to ensure code quality
- verification by experienced developers with experience in large and secure projects
So AI is just an assistant and a tool for developers to increase productivity and ensure code quality. The work is done by developers.
Moreover, it's important to note that we do not differentiate between bad human code and AI vibe code. There are strict requirements for any code to be merged to keep the codebase maintainable.
Even if code is written manually by a human, it's not guaranteed to be merged. Vibe code is not allowed at all and all such PRs are rejected by default (see [contributing guide](https://databasus.com/contribute)).
We also draw attention to fast issue resolution and security [vulnerability reporting](https://github.com/databasus/databasus?tab=security-ov-file#readme).

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 767 KiB

After

Width:  |  Height:  |  Size: 766 KiB

View File

@@ -238,6 +238,10 @@ func runBackgroundTasks(log *slog.Logger) {
go runWithPanicLogging(log, "healthcheck attempt background service", func() {
healthcheck_attempt.GetHealthcheckAttemptBackgroundService().Run()
})
go runWithPanicLogging(log, "audit log cleanup background service", func() {
audit_logs.GetAuditLogBackgroundService().Run()
})
}
func runWithPanicLogging(log *slog.Logger, serviceName string, fn func()) {

View File

@@ -0,0 +1,36 @@
package audit_logs
import (
"databasus-backend/internal/config"
"log/slog"
"time"
)
// AuditLogBackgroundService periodically purges audit logs that are past
// their retention period. It is started once as a background goroutine
// (see runBackgroundTasks) and runs until application shutdown.
type AuditLogBackgroundService struct {
auditLogService *AuditLogService // performs the actual retention cleanup
logger *slog.Logger // structured logger for lifecycle and errors
}
// Run executes the audit log retention loop: it deletes expired audit logs
// once per hour until the application signals shutdown via
// config.IsShouldShutdown. Intended to be launched in its own goroutine.
func (s *AuditLogBackgroundService) Run() {
s.logger.Info("Starting audit log cleanup background service")
// Bail out early if shutdown was requested before the first cycle.
if config.IsShouldShutdown() {
return
}
for {
// Re-check the shutdown flag on every iteration so the goroutine
// exits promptly once the application begins shutting down.
if config.IsShouldShutdown() {
return
}
// A failed cleanup is logged but not fatal; the next hourly
// iteration will simply retry.
if err := s.cleanOldAuditLogs(); err != nil {
s.logger.Error("Failed to clean old audit logs", "error", err)
}
// NOTE(review): time.Sleep blocks for the full hour, so shutdown is
// only observed between sleeps — confirm this latency is acceptable.
time.Sleep(1 * time.Hour)
}
}
// cleanOldAuditLogs delegates one retention-cleanup pass to the audit log
// service and propagates its error, if any.
func (s *AuditLogBackgroundService) cleanOldAuditLogs() error {
return s.auditLogService.CleanOldAuditLogs()
}

View File

@@ -0,0 +1,141 @@
package audit_logs
import (
"databasus-backend/internal/storage"
"fmt"
"testing"
"time"
user_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
)
// Verifies that CleanOldAuditLogs removes logs created more than one year
// ago while leaving freshly created logs intact.
func Test_CleanOldAuditLogs_DeletesLogsOlderThanOneYear(t *testing.T) {
service := GetAuditLogService()
user := users_testing.CreateTestUser(user_enums.UserRoleMember)
db := storage.GetDb()
baseTime := time.Now().UTC()
// Create old logs (more than 1 year old)
createTimedAuditLog(db, &user.UserID, "Old log 1", baseTime.Add(-400*24*time.Hour))
createTimedAuditLog(db, &user.UserID, "Old log 2", baseTime.Add(-370*24*time.Hour))
// Create recent logs (less than 1 year old)
createAuditLog(service, "Recent log 1", &user.UserID, nil)
createAuditLog(service, "Recent log 2", &user.UserID, nil)
// Run cleanup
err := service.CleanOldAuditLogs()
assert.NoError(t, err)
// Verify old logs were deleted
// The service uses the same 365-day window, so anything older must be gone.
oneYearAgo := baseTime.Add(-365 * 24 * time.Hour)
var oldLogs []*AuditLog
db.Where("created_at < ?", oneYearAgo).Find(&oldLogs)
assert.Equal(t, 0, len(oldLogs), "All logs older than 1 year should be deleted")
// Verify recent logs still exist
var recentLogs []*AuditLog
db.Where("created_at >= ?", oneYearAgo).Find(&recentLogs)
assert.GreaterOrEqual(t, len(recentLogs), 2, "Recent logs should not be deleted")
}
// Verifies that logs inside the one-year retention window survive cleanup.
func Test_CleanOldAuditLogs_PreservesLogsNewerThanOneYear(t *testing.T) {
service := GetAuditLogService()
user := users_testing.CreateTestUser(user_enums.UserRoleMember)
db := storage.GetDb()
baseTime := time.Now().UTC()
// Create logs exactly at boundary (1 year old)
// NOTE(review): the service computes its cutoff from time.Now() at cleanup
// time, slightly after baseTime, so this boundary log may legitimately be
// deleted — the assertions below deliberately do not require its presence.
boundaryTime := baseTime.Add(-365 * 24 * time.Hour)
createTimedAuditLog(db, &user.UserID, "Boundary log", boundaryTime)
// Create recent logs
createTimedAuditLog(db, &user.UserID, "Recent log 1", baseTime.Add(-364*24*time.Hour))
createTimedAuditLog(db, &user.UserID, "Recent log 2", baseTime.Add(-100*24*time.Hour))
createAuditLog(service, "Current log", &user.UserID, nil)
// Get count before cleanup
// NOTE(review): countBefore/countAfter are captured but never asserted —
// either assert a relation between them or drop these counts.
var countBefore int64
db.Model(&AuditLog{}).Count(&countBefore)
// Run cleanup
err := service.CleanOldAuditLogs()
assert.NoError(t, err)
// Get count after cleanup
var countAfter int64
db.Model(&AuditLog{}).Count(&countAfter)
// Verify logs newer than 1 year are preserved
oneYearAgo := baseTime.Add(-365 * 24 * time.Hour)
var recentLogs []*AuditLog
db.Where("created_at >= ?", oneYearAgo).Find(&recentLogs)
messages := make([]string, len(recentLogs))
for i, log := range recentLogs {
messages[i] = log.Message
}
assert.Contains(t, messages, "Recent log 1")
assert.Contains(t, messages, "Recent log 2")
assert.Contains(t, messages, "Current log")
}
// Verifies cleanup is a no-op (and error-free) when there is nothing old
// enough to delete.
func Test_CleanOldAuditLogs_HandlesEmptyDatabase(t *testing.T) {
service := GetAuditLogService()
// Run cleanup on database that may have no old logs
err := service.CleanOldAuditLogs()
assert.NoError(t, err)
}
// Verifies bulk deletion: several old logs tracked by explicit UUIDs are
// all removed in a single cleanup pass.
func Test_CleanOldAuditLogs_DeletesMultipleOldLogs(t *testing.T) {
service := GetAuditLogService()
user := users_testing.CreateTestUser(user_enums.UserRoleMember)
db := storage.GetDb()
baseTime := time.Now().UTC()
// Create many old logs with specific UUIDs to track them
testLogIDs := make([]uuid.UUID, 5)
for i := 0; i < 5; i++ {
testLogIDs[i] = uuid.New()
// Ages span 400-440 days, all safely past the 365-day cutoff.
daysAgo := 400 + (i * 10)
log := &AuditLog{
ID: testLogIDs[i],
UserID: &user.UserID,
Message: fmt.Sprintf("Test old log %d", i),
CreatedAt: baseTime.Add(-time.Duration(daysAgo) * 24 * time.Hour),
}
result := db.Create(log)
assert.NoError(t, result.Error)
}
// Verify logs exist before cleanup
var logsBeforeCleanup []*AuditLog
db.Where("id IN ?", testLogIDs).Find(&logsBeforeCleanup)
assert.Equal(t, 5, len(logsBeforeCleanup), "All test logs should exist before cleanup")
// Run cleanup
err := service.CleanOldAuditLogs()
assert.NoError(t, err)
// Verify test logs were deleted
var logsAfterCleanup []*AuditLog
db.Where("id IN ?", testLogIDs).Find(&logsAfterCleanup)
assert.Equal(t, 0, len(logsAfterCleanup), "All old test logs should be deleted")
}
// createTimedAuditLog inserts an audit log row with an explicit CreatedAt,
// letting tests fabricate records that are "old" relative to now.
// NOTE(review): the db.Create error is ignored here — a failed insert would
// only surface later as a confusing assertion failure; consider asserting it.
func createTimedAuditLog(db *gorm.DB, userID *uuid.UUID, message string, createdAt time.Time) {
log := &AuditLog{
ID: uuid.New(),
UserID: userID,
Message: message,
CreatedAt: createdAt,
}
db.Create(log)
}

View File

@@ -7,11 +7,15 @@ import (
var auditLogRepository = &AuditLogRepository{}
var auditLogService = &AuditLogService{
auditLogRepository: auditLogRepository,
logger: logger.GetLogger(),
auditLogRepository,
logger.GetLogger(),
}
var auditLogController = &AuditLogController{
auditLogService: auditLogService,
auditLogService,
}
var auditLogBackgroundService = &AuditLogBackgroundService{
auditLogService,
logger.GetLogger(),
}
func GetAuditLogService() *AuditLogService {
@@ -22,6 +26,10 @@ func GetAuditLogController() *AuditLogController {
return auditLogController
}
func GetAuditLogBackgroundService() *AuditLogBackgroundService {
return auditLogBackgroundService
}
func SetupDependencies() {
users_services.GetUserService().SetAuditLogWriter(auditLogService)
users_services.GetSettingsService().SetAuditLogWriter(auditLogService)

View File

@@ -137,3 +137,15 @@ func (r *AuditLogRepository) CountGlobal(beforeDate *time.Time) (int64, error) {
err := query.Count(&count).Error
return count, err
}
// DeleteOlderThan removes every audit log created strictly before
// beforeDate and returns the number of rows deleted.
func (r *AuditLogRepository) DeleteOlderThan(beforeDate time.Time) (int64, error) {
result := storage.GetDb().
Where("created_at < ?", beforeDate).
Delete(&AuditLog{})
if result.Error != nil {
return 0, result.Error
}
// GORM exposes the DELETE's affected-row count on the result.
return result.RowsAffected, nil
}

View File

@@ -135,3 +135,19 @@ func (s *AuditLogService) GetWorkspaceAuditLogs(
Offset: offset,
}, nil
}
// CleanOldAuditLogs deletes audit log entries older than the retention
// window (approximately one year) and logs how many rows were removed.
// Returns the repository error, if any, after logging it.
func (s *AuditLogService) CleanOldAuditLogs() error {
// 365*24h is an approximation of "one year" — it ignores leap years,
// which is acceptable for retention purposes.
oneYearAgo := time.Now().UTC().Add(-365 * 24 * time.Hour)
deletedCount, err := s.auditLogRepository.DeleteOlderThan(oneYearAgo)
if err != nil {
s.logger.Error("Failed to delete old audit logs", "error", err)
return err
}
// Only log when something was actually deleted to keep logs quiet.
if deletedCount > 0 {
s.logger.Info("Deleted old audit logs", "count", deletedCount, "olderThan", oneYearAgo)
}
return nil
}

View File

@@ -585,6 +585,7 @@ func createTestDatabase(
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}

View File

@@ -106,6 +106,13 @@ func (uc *CreateMongodbBackupUsecase) buildMongodumpArgs(
"--gzip",
}
// Use numParallelCollections based on CPU count
// Cap between 1 and 16 to balance performance and resource usage
parallelCollections := max(1, min(mdb.CpuCount, 16))
if parallelCollections > 1 {
args = append(args, "--numParallelCollections="+fmt.Sprintf("%d", parallelCollections))
}
return args
}

View File

@@ -139,7 +139,7 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
cmd := exec.CommandContext(ctx, pgBin, args...)
uc.logger.Info("Executing PostgreSQL backup command", "command", cmd.String())
if err := uc.setupPgEnvironment(cmd, pgpassFile, db.Postgresql.IsHttps, password, backupConfig.CpuCount, pgBin); err != nil {
if err := uc.setupPgEnvironment(cmd, pgpassFile, db.Postgresql.IsHttps, password, db.Postgresql.CpuCount, pgBin); err != nil {
return nil, err
}
@@ -335,6 +335,11 @@ func (uc *CreatePostgresqlBackupUsecase) buildPgDumpArgs(pg *pgtypes.PostgresqlD
"--verbose",
}
// Add parallel jobs based on CPU count
if pg.CpuCount > 1 {
args = append(args, "-j", strconv.Itoa(pg.CpuCount))
}
for _, schema := range pg.IncludeSchemas {
args = append(args, "-n", schema)
}

View File

@@ -109,7 +109,6 @@ func Test_SaveBackupConfig_PermissionsEnforced(t *testing.T) {
SendNotificationsOn: []BackupNotificationType{
NotificationBackupFailed,
},
CpuCount: 2,
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
}
@@ -129,7 +128,6 @@ func Test_SaveBackupConfig_PermissionsEnforced(t *testing.T) {
assert.Equal(t, database.ID, response.DatabaseID)
assert.True(t, response.IsBackupsEnabled)
assert.Equal(t, period.PeriodWeek, response.StorePeriod)
assert.Equal(t, 2, response.CpuCount)
} else {
assert.Contains(t, string(testResp.Body), "insufficient permissions")
}
@@ -158,7 +156,6 @@ func Test_SaveBackupConfig_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *test
SendNotificationsOn: []BackupNotificationType{
NotificationBackupFailed,
},
CpuCount: 2,
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
}
@@ -290,7 +287,6 @@ func Test_GetBackupConfigByDbID_ReturnsDefaultConfigForNewDatabase(t *testing.T)
assert.Equal(t, database.ID, response.DatabaseID)
assert.False(t, response.IsBackupsEnabled)
assert.Equal(t, period.PeriodWeek, response.StorePeriod)
assert.Equal(t, 1, response.CpuCount)
assert.True(t, response.IsRetryIfFailed)
assert.Equal(t, 3, response.MaxFailedTriesCount)
assert.NotNil(t, response.BackupInterval)
@@ -387,7 +383,6 @@ func Test_SaveBackupConfig_WithEncryptionNone_ConfigSaved(t *testing.T) {
SendNotificationsOn: []BackupNotificationType{
NotificationBackupFailed,
},
CpuCount: 2,
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
Encryption: BackupEncryptionNone,
@@ -427,7 +422,6 @@ func Test_SaveBackupConfig_WithEncryptionEncrypted_ConfigSaved(t *testing.T) {
SendNotificationsOn: []BackupNotificationType{
NotificationBackupFailed,
},
CpuCount: 2,
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
Encryption: BackupEncryptionEncrypted,
@@ -466,6 +460,7 @@ func createTestDatabaseViaAPI(
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}

View File

@@ -30,8 +30,6 @@ type BackupConfig struct {
IsRetryIfFailed bool `json:"isRetryIfFailed" gorm:"column:is_retry_if_failed;type:boolean;not null"`
MaxFailedTriesCount int `json:"maxFailedTriesCount" gorm:"column:max_failed_tries_count;type:int;not null"`
CpuCount int `json:"cpuCount" gorm:"type:int;not null"`
Encryption BackupEncryption `json:"encryption" gorm:"column:encryption;type:text;not null;default:'NONE'"`
}
@@ -82,10 +80,6 @@ func (b *BackupConfig) Validate() error {
return errors.New("store period is required")
}
if b.CpuCount == 0 {
return errors.New("cpu count is required")
}
if b.IsRetryIfFailed && b.MaxFailedTriesCount <= 0 {
return errors.New("max failed tries count must be greater than 0")
}
@@ -109,7 +103,6 @@ func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig {
SendNotificationsOn: b.SendNotificationsOn,
IsRetryIfFailed: b.IsRetryIfFailed,
MaxFailedTriesCount: b.MaxFailedTriesCount,
CpuCount: b.CpuCount,
Encryption: b.Encryption,
}
}

View File

@@ -168,7 +168,6 @@ func (s *BackupConfigService) initializeDefaultConfig(
NotificationBackupFailed,
NotificationBackupSuccess,
},
CpuCount: 1,
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
Encryption: BackupEncryptionNone,

View File

@@ -28,7 +28,6 @@ func EnableBackupsForTestDatabase(
NotificationBackupFailed,
NotificationBackupSuccess,
},
CpuCount: 1,
}
backupConfig, err := GetBackupConfigService().SaveBackupConfig(backupConfig)

View File

@@ -100,6 +100,7 @@ func Test_CreateDatabase_PermissionsEnforced(t *testing.T) {
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}
@@ -143,6 +144,7 @@ func Test_CreateDatabase_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *testin
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}
@@ -747,6 +749,7 @@ func createTestDatabaseViaAPI(
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}
@@ -790,6 +793,7 @@ func Test_CreateDatabase_PasswordIsEncryptedInDB(t *testing.T) {
Username: "postgres",
Password: plainPassword,
Database: &testDbName,
CpuCount: 1,
},
}
@@ -862,6 +866,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Username: "postgres",
Password: "original-password-secret",
Database: &testDbName,
CpuCount: 1,
},
}
},
@@ -879,6 +884,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Username: "updated_user",
Password: "",
Database: &testDbName,
CpuCount: 1,
},
}
},
@@ -961,6 +967,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Database: "test_db",
AuthDatabase: "admin",
IsHttps: false,
CpuCount: 1,
},
}
},
@@ -979,6 +986,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Database: "updated_test_db",
AuthDatabase: "admin",
IsHttps: false,
CpuCount: 1,
},
}
},

View File

@@ -30,6 +30,7 @@ type MongodbDatabase struct {
Database string `json:"database" gorm:"type:text;not null"`
AuthDatabase string `json:"authDatabase" gorm:"type:text;not null;default:'admin'"`
IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"`
CpuCount int `json:"cpuCount" gorm:"column:cpu_count;type:int;not null;default:1"`
}
func (m *MongodbDatabase) TableName() string {
@@ -52,6 +53,9 @@ func (m *MongodbDatabase) Validate() error {
if m.Database == "" {
return errors.New("database is required")
}
if m.CpuCount <= 0 {
return errors.New("cpu count must be greater than 0")
}
return nil
}
@@ -109,6 +113,7 @@ func (m *MongodbDatabase) Update(incoming *MongodbDatabase) {
m.Database = incoming.Database
m.AuthDatabase = incoming.AuthDatabase
m.IsHttps = incoming.IsHttps
m.CpuCount = incoming.CpuCount
if incoming.Password != "" {
m.Password = incoming.Password

View File

@@ -34,6 +34,7 @@ type PostgresqlDatabase struct {
// backup settings
IncludeSchemas []string `json:"includeSchemas" gorm:"-"`
IncludeSchemasString string `json:"-" gorm:"column:include_schemas;type:text;not null;default:''"`
CpuCount int `json:"cpuCount" gorm:"column:cpu_count;type:int;not null;default:1"`
// restore settings (not saved to DB)
IsExcludeExtensions bool `json:"isExcludeExtensions" gorm:"-"`
@@ -80,6 +81,10 @@ func (p *PostgresqlDatabase) Validate() error {
return errors.New("password is required")
}
if p.CpuCount <= 0 {
return errors.New("cpu count must be greater than 0")
}
return nil
}
@@ -110,6 +115,7 @@ func (p *PostgresqlDatabase) Update(incoming *PostgresqlDatabase) {
p.Database = incoming.Database
p.IsHttps = incoming.IsHttps
p.IncludeSchemas = incoming.IncludeSchemas
p.CpuCount = incoming.CpuCount
if incoming.Password != "" {
p.Password = incoming.Password

View File

@@ -396,15 +396,17 @@ func (s *DatabaseService) CopyDatabase(
case DatabaseTypePostgres:
if existingDatabase.Postgresql != nil {
newDatabase.Postgresql = &postgresql.PostgresqlDatabase{
ID: uuid.Nil,
DatabaseID: nil,
Version: existingDatabase.Postgresql.Version,
Host: existingDatabase.Postgresql.Host,
Port: existingDatabase.Postgresql.Port,
Username: existingDatabase.Postgresql.Username,
Password: existingDatabase.Postgresql.Password,
Database: existingDatabase.Postgresql.Database,
IsHttps: existingDatabase.Postgresql.IsHttps,
ID: uuid.Nil,
DatabaseID: nil,
Version: existingDatabase.Postgresql.Version,
Host: existingDatabase.Postgresql.Host,
Port: existingDatabase.Postgresql.Port,
Username: existingDatabase.Postgresql.Username,
Password: existingDatabase.Postgresql.Password,
Database: existingDatabase.Postgresql.Database,
IsHttps: existingDatabase.Postgresql.IsHttps,
IncludeSchemas: existingDatabase.Postgresql.IncludeSchemas,
CpuCount: existingDatabase.Postgresql.CpuCount,
}
}
case DatabaseTypeMysql:
@@ -448,6 +450,7 @@ func (s *DatabaseService) CopyDatabase(
Database: existingDatabase.Mongodb.Database,
AuthDatabase: existingDatabase.Mongodb.AuthDatabase,
IsHttps: existingDatabase.Mongodb.IsHttps,
CpuCount: existingDatabase.Mongodb.CpuCount,
}
}
}

View File

@@ -25,6 +25,7 @@ func CreateTestDatabase(
Port: 5432,
Username: "postgres",
Password: "postgres",
CpuCount: 1,
},
Notifiers: []notifiers.Notifier{

View File

@@ -217,6 +217,7 @@ func createTestDatabaseViaAPI(
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}

View File

@@ -305,6 +305,7 @@ func createTestDatabaseViaAPI(
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}

View File

@@ -295,6 +295,7 @@ func createTestDatabase(
Username: "postgres",
Password: "postgres",
Database: &testDbName,
CpuCount: 1,
},
}

View File

@@ -111,6 +111,16 @@ func (uc *RestoreMongodbBackupUsecase) buildMongorestoreArgs(
args = append(args, "--nsInclude="+mdb.Database+".*")
}
// Use numInsertionWorkersPerCollection based on CPU count
// Cap between 1 and 16 to balance performance and resource usage
parallelWorkers := max(1, min(mdb.CpuCount, 16))
if parallelWorkers > 1 {
args = append(
args,
"--numInsertionWorkersPerCollection="+fmt.Sprintf("%d", parallelWorkers),
)
}
return args
}

View File

@@ -67,7 +67,7 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
// Use parallel jobs based on CPU count (same as backup)
// Cap between 1 and 8 to avoid overwhelming the server
parallelJobs := max(1, min(backupConfig.CpuCount, 8))
parallelJobs := max(1, min(restoringToDB.Postgresql.CpuCount, 8))
args := []string{
"-Fc", // expect custom format (same as backup)

View File

@@ -147,7 +147,9 @@ func (s *S3Storage) SaveFile(
objectKey,
bytes.NewReader([]byte{}),
0,
minio.PutObjectOptions{},
minio.PutObjectOptions{
SendContentMd5: true,
},
)
if err != nil {
return fmt.Errorf("failed to upload empty file: %w", err)
@@ -283,7 +285,9 @@ func (s *S3Storage) TestConnection(encryptor encryption.FieldEncryptor) error {
testObjectKey,
testReader,
int64(len(testData)),
minio.PutObjectOptions{},
minio.PutObjectOptions{
SendContentMd5: true,
},
)
if err != nil {
return fmt.Errorf("failed to upload test file to S3: %w", err)

View File

@@ -394,6 +394,7 @@ func createMongodbDatabaseViaAPI(
AuthDatabase: authDatabase,
Version: version,
IsHttps: false,
CpuCount: 1,
},
}
@@ -440,6 +441,7 @@ func createMongodbRestoreViaAPI(
AuthDatabase: authDatabase,
Version: version,
IsHttps: false,
CpuCount: 1,
},
}

View File

@@ -1269,6 +1269,7 @@ func createDatabaseViaAPI(
Username: username,
Password: password,
Database: &database,
CpuCount: 1,
},
}
@@ -1387,6 +1388,7 @@ func createRestoreWithOptionsViaAPI(
Password: password,
Database: &database,
IsExcludeExtensions: isExcludeExtensions,
CpuCount: 1,
},
}
@@ -1424,6 +1426,7 @@ func createDatabaseWithSchemasViaAPI(
Password: password,
Database: &database,
IncludeSchemas: includeSchemas,
CpuCount: 1,
},
}
@@ -1476,6 +1479,7 @@ func createSupabaseDatabaseViaAPI(
Database: &database,
IsHttps: true,
IncludeSchemas: includeSchemas,
CpuCount: 1,
},
}
@@ -1522,6 +1526,7 @@ func createSupabaseRestoreViaAPI(
Password: password,
Database: &database,
IsHttps: true,
CpuCount: 1,
},
}

View File

@@ -0,0 +1,25 @@
-- Migration: move the cpu_count setting from the backup-config level to the
-- database level (postgresql_databases / mongodb_databases), matching the
-- "Move CPU settings to DB level from backup config level" change.
-- +goose Up
-- +goose StatementBegin
ALTER TABLE backup_configs DROP COLUMN cpu_count;
-- +goose StatementEnd
-- +goose StatementBegin
ALTER TABLE postgresql_databases ADD COLUMN cpu_count INT NOT NULL DEFAULT 1;
-- +goose StatementEnd
-- +goose StatementBegin
ALTER TABLE mongodb_databases ADD COLUMN cpu_count INT NOT NULL DEFAULT 1;
-- +goose StatementEnd
-- +goose Down
-- NOTE(review): rolling back restores backup_configs.cpu_count with
-- DEFAULT 1, so original per-config values are lost on downgrade.
-- +goose StatementBegin
ALTER TABLE backup_configs ADD COLUMN cpu_count INT NOT NULL DEFAULT 1;
-- +goose StatementEnd
-- +goose StatementBegin
ALTER TABLE postgresql_databases DROP COLUMN cpu_count;
-- +goose StatementEnd
-- +goose StatementBegin
ALTER TABLE mongodb_databases DROP COLUMN cpu_count;
-- +goose StatementEnd

View File

@@ -12,7 +12,6 @@ export interface BackupConfig {
backupInterval?: Interval;
storage?: Storage;
sendNotificationsOn: BackupNotificationType[];
cpuCount: number;
isRetryIfFailed: boolean;
maxFailedTriesCount: number;
encryption: BackupEncryption;

View File

@@ -10,4 +10,5 @@ export interface MongodbDatabase {
database: string;
authDatabase: string;
useTls: boolean;
cpuCount: number;
}

View File

@@ -14,6 +14,7 @@ export interface PostgresqlDatabase {
// backup settings
includeSchemas?: string[];
cpuCount: number;
// restore settings (not saved to DB)
isExcludeExtensions?: boolean;

View File

@@ -541,7 +541,7 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef
<h2 className="text-lg font-bold md:text-xl dark:text-white">Backups</h2>
{!isBackupConfigLoading && !backupConfig?.isBackupsEnabled && (
<div className="text-sm text-red-600 md:text-base">
<div className="text-sm text-red-600">
Scheduled backups are disabled (you can enable it back in the backup configuration)
</div>
)}

View File

@@ -154,7 +154,6 @@ export const EditBackupConfigComponent = ({
timeOfDay: '00:00',
},
storage: undefined,
cpuCount: 1,
storePeriod: Period.THREE_MONTH,
sendNotificationsOn: [],
isRetryIfFailed: true,
@@ -201,7 +200,6 @@ export const EditBackupConfigComponent = ({
!backupConfig.isBackupsEnabled ||
(Boolean(backupConfig.storePeriod) &&
Boolean(backupConfig.storage?.id) &&
Boolean(backupConfig.cpuCount) &&
Boolean(backupConfig.encryption) &&
Boolean(backupInterval?.interval) &&
(!backupInterval ||
@@ -211,16 +209,18 @@ export const EditBackupConfigComponent = ({
return (
<div>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Backups enabled</div>
<Switch
checked={backupConfig.isBackupsEnabled}
onChange={(checked) => {
updateBackupConfig({ isBackupsEnabled: checked });
}}
size="small"
/>
</div>
{database.id && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Backups enabled</div>
<Switch
checked={backupConfig.isBackupsEnabled}
onChange={(checked) => {
updateBackupConfig({ isBackupsEnabled: checked });
}}
size="small"
/>
</div>
)}
{backupConfig.isBackupsEnabled && (
<>
@@ -404,27 +404,6 @@ export const EditBackupConfigComponent = ({
</div>
)}
<div className="mt-5 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">CPU count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={16}
value={backupConfig.cpuCount}
onChange={(value) => updateBackupConfig({ cpuCount: value || 1 })}
size="small"
className="w-full max-w-[200px] grow"
/>
<Tooltip
className="cursor-pointer"
title="Number of CPU cores to use for restore processing. Higher values may speed up restores, but use more resources."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Store period</div>
<div className="flex items-center">

View File

@@ -31,8 +31,6 @@ const createInitialDatabase = (workspaceId: string): Database =>
workspaceId,
storePeriod: Period.MONTH,
postgresql: {} as PostgresqlDatabase,
type: DatabaseType.POSTGRES,
storage: {} as unknown as Storage,
@@ -52,13 +50,13 @@ const initializeDatabaseTypeData = (db: Database): Database => {
switch (db.type) {
case DatabaseType.POSTGRES:
return { ...base, postgresql: db.postgresql ?? ({} as PostgresqlDatabase) };
return { ...base, postgresql: db.postgresql ?? ({ cpuCount: 1 } as PostgresqlDatabase) };
case DatabaseType.MYSQL:
return { ...base, mysql: db.mysql ?? ({} as MysqlDatabase) };
case DatabaseType.MARIADB:
return { ...base, mariadb: db.mariadb ?? ({} as MariadbDatabase) };
case DatabaseType.MONGODB:
return { ...base, mongodb: db.mongodb ?? ({} as MongodbDatabase) };
return { ...base, mongodb: db.mongodb ?? ({ cpuCount: 1 } as MongodbDatabase) };
default:
return db;
}

View File

@@ -23,7 +23,17 @@ export const CreateReadOnlyComponent = ({
const isPostgres = database.type === DatabaseType.POSTGRES;
const isMysql = database.type === DatabaseType.MYSQL;
const databaseTypeName = isPostgres ? 'PostgreSQL' : isMysql ? 'MySQL' : 'database';
const isMariadb = database.type === DatabaseType.MARIADB;
const isMongodb = database.type === DatabaseType.MONGODB;
const databaseTypeName = isPostgres
? 'PostgreSQL'
: isMysql
? 'MySQL'
: isMariadb
? 'MariaDB'
: isMongodb
? 'MongoDB'
: 'database';
const checkReadOnlyUser = async (): Promise<boolean> => {
try {
@@ -47,6 +57,12 @@ export const CreateReadOnlyComponent = ({
} else if (isMysql && database.mysql) {
database.mysql.username = response.username;
database.mysql.password = response.password;
} else if (isMariadb && database.mariadb) {
database.mariadb.username = response.username;
database.mariadb.password = response.password;
} else if (isMongodb && database.mongodb) {
database.mongodb.username = response.username;
database.mongodb.password = response.password;
}
onReadOnlyUserUpdated(database);

View File

@@ -65,7 +65,8 @@ export const EditDatabaseBaseInfoComponent = ({
switch (newType) {
case DatabaseType.POSTGRES:
updatedDatabase.postgresql = editingDatabase.postgresql ?? ({} as PostgresqlDatabase);
updatedDatabase.postgresql =
editingDatabase.postgresql ?? ({ cpuCount: 1 } as PostgresqlDatabase);
break;
case DatabaseType.MYSQL:
updatedDatabase.mysql = editingDatabase.mysql ?? ({} as MysqlDatabase);
@@ -74,7 +75,7 @@ export const EditDatabaseBaseInfoComponent = ({
updatedDatabase.mariadb = editingDatabase.mariadb ?? ({} as MariadbDatabase);
break;
case DatabaseType.MONGODB:
updatedDatabase.mongodb = editingDatabase.mongodb ?? ({} as MongodbDatabase);
updatedDatabase.mongodb = editingDatabase.mongodb ?? ({ cpuCount: 1 } as MongodbDatabase);
break;
}

View File

@@ -1,5 +1,5 @@
import { CopyOutlined, DownOutlined, UpOutlined } from '@ant-design/icons';
import { App, Button, Input, InputNumber, Switch } from 'antd';
import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import { App, Button, Input, InputNumber, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { type Database, databaseApi } from '../../../../entity/databases';
@@ -78,6 +78,7 @@ export const EditMongoDbSpecificDataComponent = ({
database: result.database,
authDatabase: result.authDatabase,
useTls: result.useTls,
cpuCount: 1,
},
};
@@ -285,7 +286,7 @@ export const EditMongoDbSpecificDataComponent = ({
</div>
)}
<div className="mb-3 flex w-full items-center">
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Use TLS</div>
<Switch
checked={editingDatabase.mongodb?.useTls}
@@ -302,7 +303,36 @@ export const EditMongoDbSpecificDataComponent = ({
/>
</div>
<div className="mt-4 mb-3 flex items-center">
<div className="mb-5 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={16}
value={editingDatabase.mongodb?.cpuCount || 1}
onChange={(value) => {
if (!editingDatabase.mongodb) return;
setEditingDatabase({
...editingDatabase,
mongodb: { ...editingDatabase.mongodb, cpuCount: value || 1 },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
/>
<Tooltip
className="cursor-pointer"
title="Number of CPU cores to use for backup and restore operations. Higher values may speed up operations but use more resources."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
<div className="mt-4 mb-1 flex items-center">
<div
className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
onClick={() => setShowAdvanced(!isShowAdvanced)}
@@ -318,24 +348,26 @@ export const EditMongoDbSpecificDataComponent = ({
</div>
{isShowAdvanced && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Auth database</div>
<Input
value={editingDatabase.mongodb?.authDatabase}
onChange={(e) => {
if (!editingDatabase.mongodb) return;
<>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Auth database</div>
<Input
value={editingDatabase.mongodb?.authDatabase}
onChange={(e) => {
if (!editingDatabase.mongodb) return;
setEditingDatabase({
...editingDatabase,
mongodb: { ...editingDatabase.mongodb, authDatabase: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="admin"
/>
</div>
setEditingDatabase({
...editingDatabase,
mongodb: { ...editingDatabase.mongodb, authDatabase: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="admin"
/>
</div>
</>
)}
<div className="mt-5 flex">

View File

@@ -82,6 +82,7 @@ export const EditPostgreSqlSpecificDataComponent = ({
password: result.password,
database: result.database,
isHttps: result.isHttps,
cpuCount: 1,
},
};
@@ -338,7 +339,7 @@ export const EditPostgreSqlSpecificDataComponent = ({
</div>
)}
<div className="mb-3 flex w-full items-center">
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Use HTTPS</div>
<Switch
checked={editingDatabase.postgresql?.isHttps}
@@ -355,7 +356,38 @@ export const EditPostgreSqlSpecificDataComponent = ({
/>
</div>
<div className="mt-4 mb-3 flex items-center">
{isRestoreMode && (
<div className="mb-5 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div className="flex items-center">
<InputNumber
min={1}
max={128}
value={editingDatabase.postgresql?.cpuCount || 1}
onChange={(value) => {
if (!editingDatabase.postgresql) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, cpuCount: value || 1 },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[75px] grow"
/>
<Tooltip
className="cursor-pointer"
title="Number of CPU cores to use for backup and restore operations. Higher values may speed up operations but use more resources."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
)}
<div className="mt-4 mb-1 flex items-center">
<div
className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
onClick={() => setShowAdvanced(!isShowAdvanced)}

View File

@@ -43,6 +43,11 @@ export const ShowMongoDbSpecificDataComponent = ({ database }: Props) => {
<div>{database.mongodb.authDatabase}</div>
</div>
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">CPU count</div>
<div>{database.mongodb?.cpuCount || 1}</div>
</div>
</div>
);
};

View File

@@ -14,7 +14,8 @@ export function EditSFTPStorageComponent({ storage, setStorage, setUnsaved }: Pr
const hasAdvancedValues = !!storage?.sftpStorage?.skipHostKeyVerify;
const [showAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
const authMethod = storage?.sftpStorage?.privateKey ? 'privateKey' : 'password';
const initialAuthMethod = storage?.sftpStorage?.privateKey ? 'privateKey' : 'password';
const [authMethod, setAuthMethod] = useState<'password' | 'privateKey'>(initialAuthMethod);
return (
<>
@@ -93,7 +94,10 @@ export function EditSFTPStorageComponent({ storage, setStorage, setUnsaved }: Pr
onChange={(e) => {
if (!storage?.sftpStorage) return;
if (e.target.value === 'password') {
const newMethod = e.target.value as 'password' | 'privateKey';
setAuthMethod(newMethod);
if (newMethod === 'password') {
setStorage({
...storage,
sftpStorage: {

View File

@@ -130,7 +130,7 @@ export function AdminPasswordComponent({
}}
type="primary"
>
Set Password
Set password
</Button>
{adminPasswordError && (

View File

@@ -7,7 +7,7 @@ export function AuthNavbarComponent() {
<div className="flex h-[65px] items-center justify-center px-5 pt-5 sm:justify-start">
<div className="flex items-center gap-3 hover:opacity-80">
<a href="https://databasus.com" target="_blank" rel="noreferrer">
<img className="h-[45px] w-[45px]" src="/logo.svg" />
<img className="h-[45px] w-[45px] p-1" src="/logo.svg" />
</a>
<div className="text-xl font-bold">

View File

@@ -193,7 +193,7 @@ export const MainScreenComponent = () => {
<div className="mb-2 flex h-[50px] items-center rounded bg-white px-2 py-2 shadow md:mb-3 md:h-[60px] md:p-3 dark:bg-gray-800">
<div className="flex items-center gap-2 hover:opacity-80 md:gap-3">
<a href="https://databasus.com" target="_blank" rel="noreferrer">
<img className="h-[30px] w-[30px] md:h-[40px] md:w-[40px]" src="/logo.svg" />
<img className="h-[30px] w-[30px] p-1 md:h-[40px] md:w-[40px]" src="/logo.svg" />
</a>
</div>