Compare commits

...

23 Commits

Author SHA1 Message Date
Rostislav Dugin
e1f466c965 FIX (mysql): Fix MySQL tests 2025-12-20 21:18:41 +03:00
Rostislav Dugin
a0f284e06b Merge branch 'main' of https://github.com/RostislavDugin/postgresus 2025-12-20 19:21:20 +03:00
Rostislav Dugin
8638b2d136 FEATURE (databases): Add MySQL database 2025-12-20 19:14:48 +03:00
github-actions[bot]
16d4f506bc Update CITATION.cff to v2.10.0 2025-12-19 21:41:29 +00:00
Rostislav Dugin
c100d94a92 FIX (tidy): Run go mod tidy 2025-12-20 00:25:39 +03:00
Rostislav Dugin
f14739a1fb FEATURE (intervals): Add cron intervals for backups 2025-12-20 00:23:39 +03:00
github-actions[bot]
b7d2521088 Update CITATION.cff to v2.9.0 2025-12-19 20:39:27 +00:00
Rostislav Dugin
eb8e5aa428 FEATURE (storages): Add SFTP 2025-12-19 23:24:16 +03:00
github-actions[bot]
1f030bd8fb Update CITATION.cff to v2.8.1 2025-12-19 11:44:37 +00:00
Rostislav Dugin
b278a79104 FIX (databases): Remove optional text from db name field 2025-12-19 14:28:54 +03:00
github-actions[bot]
b74ae734af Update CITATION.cff to v2.8.0 2025-12-18 16:13:17 +00:00
Rostislav Dugin
d21a9398c6 FIX (Dockerfile): Upgrade Go version 2025-12-18 18:57:26 +03:00
Rostislav Dugin
6ad7b95b7d FIX (go tidy): Run go mod tidy 2025-12-18 18:42:02 +03:00
Rostislav Dugin
8432d1626f FIX (linting): Increase lint timeout 2025-12-18 18:36:11 +03:00
Rostislav Dugin
d7f631fa93 FIX (golangci): Upgrade version of golangci 2025-12-18 18:33:41 +03:00
Rostislav Dugin
c3fb2aa529 FIX (golangci): Upgrade version of golangci 2025-12-18 18:31:03 +03:00
Rostislav Dugin
1817937409 FIX (ci \ cd): Upgrade Go version 2025-12-18 18:16:37 +03:00
Rostislav Dugin
3172396668 FIX (extensions): Exclude extensions comments as well 2025-12-18 17:54:52 +03:00
Rostislav Dugin
9cd5c8c57c Merge branch 'main' of https://github.com/RostislavDugin/postgresus 2025-12-18 17:49:24 +03:00
Rostislav Dugin
d8826d85c3 FEATURE (storages): Add rclone 2025-12-18 17:46:16 +03:00
github-actions[bot]
49fdd46cbe Update CITATION.cff to v2.7.0 2025-12-18 11:49:21 +00:00
Rostislav Dugin
c6261d434b FEATURE (restores): Allow to exclude extensions over restore 2025-12-18 14:34:32 +03:00
github-actions[bot]
918002acde Update CITATION.cff to v2.6.0 2025-12-17 14:03:33 +00:00
117 changed files with 9859 additions and 1124 deletions

67
.dockerignore Normal file
View File

@@ -0,0 +1,67 @@
# Git and GitHub
.git
.gitignore
.github
# Node modules everywhere
node_modules
**/node_modules
# Backend - exclude everything except what's needed for build
backend/tools
backend/mysqldata
backend/pgdata
backend/temp
backend/images
backend/bin
backend/*.exe
# Scripts and data directories
scripts
postgresus-data
# IDE and editor files
.idea
.vscode
.cursor
**/*.swp
**/*.swo
# Documentation and articles (not needed for build)
articles
docs
pages
# Notifiers not needed in container
notifiers
# Dist (will be built fresh)
frontend/dist
# Environment files (handled separately)
.env.local
.env.development
# Logs and temp files
**/*.log
tmp
temp
# OS files
.DS_Store
Thumbs.db
# Helm charts and deployment configs
deploy
# License and other root files
LICENSE
CITATION.cff
*.md
assets
# Python cache
**/__pycache__
# Pre-commit config
.pre-commit-config.yaml

View File

@@ -17,7 +17,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
go-version: "1.24.4"
- name: Cache Go modules
uses: actions/cache@v4
@@ -31,7 +31,7 @@ jobs:
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.60.3
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.7.2
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Install swag for swagger generation
@@ -116,7 +116,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
go-version: "1.24.4"
- name: Cache Go modules
uses: actions/cache@v4
@@ -165,6 +165,14 @@ jobs:
TEST_AZURITE_BLOB_PORT=10000
# testing NAS
TEST_NAS_PORT=7006
# testing FTP
TEST_FTP_PORT=7007
# testing SFTP
TEST_SFTP_PORT=7008
# testing MySQL
TEST_MYSQL_57_PORT=33057
TEST_MYSQL_80_PORT=33080
TEST_MYSQL_84_PORT=33084
# testing Telegram
TEST_TELEGRAM_BOT_TOKEN=${{ secrets.TEST_TELEGRAM_BOT_TOKEN }}
TEST_TELEGRAM_CHAT_ID=${{ secrets.TEST_TELEGRAM_CHAT_ID }}
@@ -200,6 +208,20 @@ jobs:
# Wait for Azurite
timeout 60 bash -c 'until nc -z localhost 10000; do sleep 2; done'
# Wait for FTP
timeout 60 bash -c 'until nc -z localhost 7007; do sleep 2; done'
# Wait for SFTP
timeout 60 bash -c 'until nc -z localhost 7008; do sleep 2; done'
# Wait for MySQL containers
echo "Waiting for MySQL 5.7..."
timeout 120 bash -c 'until docker exec test-mysql-57 mysqladmin ping -h localhost -u root -prootpassword --silent 2>/dev/null; do sleep 2; done'
echo "Waiting for MySQL 8.0..."
timeout 120 bash -c 'until docker exec test-mysql-80 mysqladmin ping -h localhost -u root -prootpassword --silent 2>/dev/null; do sleep 2; done'
echo "Waiting for MySQL 8.4..."
timeout 120 bash -c 'until docker exec test-mysql-84 mysqladmin ping -h localhost -u root -prootpassword --silent 2>/dev/null; do sleep 2; done'
- name: Create data and temp directories
run: |
# Create directories that are used for backups and restore
@@ -207,12 +229,53 @@ jobs:
mkdir -p postgresus-data/backups
mkdir -p postgresus-data/temp
- name: Install PostgreSQL client tools
- name: Cache PostgreSQL client tools
id: cache-postgres
uses: actions/cache@v4
with:
path: /usr/lib/postgresql
key: postgres-clients-12-18-v1
- name: Cache MySQL client tools
id: cache-mysql
uses: actions/cache@v4
with:
path: backend/tools/mysql
key: mysql-clients-57-80-84-v1
- name: Install MySQL dependencies
run: |
sudo apt-get update -qq
sudo apt-get install -y -qq libncurses6
sudo ln -sf /usr/lib/x86_64-linux-gnu/libncurses.so.6 /usr/lib/x86_64-linux-gnu/libncurses.so.5
sudo ln -sf /usr/lib/x86_64-linux-gnu/libtinfo.so.6 /usr/lib/x86_64-linux-gnu/libtinfo.so.5
- name: Install PostgreSQL and MySQL client tools
if: steps.cache-postgres.outputs.cache-hit != 'true' || steps.cache-mysql.outputs.cache-hit != 'true'
run: |
chmod +x backend/tools/download_linux.sh
cd backend/tools
./download_linux.sh
- name: Setup PostgreSQL symlinks (when using cache)
if: steps.cache-postgres.outputs.cache-hit == 'true'
run: |
cd backend/tools
mkdir -p postgresql
for version in 12 13 14 15 16 17 18; do
version_dir="postgresql/postgresql-$version"
mkdir -p "$version_dir/bin"
pg_bin_dir="/usr/lib/postgresql/$version/bin"
if [ -d "$pg_bin_dir" ]; then
ln -sf "$pg_bin_dir/pg_dump" "$version_dir/bin/pg_dump"
ln -sf "$pg_bin_dir/pg_dumpall" "$version_dir/bin/pg_dumpall"
ln -sf "$pg_bin_dir/psql" "$version_dir/bin/psql"
ln -sf "$pg_bin_dir/pg_restore" "$version_dir/bin/pg_restore"
ln -sf "$pg_bin_dir/createdb" "$version_dir/bin/createdb"
ln -sf "$pg_bin_dir/dropdb" "$version_dir/bin/dropdb"
fi
done
- name: Run database migrations
run: |
cd backend

View File

@@ -29,5 +29,5 @@ keywords:
- system-administration
- database-backup
license: Apache-2.0
version: 2.5.1
date-released: "2025-06-01"
version: 2.10.0
date-released: "2025-12-19"

View File

@@ -22,7 +22,7 @@ RUN npm run build
# ========= BUILD BACKEND =========
# Backend build stage
FROM --platform=$BUILDPLATFORM golang:1.23.3 AS backend-build
FROM --platform=$BUILDPLATFORM golang:1.24.4 AS backend-build
# Make TARGET args available early so tools built here match the final image arch
ARG TARGETOS
@@ -77,16 +77,57 @@ ENV APP_VERSION=$APP_VERSION
# Set production mode for Docker containers
ENV ENV_MODE=production
# Install PostgreSQL server and client tools (versions 12-18)
# Install PostgreSQL server and client tools (versions 12-18), MySQL client tools (5.7, 8.0, 8.4), and rclone
# Note: MySQL 5.7 is only available for x86_64, MySQL 8.0+ supports both x86_64 and ARM64
# Note: MySQL binaries require libncurses5 for terminal handling
ARG TARGETARCH
RUN apt-get update && apt-get install -y --no-install-recommends \
wget ca-certificates gnupg lsb-release sudo gosu && \
wget ca-certificates gnupg lsb-release sudo gosu curl unzip xz-utils libncurses5 && \
# Add PostgreSQL repository
wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" \
> /etc/apt/sources.list.d/pgdg.list && \
apt-get update && \
# Install PostgreSQL
apt-get install -y --no-install-recommends \
postgresql-17 postgresql-18 postgresql-client-12 postgresql-client-13 postgresql-client-14 postgresql-client-15 \
postgresql-client-16 postgresql-client-17 postgresql-client-18 && \
postgresql-client-16 postgresql-client-17 postgresql-client-18 rclone && \
# Create MySQL directories
mkdir -p /usr/local/mysql-5.7/bin /usr/local/mysql-8.0/bin /usr/local/mysql-8.4/bin && \
# Download and install MySQL client tools (architecture-aware)
# MySQL 5.7: Only available for x86_64
if [ "$TARGETARCH" = "amd64" ]; then \
wget -q https://dev.mysql.com/get/Downloads/MySQL-5.7/mysql-5.7.44-linux-glibc2.12-x86_64.tar.gz -O /tmp/mysql57.tar.gz && \
tar -xzf /tmp/mysql57.tar.gz -C /tmp && \
cp /tmp/mysql-5.7.*/bin/mysql /usr/local/mysql-5.7/bin/ && \
cp /tmp/mysql-5.7.*/bin/mysqldump /usr/local/mysql-5.7/bin/ && \
rm -rf /tmp/mysql-5.7.* /tmp/mysql57.tar.gz; \
else \
echo "MySQL 5.7 not available for $TARGETARCH, skipping..."; \
fi && \
# MySQL 8.0: Available for both x86_64 and ARM64
if [ "$TARGETARCH" = "amd64" ]; then \
wget -q https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-8.0.40-linux-glibc2.17-x86_64-minimal.tar.xz -O /tmp/mysql80.tar.xz; \
elif [ "$TARGETARCH" = "arm64" ]; then \
wget -q https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-8.0.40-linux-glibc2.17-aarch64-minimal.tar.xz -O /tmp/mysql80.tar.xz; \
fi && \
tar -xJf /tmp/mysql80.tar.xz -C /tmp && \
cp /tmp/mysql-8.0.*/bin/mysql /usr/local/mysql-8.0/bin/ && \
cp /tmp/mysql-8.0.*/bin/mysqldump /usr/local/mysql-8.0/bin/ && \
rm -rf /tmp/mysql-8.0.* /tmp/mysql80.tar.xz && \
# MySQL 8.4: Available for both x86_64 and ARM64
if [ "$TARGETARCH" = "amd64" ]; then \
wget -q https://dev.mysql.com/get/Downloads/MySQL-8.4/mysql-8.4.3-linux-glibc2.17-x86_64-minimal.tar.xz -O /tmp/mysql84.tar.xz; \
elif [ "$TARGETARCH" = "arm64" ]; then \
wget -q https://dev.mysql.com/get/Downloads/MySQL-8.4/mysql-8.4.3-linux-glibc2.17-aarch64-minimal.tar.xz -O /tmp/mysql84.tar.xz; \
fi && \
tar -xJf /tmp/mysql84.tar.xz -C /tmp && \
cp /tmp/mysql-8.4.*/bin/mysql /usr/local/mysql-8.4/bin/ && \
cp /tmp/mysql-8.4.*/bin/mysqldump /usr/local/mysql-8.4/bin/ && \
rm -rf /tmp/mysql-8.4.* /tmp/mysql84.tar.xz && \
# Make MySQL binaries executable (ignore errors for empty dirs on ARM64)
chmod +x /usr/local/mysql-*/bin/* 2>/dev/null || true && \
# Cleanup
rm -rf /var/lib/apt/lists/*
# Create postgres user and set up directories

View File

@@ -36,25 +36,25 @@
## ✨ Features
### 🔄 **Scheduled Backups**
### 🔄 **Scheduled backups**
- **Flexible scheduling**: hourly, daily, weekly, monthly
- **Flexible scheduling**: hourly, daily, weekly, monthly or cron
- **Precise timing**: run backups at specific times (e.g., 4 AM during low traffic)
- **Smart compression**: 4-8x space savings with balanced compression (~20% overhead)
### 🗄️ **Multiple Storage Destinations** <a href="https://postgresus.com/storages">(view supported)</a>
### 🗄️ **Multiple storage destinations** <a href="https://postgresus.com/storages">(view supported)</a>
- **Local storage**: Keep backups on your VPS/server
- **Cloud storage**: S3, Cloudflare R2, Google Drive, NAS, Dropbox and more
- **Cloud storage**: S3, Cloudflare R2, Google Drive, NAS, Dropbox, SFTP, Rclone and more
- **Secure**: All data stays under your control
### 📱 **Smart Notifications** <a href="https://postgresus.com/notifiers">(view supported)</a>
### 📱 **Smart notifications** <a href="https://postgresus.com/notifiers">(view supported)</a>
- **Multiple channels**: Email, Telegram, Slack, Discord, webhooks
- **Real-time updates**: Success and failure notifications
- **Team integration**: Perfect for DevOps workflows
### 🐘 **PostgreSQL Support**
### 🐘 **PostgreSQL support**
- **Multiple versions**: PostgreSQL 12, 13, 14, 15, 16, 17 and 18
- **SSL support**: Secure connections available
@@ -67,7 +67,7 @@
- **Encryption for secrets**: Any sensitive data is encrypted and never exposed, even in logs or error messages
- **Read-only user**: Postgresus uses by default a read-only user for backups and never stores anything that can change your data
### 👥 **Suitable for Teams** <a href="https://postgresus.com/access-management">(docs)</a>
### 👥 **Suitable for teams** <a href="https://postgresus.com/access-management">(docs)</a>
- **Workspaces**: Group databases, notifiers and storages for different projects or teams
- **Access management**: Control who can view or manage specific databases with role-based permissions
@@ -80,7 +80,7 @@
- **Dark & light themes**: Choose the look that suits your workflow
- **Mobile adaptive**: Check your backups from anywhere on any device
### ☁️ **Works with Self-Hosted & Cloud Databases**
### ☁️ **Works with self-hosted & cloud databases**
Postgresus works seamlessly with both self-hosted PostgreSQL and cloud-managed databases:
@@ -89,7 +89,7 @@ Postgresus works seamlessly with both self-hosted PostgreSQL and cloud-managed d
- **Why no PITR?**: Cloud providers already offer native PITR, and external PITR backups cannot be restored to managed cloud databases — making them impractical for cloud-hosted PostgreSQL
- **Practical granularity**: Hourly and daily backups are sufficient for 99% of projects without the operational complexity of WAL archiving
### 🐳 **Self-Hosted & Secure**
### 🐳 **Self-hosted & secure**
- **Docker-based**: Easy deployment and management
- **Privacy-first**: All your data stays on your infrastructure
@@ -111,7 +111,7 @@ You have several ways to install Postgresus:
You have three ways to install Postgresus: automated script (recommended), simple Docker run, or Docker Compose setup.
### Option 1: Automated Installation Script (Recommended, Linux only)
### Option 1: Automated installation script (recommended, Linux only)
The installation script will:
@@ -125,7 +125,7 @@ sudo curl -sSL https://raw.githubusercontent.com/RostislavDugin/postgresus/refs/
| sudo bash
```
### Option 2: Simple Docker Run
### Option 2: Simple Docker run
The easiest way to run Postgresus with embedded PostgreSQL:
@@ -144,7 +144,7 @@ This single command will:
- ✅ Store all data in `./postgresus-data` directory
- ✅ Automatically restart on system reboot
### Option 3: Docker Compose Setup
### Option 3: Docker Compose setup
Create a `docker-compose.yml` file with the following configuration:
@@ -212,13 +212,13 @@ For more options (NodePort, TLS, HTTPRoute for Gateway API), see the [Helm chart
1. **Access the dashboard**: Navigate to `http://localhost:4005`
2. **Add first DB for backup**: Click "New Database" and follow the setup wizard
3. **Configure schedule**: Choose from hourly, daily, weekly or monthly intervals
3. **Configure schedule**: Choose from hourly, daily, weekly, monthly or cron intervals
4. **Set database connection**: Enter your PostgreSQL credentials and connection details
5. **Choose storage**: Select where to store your backups (local, S3, Google Drive, etc.)
6. **Add notifications** (optional): Configure email, Telegram, Slack, or webhook notifications
7. **Save and start**: Postgresus will validate settings and begin the backup schedule
### 🔑 Resetting Password <a href="https://postgresus.com/password">(docs)</a>
### 🔑 Resetting password <a href="https://postgresus.com/password">(docs)</a>
If you need to reset the password, you can use the built-in password reset command:

View File

@@ -9,4 +9,4 @@ When applying changes, do not forget to refactor old code.
You can shortify, make more readable, improve code quality, etc.
Common logic can be extracted to functions, constants, files, etc.
After each large change with more than ~50-100 lines of code - always run `make lint` (from backend root folder).
After each large change with more than ~50-100 lines of code - always run `make lint` (from backend root folder) and, if you change frontend, run `npm run format` (from frontend root folder).

View File

@@ -41,4 +41,6 @@ TEST_SUPABASE_USERNAME=
TEST_SUPABASE_PASSWORD=
TEST_SUPABASE_DATABASE=
# FTP
TEST_FTP_PORT=7007
TEST_FTP_PORT=7007
# SFTP
TEST_SFTP_PORT=7008

1
backend/.gitignore vendored
View File

@@ -3,6 +3,7 @@ main
docker-compose.yml
pgdata
pgdata_test/
mysqldata/
main.exe
swagger/
swagger/*

View File

@@ -1,7 +1,7 @@
version: "2"
run:
timeout: 1m
timeout: 5m
tests: false
concurrency: 4

View File

@@ -31,14 +31,6 @@ services:
container_name: test-minio
command: server /data --console-address ":9001"
# Test Azurite container
test-azurite:
image: mcr.microsoft.com/azure-storage/azurite
ports:
- "${TEST_AZURITE_BLOB_PORT:-10000}:10000"
container_name: test-azurite
command: azurite-blob --blobHost 0.0.0.0
# Test PostgreSQL containers
test-postgres-12:
image: postgres:12
@@ -117,6 +109,14 @@ services:
container_name: test-postgres-18
shm_size: 1gb
# Test Azurite container
test-azurite:
image: mcr.microsoft.com/azure-storage/azurite
ports:
- "${TEST_AZURITE_BLOB_PORT:-10000}:10000"
container_name: test-azurite
command: azurite-blob --blobHost 0.0.0.0
# Test NAS server (Samba)
test-nas:
image: dperson/samba:latest
@@ -146,3 +146,69 @@ services:
- FTP_USER_HOME=/home/ftpusers/testuser
- FTP_PASSIVE_PORTS=30000:30009
container_name: test-ftp
# Test SFTP server
test-sftp:
image: atmoz/sftp:latest
ports:
- "${TEST_SFTP_PORT:-7008}:22"
command: testuser:testpassword:1001::upload
container_name: test-sftp
# Test MySQL containers
test-mysql-57:
image: mysql:5.7
ports:
- "${TEST_MYSQL_57_PORT:-33057}:3306"
environment:
- MYSQL_ROOT_PASSWORD=rootpassword
- MYSQL_DATABASE=testdb
- MYSQL_USER=testuser
- MYSQL_PASSWORD=testpassword
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
volumes:
- ./mysqldata/mysql-57:/var/lib/mysql
container_name: test-mysql-57
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-prootpassword"]
interval: 5s
timeout: 5s
retries: 10
test-mysql-80:
image: mysql:8.0
ports:
- "${TEST_MYSQL_80_PORT:-33080}:3306"
environment:
- MYSQL_ROOT_PASSWORD=rootpassword
- MYSQL_DATABASE=testdb
- MYSQL_USER=testuser
- MYSQL_PASSWORD=testpassword
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --default-authentication-plugin=mysql_native_password
volumes:
- ./mysqldata/mysql-80:/var/lib/mysql
container_name: test-mysql-80
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-prootpassword"]
interval: 5s
timeout: 5s
retries: 10
test-mysql-84:
image: mysql:8.4
ports:
- "${TEST_MYSQL_84_PORT:-33084}:3306"
environment:
- MYSQL_ROOT_PASSWORD=rootpassword
- MYSQL_DATABASE=testdb
- MYSQL_USER=testuser
- MYSQL_PASSWORD=testpassword
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
volumes:
- ./mysqldata/mysql-84:/var/lib/mysql
container_name: test-mysql-84
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-prootpassword"]
interval: 5s
timeout: 5s
retries: 10

View File

@@ -1,6 +1,6 @@
module postgresus-backend
go 1.23.3
go 1.24.4
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
@@ -12,40 +12,197 @@ require (
github.com/google/uuid v1.6.0
github.com/ilyakaznacheev/cleanenv v1.5.0
github.com/jackc/pgx/v5 v5.7.5
github.com/jlaffaye/ftp v0.2.0
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
github.com/jmoiron/sqlx v1.4.0
github.com/joho/godotenv v1.5.1
github.com/lib/pq v1.10.9
github.com/minio/minio-go/v7 v7.0.92
github.com/shirou/gopsutil/v4 v4.25.5
github.com/minio/minio-go/v7 v7.0.97
github.com/pkg/sftp v1.13.10
github.com/rclone/rclone v1.72.1
github.com/robfig/cron/v3 v3.0.1
github.com/shirou/gopsutil/v4 v4.25.10
github.com/stretchr/testify v1.11.1
github.com/swaggo/files v1.0.1
github.com/swaggo/gin-swagger v1.6.0
github.com/swaggo/swag v1.16.4
golang.org/x/crypto v0.41.0
golang.org/x/time v0.12.0
golang.org/x/crypto v0.46.0
golang.org/x/time v0.14.0
gorm.io/driver/postgres v1.5.11
gorm.io/gorm v1.26.1
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3 // indirect
github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
github.com/Files-com/files-sdk-go/v3 v3.2.264 // indirect
github.com/IBM/go-sdk-core/v5 v5.21.0 // indirect
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/go-srp v0.0.7 // indirect
github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
github.com/PuerkitoBio/goquery v1.10.3 // indirect
github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0 // indirect
github.com/abbot/go-http-auth v0.4.0 // indirect
github.com/anchore/go-lzo v0.1.0 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
github.com/aws/aws-sdk-go-v2 v1.39.6 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.31.17 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect
github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
github.com/aws/smithy-go v1.23.2 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.1.0 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/buengese/sgzip v0.1.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
github.com/clipperhouse/stringish v0.1.1 // indirect
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudinary/cloudinary-go/v2 v2.13.0 // indirect
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.6.0 // indirect
github.com/creasty/defaults v1.8.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/diskfs/go-diskfs v1.7.0 // indirect
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect
github.com/emersion/go-message v0.18.2 // indirect
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/go-chi/chi/v5 v5.2.3 // indirect
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
github.com/go-openapi/errors v0.22.4 // indirect
github.com/go-openapi/strfmt v0.25.0 // indirect
github.com/go-resty/resty/v2 v2.16.5 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gofrs/flock v0.13.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/gorilla/schema v1.4.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect
github.com/henrybear327/go-proton-api v1.0.0 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
github.com/klauspost/crc32 v1.3.0 // indirect
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lanrat/extsort v1.4.2 // indirect
github.com/lpar/date v1.0.0 // indirect
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-runewidth v0.0.19 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncw/swift/v2 v2.0.5 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/oracle/oci-go-sdk/v65 v65.104.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/peterh/liner v1.2.2 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/xattr v0.4.12 // indirect
github.com/pquerna/otp v1.5.0 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.2 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
github.com/relvacode/iso8601 v1.7.0 // indirect
github.com/rfjakob/eme v1.1.2 // indirect
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
github.com/samber/lo v1.52.0 // indirect
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/sony/gobreaker v1.0.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.25-0.20251022131615-eb24eb109368 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/t3rm1n4l/go-mega v0.0.0-20251031123324-a804aaa87491 // indirect
github.com/tklauser/go-sysconf v0.3.15 // indirect
github.com/tklauser/numcpus v0.10.0 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
github.com/zeebo/errs v1.4.0 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.etcd.io/bbolt v1.4.3 // indirect
go.mongodb.org/mongo-driver v1.17.6 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
golang.org/x/term v0.38.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect
moul.io/http2curl/v2 v2.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
storj.io/common v0.0.0-20251107171817-6221ae45072c // indirect
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
storj.io/infectious v0.0.2 // indirect
storj.io/picobuf v0.0.4 // indirect
storj.io/uplink v1.13.1 // indirect
)
require (
cloud.google.com/go/auth v0.16.2 // indirect
cloud.google.com/go/auth v0.17.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/geoffgarside/ber v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/hirochachacha/go-smb2 v1.1.0
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
google.golang.org/grpc v1.76.0 // indirect
)
require (
@@ -56,11 +213,11 @@ require (
github.com/bytedance/sonic v1.13.2 // indirect
github.com/bytedance/sonic/loader v0.2.4 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/ebitengine/purego v0.9.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/gabriel-vasile/mimetype v1.4.11 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
@@ -72,8 +229,8 @@ require (
github.com/go-openapi/swag v0.19.15 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/go-sql-driver/mysql v1.9.2 // indirect
github.com/go-playground/validator/v10 v10.28.0 // indirect
github.com/go-sql-driver/mysql v1.9.2
github.com/goccy/go-json v0.10.5 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
@@ -82,40 +239,39 @@ require (
github.com/jinzhu/now v1.1.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/klauspost/compress v1.18.1
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mailru/easyjson v0.9.1 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/minio/crc64nvme v1.0.1 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tinylib/msgp v1.5.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
golang.org/x/arch v0.17.0 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/api v0.239.0
google.golang.org/protobuf v1.36.6 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.33.0
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/tools v0.39.0 // indirect
google.golang.org/api v0.255.0
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 // indirect

File diff suppressed because it is too large Load Diff

View File

@@ -25,6 +25,7 @@ type EnvVariables struct {
DatabaseDsn string `env:"DATABASE_DSN" required:"true"`
EnvMode env_utils.EnvMode `env:"ENV_MODE" required:"true"`
PostgresesInstallDir string `env:"POSTGRES_INSTALL_DIR"`
MysqlInstallDir string `env:"MYSQL_INSTALL_DIR"`
DataFolder string
TempFolder string
@@ -47,8 +48,13 @@ type EnvVariables struct {
TestAzuriteBlobPort string `env:"TEST_AZURITE_BLOB_PORT"`
TestNASPort string `env:"TEST_NAS_PORT"`
TestFTPPort string `env:"TEST_FTP_PORT"`
TestNASPort string `env:"TEST_NAS_PORT"`
TestFTPPort string `env:"TEST_FTP_PORT"`
TestSFTPPort string `env:"TEST_SFTP_PORT"`
TestMysql57Port string `env:"TEST_MYSQL_57_PORT"`
TestMysql80Port string `env:"TEST_MYSQL_80_PORT"`
TestMysql84Port string `env:"TEST_MYSQL_84_PORT"`
// oauth
GitHubClientID string `env:"GITHUB_CLIENT_ID"`
@@ -151,6 +157,9 @@ func loadEnvVariables() {
env.PostgresesInstallDir = filepath.Join(backendRoot, "tools", "postgresql")
tools.VerifyPostgresesInstallation(log, env.EnvMode, env.PostgresesInstallDir)
env.MysqlInstallDir = filepath.Join(backendRoot, "tools", "mysql")
tools.VerifyMysqlInstallation(log, env.EnvMode, env.MysqlInstallDir)
// Store the data and temp folders one level below the root
// (projectRoot/postgresus-data -> /postgresus-data)
env.DataFolder = filepath.Join(filepath.Dir(backendRoot), "postgresus-data", "backups")

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"io"
"net/http"
"postgresus-backend/internal/features/databases"
users_middleware "postgresus-backend/internal/features/users/middleware"
"github.com/gin-gonic/gin"
@@ -181,7 +182,7 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
return
}
fileReader, err := c.backupService.GetBackupFile(user, id)
fileReader, dbType, err := c.backupService.GetBackupFile(user, id)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -192,10 +193,15 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
}
}()
extension := ".dump.zst"
if dbType == databases.DatabaseTypeMysql {
extension = ".sql.zst"
}
ctx.Header("Content-Type", "application/octet-stream")
ctx.Header(
"Content-Disposition",
fmt.Sprintf("attachment; filename=\"backup_%s.dump\"", id.String()),
fmt.Sprintf("attachment; filename=\"backup_%s%s\"", id.String(), extension),
)
_, err = io.Copy(ctx.Writer, fileReader)

View File

@@ -3,7 +3,7 @@ package backups
import (
"context"
usecases_postgresql "postgresus-backend/internal/features/backups/backups/usecases/postgresql"
usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/notifiers"
@@ -27,10 +27,8 @@ type CreateBackupUsecase interface {
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(
completedMBs float64,
),
) (*usecases_postgresql.BackupMetadata, error)
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error)
}
type BackupRemoveListener interface {

View File

@@ -502,19 +502,19 @@ func (s *BackupService) CancelBackup(
func (s *BackupService) GetBackupFile(
user *users_models.User,
backupID uuid.UUID,
) (io.ReadCloser, error) {
) (io.ReadCloser, databases.DatabaseType, error) {
backup, err := s.backupRepository.FindByID(backupID)
if err != nil {
return nil, err
return nil, "", err
}
database, err := s.databaseService.GetDatabaseByID(backup.DatabaseID)
if err != nil {
return nil, err
return nil, "", err
}
if database.WorkspaceID == nil {
return nil, errors.New("cannot download backup for database without workspace")
return nil, "", errors.New("cannot download backup for database without workspace")
}
canAccess, _, err := s.workspaceService.CanUserAccessWorkspace(
@@ -522,10 +522,10 @@ func (s *BackupService) GetBackupFile(
user,
)
if err != nil {
return nil, err
return nil, "", err
}
if !canAccess {
return nil, errors.New("insufficient permissions to download backup for this database")
return nil, "", errors.New("insufficient permissions to download backup for this database")
}
s.auditLogService.WriteAuditLog(
@@ -538,7 +538,12 @@ func (s *BackupService) GetBackupFile(
database.WorkspaceID,
)
return s.getBackupReader(backupID)
reader, err := s.getBackupReader(backupID)
if err != nil {
return nil, "", err
}
return reader, database.Type, nil
}
func (s *BackupService) deleteBackup(backup *Backup) error {

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
usecases_postgresql "postgresus-backend/internal/features/backups/backups/usecases/postgresql"
"postgresus-backend/internal/features/backups/backups/usecases/common"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
encryption_secrets "postgresus-backend/internal/features/encryption/secrets"
@@ -178,16 +178,13 @@ func (uc *CreateFailedBackupUsecase) Execute(
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(
completedMBs float64,
),
) (*usecases_postgresql.BackupMetadata, error) {
backupProgressListener(10) // Assume we completed 10MB
backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
backupProgressListener(10)
return nil, errors.New("backup failed")
}
type CreateSuccessBackupUsecase struct {
}
type CreateSuccessBackupUsecase struct{}
func (uc *CreateSuccessBackupUsecase) Execute(
ctx context.Context,
@@ -195,12 +192,10 @@ func (uc *CreateSuccessBackupUsecase) Execute(
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(
completedMBs float64,
),
) (*usecases_postgresql.BackupMetadata, error) {
backupProgressListener(10) // Assume we completed 10MB
return &usecases_postgresql.BackupMetadata{
backupProgressListener func(completedMBs float64),
) (*common.BackupMetadata, error) {
backupProgressListener(10)
return &common.BackupMetadata{
EncryptionSalt: nil,
EncryptionIV: nil,
Encryption: backups_config.BackupEncryptionNone,

View File

@@ -1,13 +1,7 @@
package usecases_postgresql
package common
import backups_config "postgresus-backend/internal/features/backups/config"
type EncryptionMetadata struct {
Salt string
IV string
Encryption backups_config.BackupEncryption
}
type BackupMetadata struct {
EncryptionSalt *string
EncryptionIV *string

View File

@@ -0,0 +1,22 @@
package common
import "io"
type CountingWriter struct {
Writer io.Writer
BytesWritten int64
}
func (cw *CountingWriter) Write(p []byte) (n int, err error) {
n, err = cw.Writer.Write(p)
cw.BytesWritten += int64(n)
return n, err
}
func (cw *CountingWriter) GetBytesWritten() int64 {
return cw.BytesWritten
}
func NewCountingWriter(writer io.Writer) *CountingWriter {
return &CountingWriter{Writer: writer}
}

View File

@@ -3,6 +3,9 @@ package usecases
import (
"context"
"errors"
usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common"
usecases_mysql "postgresus-backend/internal/features/backups/backups/usecases/mysql"
usecases_postgresql "postgresus-backend/internal/features/backups/backups/usecases/postgresql"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
@@ -13,20 +16,19 @@ import (
type CreateBackupUsecase struct {
CreatePostgresqlBackupUsecase *usecases_postgresql.CreatePostgresqlBackupUsecase
CreateMysqlBackupUsecase *usecases_mysql.CreateMysqlBackupUsecase
}
// Execute creates a backup of the database and returns the backup metadata
func (uc *CreateBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
backupProgressListener func(
completedMBs float64,
),
) (*usecases_postgresql.BackupMetadata, error) {
if database.Type == databases.DatabaseTypePostgres {
backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
switch database.Type {
case databases.DatabaseTypePostgres:
return uc.CreatePostgresqlBackupUsecase.Execute(
ctx,
backupID,
@@ -35,7 +37,18 @@ func (uc *CreateBackupUsecase) Execute(
storage,
backupProgressListener,
)
}
return nil, errors.New("database type not supported")
case databases.DatabaseTypeMysql:
return uc.CreateMysqlBackupUsecase.Execute(
ctx,
backupID,
backupConfig,
database,
storage,
backupProgressListener,
)
default:
return nil, errors.New("database type not supported")
}
}

View File

@@ -1,11 +1,13 @@
package usecases
import (
usecases_mysql "postgresus-backend/internal/features/backups/backups/usecases/mysql"
usecases_postgresql "postgresus-backend/internal/features/backups/backups/usecases/postgresql"
)
var createBackupUsecase = &CreateBackupUsecase{
usecases_postgresql.GetCreatePostgresqlBackupUsecase(),
usecases_mysql.GetCreateMysqlBackupUsecase(),
}
func GetCreateBackupUsecase() *CreateBackupUsecase {

View File

@@ -0,0 +1,608 @@
package usecases_mysql
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"log/slog"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/klauspost/compress/zstd"
"postgresus-backend/internal/config"
backup_encryption "postgresus-backend/internal/features/backups/backups/encryption"
usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
mysqltypes "postgresus-backend/internal/features/databases/databases/mysql"
encryption_secrets "postgresus-backend/internal/features/encryption/secrets"
"postgresus-backend/internal/features/storages"
"postgresus-backend/internal/util/encryption"
"postgresus-backend/internal/util/tools"
)
const (
	// Hard ceiling for one dump run; presumably kept just under 24h so a
	// stuck backup cannot overlap a daily schedule — TODO confirm.
	backupTimeout = 23 * time.Hour
	// How often the backup context polls for application shutdown.
	shutdownCheckInterval = 1 * time.Second
	// 8 MiB buffer for copying mysqldump stdout into the writer chain.
	copyBufferSize = 8 * 1024 * 1024
	// Emit at most one progress callback per this many MBs copied.
	progressReportIntervalMB = 1.0
	// zstd level applied to the stored artifact (middle speed/ratio).
	zstdStorageCompressionLevel = 3

	// mysqldump exit codes treated as likely connection/auth failures.
	exitCodeGenericError    = 1
	exitCodeConnectionError = 2
)
// CreateMysqlBackupUsecase streams a mysqldump of a MySQL database into a
// storage backend, zstd-compressing the output and optionally encrypting it.
type CreateMysqlBackupUsecase struct {
	logger           *slog.Logger
	secretKeyService *encryption_secrets.SecretKeyService
	fieldEncryptor   encryption.FieldEncryptor
}

// writeResult carries the outcome of a single dst.Write performed on a
// helper goroutine, so the copy loop can also watch for cancellation.
type writeResult struct {
	bytesWritten int
	writeErr     error
}
// Execute creates a backup of the given MySQL database via mysqldump and
// streams it to the given storage, reporting progress in MBs through
// backupProgressListener. It returns metadata describing the (possibly
// encrypted) artifact, or an error when backups are disabled, the MySQL
// configuration is missing/incomplete, or the dump itself fails.
func (uc *CreateMysqlBackupUsecase) Execute(
	ctx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	db *databases.Database,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
) (*usecases_common.BackupMetadata, error) {
	uc.logger.Info(
		"Creating MySQL backup via mysqldump",
		"databaseId", db.ID,
		"storageId", storage.ID,
	)

	if !backupConfig.IsBackupsEnabled {
		return nil, fmt.Errorf("backups are not enabled for this database: \"%s\"", db.Name)
	}

	my := db.Mysql
	if my == nil {
		return nil, fmt.Errorf("mysql database configuration is required")
	}

	// mysqldump is invoked with an explicit schema name, so it is required.
	if my.Database == nil || *my.Database == "" {
		return nil, fmt.Errorf("database name is required for mysqldump backups")
	}

	// Password is stored encrypted with a per-database key.
	decryptedPassword, err := uc.fieldEncryptor.Decrypt(db.ID, my.Password)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt database password: %w", err)
	}

	args := uc.buildMysqldumpArgs(my)

	return uc.streamToStorage(
		ctx,
		backupID,
		backupConfig,
		// Resolve the version-matched mysqldump binary from the local
		// tools installation.
		tools.GetMysqlExecutable(
			my.Version,
			tools.MysqlExecutableMysqldump,
			config.GetEnv().EnvMode,
			config.GetEnv().MysqlInstallDir,
		),
		args,
		decryptedPassword,
		storage,
		backupProgressListener,
		my,
	)
}
// buildMysqldumpArgs assembles the mysqldump command line for the given
// connection settings. Order matters: option flags first, then the schema
// name as the trailing positional argument.
func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatabase) []string {
	args := make([]string, 0, 16)
	args = append(args,
		"--host="+my.Host,
		"--port="+strconv.Itoa(my.Port),
		"--user="+my.Username,
		// Consistent snapshot without locking tables (InnoDB).
		"--single-transaction",
		// Include stored programs alongside table data.
		"--routines",
		"--triggers",
		"--events",
		// Keep the dump importable on servers without GTID state.
		"--set-gtid-purged=OFF",
		// Stream rows instead of buffering whole tables in memory.
		"--quick",
		"--verbose",
	)

	// Wire-compression flags depend on the server version.
	args = append(args, uc.getNetworkCompressionArgs(my.Version)...)

	if my.IsHttps {
		args = append(args, "--ssl-mode=REQUIRED")
	}

	// Trailing positional argument: the schema to dump.
	if my.Database != nil && *my.Database != "" {
		args = append(args, *my.Database)
	}

	return args
}
// getNetworkCompressionArgs returns the client/server wire-compression
// flags appropriate for the given server version: MySQL 8.0+ supports
// algorithm selection with a zstd level, while 5.7 (and anything
// unrecognized) only understands the legacy --compress switch.
func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.MysqlVersion) []string {
	const zstdCompressionLevel = 3

	if version == tools.MysqlVersion80 || version == tools.MysqlVersion84 {
		return []string{
			"--compression-algorithms=zstd",
			fmt.Sprintf("--zstd-compression-level=%d", zstdCompressionLevel),
		}
	}

	// MySQL 5.7 and unknown versions: legacy protocol compression.
	return []string{"--compress"}
}
// streamToStorage runs mysqldump and pipes its stdout through
// (optional encryption →) zstd compression → an io.Pipe whose read end a
// storage goroutine uploads under backupID. Progress is reported in MBs
// of uncompressed dump output. Returns metadata needed to decrypt the
// artifact later, or an error from whichever stage failed first.
func (uc *CreateMysqlBackupUsecase) streamToStorage(
	parentCtx context.Context,
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	mysqlBin string,
	args []string,
	password string,
	storage *storages.Storage,
	backupProgressListener func(completedMBs float64),
	myConfig *mysqltypes.MysqlDatabase,
) (*usecases_common.BackupMetadata, error) {
	uc.logger.Info("Streaming MySQL backup to storage", "mysqlBin", mysqlBin)

	// Cancelled on timeout, parent cancellation, or application shutdown.
	ctx, cancel := uc.createBackupContext(parentCtx)
	defer cancel()

	// Credentials go into a private defaults file, never onto the command
	// line (visible in `ps`) or into the environment.
	myCnfFile, err := uc.createTempMyCnfFile(myConfig, password)
	if err != nil {
		return nil, fmt.Errorf("failed to create .my.cnf: %w", err)
	}
	defer func() { _ = os.RemoveAll(filepath.Dir(myCnfFile)) }()

	// --defaults-file must come first for mysqldump to honor it.
	fullArgs := append([]string{"--defaults-file=" + myCnfFile}, args...)
	cmd := exec.CommandContext(ctx, mysqlBin, fullArgs...)

	uc.logger.Info("Executing MySQL backup command", "command", cmd.String())

	cmd.Env = os.Environ()
	// Blank out any inherited MYSQL_PWD so only the defaults file supplies
	// the password; force a UTF-8 locale for stable tool output.
	cmd.Env = append(cmd.Env,
		"MYSQL_PWD=",
		"LC_ALL=C.UTF-8",
		"LANG=C.UTF-8",
	)

	// NOTE(review): the pgStdout/pgStderr names look carried over from the
	// PostgreSQL variant of this usecase; they hold mysqldump's pipes.
	pgStdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("stdout pipe: %w", err)
	}

	pgStderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("stderr pipe: %w", err)
	}

	// Drain stderr concurrently so mysqldump (run with --verbose) cannot
	// block on a full stderr pipe; contents feed error reporting below.
	stderrCh := make(chan []byte, 1)
	go func() {
		stderrOutput, _ := io.ReadAll(pgStderr)
		stderrCh <- stderrOutput
	}()

	storageReader, storageWriter := io.Pipe()

	finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
		backupID,
		backupConfig,
		storageWriter,
	)
	if err != nil {
		return nil, err
	}

	zstdWriter, err := zstd.NewWriter(finalWriter,
		zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(zstdStorageCompressionLevel)))
	if err != nil {
		return nil, fmt.Errorf("failed to create zstd writer: %w", err)
	}

	// Counts uncompressed bytes entering the zstd writer (for progress).
	countingWriter := usecases_common.NewCountingWriter(zstdWriter)

	// Upload runs concurrently, consuming the read end of the pipe.
	saveErrCh := make(chan error, 1)
	go func() {
		saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
		saveErrCh <- saveErr
	}()

	if err = cmd.Start(); err != nil {
		return nil, fmt.Errorf("start %s: %w", filepath.Base(mysqlBin), err)
	}

	// Copy dump output through the writer chain on its own goroutine; both
	// channels are buffered size 1 so the goroutine can never leak.
	copyResultCh := make(chan error, 1)
	bytesWrittenCh := make(chan int64, 1)
	go func() {
		bytesWritten, err := uc.copyWithShutdownCheck(
			ctx,
			countingWriter,
			pgStdout,
			backupProgressListener,
		)
		// Sends in this order; the receiver below reads copyResultCh
		// first, which only unblocks after both sends are possible.
		bytesWrittenCh <- bytesWritten
		copyResultCh <- err
	}()

	copyErr := <-copyResultCh
	bytesWritten := <-bytesWrittenCh
	waitErr := cmd.Wait()

	// Cancellation takes priority over whatever downstream errors it caused.
	select {
	case <-ctx.Done():
		uc.cleanupOnCancellation(zstdWriter, encryptionWriter, storageWriter, saveErrCh)
		return nil, uc.checkCancellationReason()
	default:
	}

	// Close order matters: zstd flush → encryption writer → pipe EOF,
	// which lets the storage goroutine finish and report its result.
	if err := zstdWriter.Close(); err != nil {
		uc.logger.Error("Failed to close zstd writer", "error", err)
	}

	if err := uc.closeWriters(encryptionWriter, storageWriter); err != nil {
		// Still drain the storage goroutine to avoid leaking it.
		<-saveErrCh
		return nil, err
	}

	saveErr := <-saveErrCh
	stderrOutput := <-stderrCh

	// One final progress tick with the exact uncompressed size.
	if waitErr == nil && copyErr == nil && saveErr == nil && backupProgressListener != nil {
		sizeMB := float64(bytesWritten) / (1024 * 1024)
		backupProgressListener(sizeMB)
	}

	switch {
	case waitErr != nil:
		return nil, uc.buildMysqldumpErrorMessage(waitErr, stderrOutput, mysqlBin)
	case copyErr != nil:
		return nil, fmt.Errorf("copy to storage: %w", copyErr)
	case saveErr != nil:
		return nil, fmt.Errorf("save to storage: %w", saveErr)
	}

	return &backupMetadata, nil
}
// createTempMyCnfFile writes the connection credentials into a .my.cnf
// inside a fresh temp directory so the password never appears on the
// mysqldump command line. The caller is responsible for removing the
// returned file's parent directory.
func (uc *CreateMysqlBackupUsecase) createTempMyCnfFile(
	myConfig *mysqltypes.MysqlDatabase,
	password string,
) (string, error) {
	dir, err := os.MkdirTemp("", "mycnf")
	if err != nil {
		return "", fmt.Errorf("failed to create temp directory: %w", err)
	}

	cnf := fmt.Sprintf(`[client]
user=%s
password="%s"
host=%s
port=%d
`, myConfig.Username, tools.EscapeMysqlPassword(password), myConfig.Host, myConfig.Port)

	if myConfig.IsHttps {
		cnf += "ssl-mode=REQUIRED\n"
	}

	target := filepath.Join(dir, ".my.cnf")
	// 0600: the credentials file must be readable by this user only.
	if writeErr := os.WriteFile(target, []byte(cnf), 0600); writeErr != nil {
		return "", fmt.Errorf("failed to write .my.cnf: %w", writeErr)
	}

	return target, nil
}
// copyWithShutdownCheck copies src → dst like io.Copy but additionally:
//   - aborts when ctx is cancelled or the application signals shutdown,
//   - performs each write on a helper goroutine so a blocked writer
//     (e.g. a stalled storage pipe) cannot make cancellation unobservable,
//   - throttles progress callbacks to once per progressReportIntervalMB.
//
// Returns the number of bytes successfully written to dst.
func (uc *CreateMysqlBackupUsecase) copyWithShutdownCheck(
	ctx context.Context,
	dst io.Writer,
	src io.Reader,
	backupProgressListener func(completedMBs float64),
) (int64, error) {
	buf := make([]byte, copyBufferSize)
	var totalBytesWritten int64
	var lastReportedMB float64

	for {
		// Cancellation check between chunks.
		select {
		case <-ctx.Done():
			return totalBytesWritten, fmt.Errorf("copy cancelled: %w", ctx.Err())
		default:
		}

		if config.IsShouldShutdown() {
			return totalBytesWritten, fmt.Errorf("copy cancelled due to shutdown")
		}

		bytesRead, readErr := src.Read(buf)
		if bytesRead > 0 {
			// Write on a goroutine so we can keep watching ctx.
			// NOTE(review): if ctx wins the select, this goroutine may
			// stay blocked in Write until the downstream pipe is closed.
			writeResultCh := make(chan writeResult, 1)
			go func() {
				bytesWritten, writeErr := dst.Write(buf[0:bytesRead])
				writeResultCh <- writeResult{bytesWritten, writeErr}
			}()

			var bytesWritten int
			var writeErr error
			select {
			case <-ctx.Done():
				return totalBytesWritten, fmt.Errorf("copy cancelled during write: %w", ctx.Err())
			case result := <-writeResultCh:
				bytesWritten = result.bytesWritten
				writeErr = result.writeErr
			}

			// Defend against misbehaving writers (mirrors io.Copy checks).
			if bytesWritten < 0 || bytesRead < bytesWritten {
				bytesWritten = 0
				if writeErr == nil {
					writeErr = fmt.Errorf("invalid write result")
				}
			}
			if writeErr != nil {
				return totalBytesWritten, writeErr
			}
			if bytesRead != bytesWritten {
				return totalBytesWritten, io.ErrShortWrite
			}

			totalBytesWritten += int64(bytesWritten)

			// Throttled progress reporting (MBs written to dst).
			if backupProgressListener != nil {
				currentSizeMB := float64(totalBytesWritten) / (1024 * 1024)
				if currentSizeMB >= lastReportedMB+progressReportIntervalMB {
					backupProgressListener(currentSizeMB)
					lastReportedMB = currentSizeMB
				}
			}
		}

		if readErr != nil {
			if readErr != io.EOF {
				return totalBytesWritten, readErr
			}
			// EOF: clean end of the dump stream.
			break
		}
	}

	return totalBytesWritten, nil
}
// createBackupContext derives a context that is cancelled when any of the
// following happens: the backupTimeout elapses, the parent context is
// cancelled, or the application signals shutdown (polled once per
// shutdownCheckInterval). The caller must defer the returned cancel func.
func (uc *CreateMysqlBackupUsecase) createBackupContext(
	parentCtx context.Context,
) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithTimeout(parentCtx, backupTimeout)

	// Watcher goroutine exits as soon as ctx is done, so it cannot leak.
	go func() {
		ticker := time.NewTicker(shutdownCheckInterval)
		defer ticker.Stop()

		for {
			select {
			case <-ctx.Done():
				return
			case <-parentCtx.Done():
				cancel()
				return
			case <-ticker.C:
				if config.IsShouldShutdown() {
					cancel()
					return
				}
			}
		}
	}()

	return ctx, cancel
}
// setupBackupEncryption decides the head of the write chain based on the
// backup config. Without encryption it returns storageWriter unchanged
// and a nil *EncryptionWriter. With encryption it wraps storageWriter in
// an encrypting writer keyed from the master secret and records the
// base64 salt and nonce in the returned metadata so the backup can be
// decrypted during restore.
func (uc *CreateMysqlBackupUsecase) setupBackupEncryption(
	backupID uuid.UUID,
	backupConfig *backups_config.BackupConfig,
	storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) {
	metadata := usecases_common.BackupMetadata{}

	if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
		metadata.Encryption = backups_config.BackupEncryptionNone
		uc.logger.Info("Encryption disabled for backup", "backupId", backupID)
		return storageWriter, nil, metadata, nil
	}

	salt, err := backup_encryption.GenerateSalt()
	if err != nil {
		return nil, nil, metadata, fmt.Errorf("failed to generate salt: %w", err)
	}

	nonce, err := backup_encryption.GenerateNonce()
	if err != nil {
		return nil, nil, metadata, fmt.Errorf("failed to generate nonce: %w", err)
	}

	masterKey, err := uc.secretKeyService.GetSecretKey()
	if err != nil {
		return nil, nil, metadata, fmt.Errorf("failed to get master key: %w", err)
	}

	encWriter, err := backup_encryption.NewEncryptionWriter(
		storageWriter,
		masterKey,
		backupID,
		salt,
		nonce,
	)
	if err != nil {
		return nil, nil, metadata, fmt.Errorf("failed to create encrypting writer: %w", err)
	}

	// Persist salt/IV so restore can re-derive the data key.
	saltBase64 := base64.StdEncoding.EncodeToString(salt)
	nonceBase64 := base64.StdEncoding.EncodeToString(nonce)
	metadata.EncryptionSalt = &saltBase64
	metadata.EncryptionIV = &nonceBase64
	metadata.Encryption = backups_config.BackupEncryptionEncrypted

	uc.logger.Info("Encryption enabled for backup", "backupId", backupID)

	// The encrypting writer doubles as the chain head and the closeable
	// handle returned to the caller.
	return encWriter, encWriter, metadata, nil
}
// cleanupOnCancellation tears down the writer chain after the backup
// context was cancelled. The zstd and encryption writers are closed on
// goroutines because their Close may block flushing into a pipe nobody
// reads anymore; closing the pipe writer synchronously unblocks them and
// the storage goroutine, whose result is then drained from saveErrCh.
func (uc *CreateMysqlBackupUsecase) cleanupOnCancellation(
	zstdWriter *zstd.Encoder,
	encryptionWriter *backup_encryption.EncryptionWriter,
	storageWriter io.WriteCloser,
	saveErrCh chan error,
) {
	if zstdWriter != nil {
		go func() {
			if closeErr := zstdWriter.Close(); closeErr != nil {
				uc.logger.Error(
					"Failed to close zstd writer during cancellation",
					"error",
					closeErr,
				)
			}
		}()
	}

	if encryptionWriter != nil {
		go func() {
			if closeErr := encryptionWriter.Close(); closeErr != nil {
				uc.logger.Error(
					"Failed to close encrypting writer during cancellation",
					"error",
					closeErr,
				)
			}
		}()
	}

	if err := storageWriter.Close(); err != nil {
		uc.logger.Error("Failed to close pipe writer during cancellation", "error", err)
	}

	// Wait for the storage goroutine to observe the closed pipe and finish.
	<-saveErrCh
}
// closeWriters finalizes the write chain in order: the encryption writer
// first (so its trailer is flushed into the pipe), then the pipe writer
// (signalling EOF to the storage goroutine). On an encryption-close
// failure the pipe is still closed — to unblock the storage goroutine —
// and the encryption error is returned.
//
// The original implementation closed the encryption writer on a goroutine
// and immediately blocked reading its result channel, which is exactly a
// synchronous call with extra machinery; the indirection is removed.
func (uc *CreateMysqlBackupUsecase) closeWriters(
	encryptionWriter *backup_encryption.EncryptionWriter,
	storageWriter io.WriteCloser,
) error {
	if encryptionWriter != nil {
		if closeErr := encryptionWriter.Close(); closeErr != nil {
			uc.logger.Error("Failed to close encrypting writer", "error", closeErr)
			// Still close the pipe so the upload goroutine is not left
			// blocked on a half-open stream.
			if err := storageWriter.Close(); err != nil {
				uc.logger.Error("Failed to close pipe writer after encryption error", "error", err)
			}
			return fmt.Errorf("failed to close encryption writer: %w", closeErr)
		}
	}

	if err := storageWriter.Close(); err != nil {
		uc.logger.Error("Failed to close pipe writer", "error", err)
		return err
	}

	return nil
}
// checkCancellationReason distinguishes a process-wide shutdown from an
// ordinary cancellation so the surfaced backup error names the cause.
func (uc *CreateMysqlBackupUsecase) checkCancellationReason() error {
	if config.IsShouldShutdown() {
		return errors.New("backup cancelled due to shutdown")
	}

	return errors.New("backup cancelled")
}
// buildMysqldumpErrorMessage turns a mysqldump failure into a descriptive
// error. Generic exit codes (1 and 2) typically indicate connection or
// authentication problems, so for those the stderr output is mapped to a
// friendlier message via handleConnectionErrors.
//
// Uses errors.As rather than a direct type assertion so a wrapped
// *exec.ExitError (e.g. produced by fmt.Errorf with %w) is still
// recognized; the plain assertion in the original silently missed those.
func (uc *CreateMysqlBackupUsecase) buildMysqldumpErrorMessage(
	waitErr error,
	stderrOutput []byte,
	mysqlBin string,
) error {
	stderrStr := string(stderrOutput)
	errorMsg := fmt.Sprintf(
		"%s failed: %v stderr: %s",
		filepath.Base(mysqlBin),
		waitErr,
		stderrStr,
	)

	var exitErr *exec.ExitError
	if !errors.As(waitErr, &exitErr) {
		// Not an exit-status failure (e.g. the process never started).
		return errors.New(errorMsg)
	}

	exitCode := exitErr.ExitCode()
	if exitCode == exitCodeGenericError || exitCode == exitCodeConnectionError {
		return uc.handleConnectionErrors(stderrStr)
	}

	return errors.New(errorMsg)
}
// handleConnectionErrors maps well-known mysqldump stderr fragments to
// actionable error messages; the first matching pattern wins, and an
// unrecognized stderr falls through to a generic connection/auth error.
func (uc *CreateMysqlBackupUsecase) handleConnectionErrors(stderrStr string) error {
	// Ordered to preserve the specificity of the original checks.
	patterns := []struct {
		needles []string
		format  string
	}{
		{
			[]string{"access denied"},
			"MySQL access denied. Check username and password. stderr: %s",
		},
		{
			[]string{"can't connect", "connection refused"},
			"MySQL connection refused. Check if the server is running and accessible. stderr: %s",
		},
		{
			[]string{"unknown database"},
			"MySQL database does not exist. stderr: %s",
		},
		{
			[]string{"ssl"},
			"MySQL SSL connection failed. stderr: %s",
		},
		{
			[]string{"timeout"},
			"MySQL connection timeout. stderr: %s",
		},
	}

	for _, p := range patterns {
		for _, needle := range p.needles {
			if containsIgnoreCase(stderrStr, needle) {
				return fmt.Errorf(p.format, stderrStr)
			}
		}
	}

	return fmt.Errorf("MySQL connection or authentication error. stderr: %s", stderrStr)
}
// containsIgnoreCase reports whether substr occurs in str, comparing
// case-insensitively by lowercasing both operands first.
func containsIgnoreCase(str, substr string) bool {
	lowerStr, lowerSub := strings.ToLower(str), strings.ToLower(substr)
	return strings.Contains(lowerStr, lowerSub)
}

View File

@@ -0,0 +1,17 @@
package usecases_mysql
import (
"postgresus-backend/internal/features/encryption/secrets"
"postgresus-backend/internal/util/encryption"
"postgresus-backend/internal/util/logger"
)
// createMysqlBackupUsecase is the package-level singleton, wired with the
// shared logger, secret-key service, and field encryptor at init time.
var createMysqlBackupUsecase = &CreateMysqlBackupUsecase{
	logger.GetLogger(),
	secrets.GetSecretKeyService(),
	encryption.GetFieldEncryptor(),
}

// GetCreateMysqlBackupUsecase returns the shared usecase instance.
func GetCreateMysqlBackupUsecase() *CreateMysqlBackupUsecase {
	return createMysqlBackupUsecase
}

View File

@@ -16,6 +16,7 @@ import (
"postgresus-backend/internal/config"
backup_encryption "postgresus-backend/internal/features/backups/backups/encryption"
usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
pgtypes "postgresus-backend/internal/features/databases/databases/postgresql"
@@ -50,7 +51,6 @@ type writeResult struct {
writeErr error
}
// Execute creates a backup of the database
func (uc *CreatePostgresqlBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
@@ -60,7 +60,7 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
backupProgressListener func(
completedMBs float64,
),
) (*BackupMetadata, error) {
) (*usecases_common.BackupMetadata, error) {
uc.logger.Info(
"Creating PostgreSQL backup via pg_dump custom format",
"databaseId",
@@ -119,7 +119,7 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
storage *storages.Storage,
db *databases.Database,
backupProgressListener func(completedMBs float64),
) (*BackupMetadata, error) {
) (*usecases_common.BackupMetadata, error) {
uc.logger.Info("Streaming PostgreSQL backup to storage", "pgBin", pgBin, "args", args)
ctx, cancel := uc.createBackupContext(parentCtx)
@@ -131,7 +131,8 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
}
defer func() {
if pgpassFile != "" {
_ = os.Remove(pgpassFile)
// Remove the entire temp directory (which contains the .pgpass file)
_ = os.RemoveAll(filepath.Dir(pgpassFile))
}
}()
@@ -170,7 +171,7 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
return nil, err
}
countingWriter := &CountingWriter{writer: finalWriter}
countingWriter := usecases_common.NewCountingWriter(finalWriter)
// The backup ID becomes the object key / filename in storage
@@ -470,8 +471,8 @@ func (uc *CreatePostgresqlBackupUsecase) setupBackupEncryption(
backupID uuid.UUID,
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, BackupMetadata, error) {
metadata := BackupMetadata{}
) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) {
metadata := usecases_common.BackupMetadata{}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone

View File

@@ -1,20 +0,0 @@
package usecases_postgresql
import "io"
// CountingWriter wraps an io.Writer and counts the bytes written to it
type CountingWriter struct {
writer io.Writer
bytesWritten int64
}
func (cw *CountingWriter) Write(p []byte) (n int, err error) {
n, err = cw.writer.Write(p)
cw.bytesWritten += int64(n)
return n, err
}
// GetBytesWritten returns the total number of bytes written
func (cw *CountingWriter) GetBytesWritten() int64 {
return cw.bytesWritten
}

View File

@@ -0,0 +1,375 @@
package mysql
import (
"context"
"database/sql"
"errors"
"fmt"
"log/slog"
"regexp"
"time"
"postgresus-backend/internal/util/encryption"
"postgresus-backend/internal/util/tools"
_ "github.com/go-sql-driver/mysql"
"github.com/google/uuid"
)
// MysqlDatabase holds the connection settings for one MySQL server,
// persisted in the mysql_databases table. Password is stored encrypted
// (see EncryptSensitiveFields); Database may be nil until configured;
// Version is auto-detected on connection tests when blank.
type MysqlDatabase struct {
	ID         uuid.UUID          `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"`
	DatabaseID *uuid.UUID         `json:"databaseId" gorm:"type:uuid;column:database_id"`
	Version    tools.MysqlVersion `json:"version" gorm:"type:text;not null"`
	Host       string             `json:"host" gorm:"type:text;not null"`
	Port       int                `json:"port" gorm:"type:int;not null"`
	Username   string             `json:"username" gorm:"type:text;not null"`
	Password   string             `json:"password" gorm:"type:text;not null"`
	Database   *string            `json:"database" gorm:"type:text"`
	// IsHttps enables TLS (--ssl-mode=REQUIRED) for connections.
	IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"`
}
// TableName tells GORM which table backs this model.
func (m *MysqlDatabase) TableName() string {
	return "mysql_databases"
}
// Validate checks that all mandatory connection fields are present,
// returning the first missing-field error in a fixed order
// (host, port, username, password), or nil when everything is set.
func (m *MysqlDatabase) Validate() error {
	required := []struct {
		present bool
		message string
	}{
		{m.Host != "", "host is required"},
		{m.Port != 0, "port is required"},
		{m.Username != "", "username is required"},
		{m.Password != "", "password is required"},
	}

	for _, field := range required {
		if !field.present {
			return errors.New(field.message)
		}
	}

	return nil
}
// TestConnection verifies the stored credentials by opening a short-lived
// connection (15s total budget) and pinging the configured database, then
// detects and records the server version into m.Version on success.
// The stored password may be encrypted; decryptPasswordIfNeeded handles
// that — presumably it passes plaintext through unchanged (TODO confirm).
func (m *MysqlDatabase) TestConnection(
	logger *slog.Logger,
	encryptor encryption.FieldEncryptor,
	databaseID uuid.UUID,
) error {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	if m.Database == nil || *m.Database == "" {
		return errors.New("database name is required for MySQL backup")
	}

	password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID)
	if err != nil {
		return fmt.Errorf("failed to decrypt password: %w", err)
	}

	dsn := m.buildDSN(password, *m.Database)

	// sql.Open only validates the DSN; the PingContext below performs the
	// actual network round-trip.
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return fmt.Errorf("failed to connect to MySQL database '%s': %w", *m.Database, err)
	}
	defer func() {
		if closeErr := db.Close(); closeErr != nil {
			logger.Error("Failed to close MySQL connection", "error", closeErr)
		}
	}()

	// A single short-lived connection is enough for a health check.
	db.SetConnMaxLifetime(15 * time.Second)
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)

	if err := db.PingContext(ctx); err != nil {
		return fmt.Errorf("failed to ping MySQL database '%s': %w", *m.Database, err)
	}

	detectedVersion, err := detectMysqlVersion(ctx, db)
	if err != nil {
		return err
	}

	// Record the detected server version for later tool selection.
	m.Version = detectedVersion

	return nil
}
// HideSensitiveData blanks the password so the struct can be returned
// to API clients. Safe to call on a nil receiver.
func (m *MysqlDatabase) HideSensitiveData() {
	if m != nil {
		m.Password = ""
	}
}
// Update copies editable connection settings from incoming onto m.
// The password is only replaced when a non-empty value is provided, so
// clients can resubmit the form without re-entering it.
func (m *MysqlDatabase) Update(incoming *MysqlDatabase) {
	m.Version = incoming.Version
	m.Host = incoming.Host
	m.Port = incoming.Port
	m.Username = incoming.Username
	m.Database = incoming.Database
	m.IsHttps = incoming.IsHttps

	// An empty incoming password means "keep the existing one".
	if newPassword := incoming.Password; newPassword != "" {
		m.Password = newPassword
	}
}
// EncryptSensitiveFields encrypts the password in place using the
// supplied encryptor, keyed by the owning database ID. An empty
// password is left untouched.
func (m *MysqlDatabase) EncryptSensitiveFields(
	databaseID uuid.UUID,
	encryptor encryption.FieldEncryptor,
) error {
	if m.Password == "" {
		return nil
	}

	ciphertext, err := encryptor.Encrypt(databaseID, m.Password)
	if err != nil {
		return err
	}
	m.Password = ciphertext
	return nil
}
// PopulateVersionIfEmpty connects to the server and fills in m.Version
// when it is blank. It is a no-op when the version is already known or
// when no target database name is configured.
func (m *MysqlDatabase) PopulateVersionIfEmpty(
	logger *slog.Logger,
	encryptor encryption.FieldEncryptor,
	databaseID uuid.UUID,
) error {
	// Version already populated — nothing to do.
	if m.Version != "" {
		return nil
	}
	// Cannot build a DSN without a database name; silently skip.
	if m.Database == nil || *m.Database == "" {
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	plainPassword, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID)
	if err != nil {
		return fmt.Errorf("failed to decrypt password: %w", err)
	}

	conn, err := sql.Open("mysql", m.buildDSN(plainPassword, *m.Database))
	if err != nil {
		return fmt.Errorf("failed to connect to database: %w", err)
	}
	defer func() {
		if closeErr := conn.Close(); closeErr != nil {
			logger.Error("Failed to close connection", "error", closeErr)
		}
	}()

	version, err := detectMysqlVersion(ctx, conn)
	if err != nil {
		return err
	}
	m.Version = version
	return nil
}
// IsUserReadOnly reports whether the configured MySQL user holds only
// read privileges. It inspects SHOW GRANTS FOR CURRENT_USER() and
// returns false as soon as any write-capable privilege is found.
func (m *MysqlDatabase) IsUserReadOnly(
	ctx context.Context,
	logger *slog.Logger,
	encryptor encryption.FieldEncryptor,
	databaseID uuid.UUID,
) (bool, error) {
	password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID)
	if err != nil {
		return false, fmt.Errorf("failed to decrypt password: %w", err)
	}

	dsn := m.buildDSN(password, *m.Database)
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return false, fmt.Errorf("failed to connect to database: %w", err)
	}
	defer func() {
		if closeErr := db.Close(); closeErr != nil {
			logger.Error("Failed to close connection", "error", closeErr)
		}
	}()

	rows, err := db.QueryContext(ctx, "SHOW GRANTS FOR CURRENT_USER()")
	if err != nil {
		return false, fmt.Errorf("failed to check grants: %w", err)
	}
	defer func() { _ = rows.Close() }()

	// Privileges that allow modifying data or schema.
	writePrivileges := []string{
		"INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "ALTER",
		"INDEX", "GRANT OPTION", "ALL PRIVILEGES", "SUPER",
	}

	// Compile each pattern once. The original called regexp.MustCompile
	// inside the per-grant loop, recompiling len(writePrivileges)
	// regexes for every returned row. QuoteMeta is a no-op on these
	// literals but guards any future additions.
	patterns := make([]*regexp.Regexp, len(writePrivileges))
	for i, priv := range writePrivileges {
		patterns[i] = regexp.MustCompile(`(?i)\b` + regexp.QuoteMeta(priv) + `\b`)
	}

	for rows.Next() {
		var grant string
		if err := rows.Scan(&grant); err != nil {
			return false, fmt.Errorf("failed to scan grant: %w", err)
		}
		for _, pattern := range patterns {
			if pattern.MatchString(grant) {
				return false, nil
			}
		}
	}
	if err := rows.Err(); err != nil {
		return false, fmt.Errorf("error iterating grants: %w", err)
	}

	return true, nil
}
// CreateReadOnlyUser provisions a new MySQL account limited to read
// operations on the configured database (SELECT, SHOW VIEW, LOCK TABLES,
// TRIGGER, EVENT on `db`.*) plus the global PROCESS privilege. Retries
// up to 3 times when CREATE USER fails (e.g. random suffix collision).
//
// Returns the generated username ("postgresus-" + 8 random hex chars)
// and a UUID password.
//
// NOTE(review): MySQL DDL such as CREATE USER causes an implicit commit,
// so the transaction cannot fully undo a partially granted user —
// confirm whether explicit DROP USER cleanup on failure is needed.
func (m *MysqlDatabase) CreateReadOnlyUser(
	ctx context.Context,
	logger *slog.Logger,
	encryptor encryption.FieldEncryptor,
	databaseID uuid.UUID,
) (string, string, error) {
	password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID)
	if err != nil {
		return "", "", fmt.Errorf("failed to decrypt password: %w", err)
	}

	dsn := m.buildDSN(password, *m.Database)
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return "", "", fmt.Errorf("failed to connect to database: %w", err)
	}
	defer func() {
		if closeErr := db.Close(); closeErr != nil {
			logger.Error("Failed to close connection", "error", closeErr)
		}
	}()

	maxRetries := 3
	for attempt := range maxRetries {
		newUsername := fmt.Sprintf("postgresus-%s", uuid.New().String()[:8])
		newPassword := uuid.New().String()

		tx, err := db.BeginTx(ctx, nil)
		if err != nil {
			return "", "", fmt.Errorf("failed to begin transaction: %w", err)
		}

		// Roll back eagerly on every failure path. The original deferred
		// the rollback inside the loop, so failed transactions stayed
		// open across retries until the whole function returned.
		rollback := func() {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				logger.Error("Failed to rollback transaction", "error", rollbackErr)
			}
		}

		_, err = tx.ExecContext(ctx, fmt.Sprintf(
			"CREATE USER '%s'@'%%' IDENTIFIED BY '%s'",
			newUsername,
			newPassword,
		))
		if err != nil {
			rollback()
			// Retry with a fresh random username unless exhausted.
			if attempt < maxRetries-1 {
				continue
			}
			return "", "", fmt.Errorf("failed to create user: %w", err)
		}

		_, err = tx.ExecContext(ctx, fmt.Sprintf(
			"GRANT SELECT, SHOW VIEW, LOCK TABLES, TRIGGER, EVENT ON `%s`.* TO '%s'@'%%'",
			*m.Database,
			newUsername,
		))
		if err != nil {
			rollback()
			return "", "", fmt.Errorf("failed to grant database privileges: %w", err)
		}

		_, err = tx.ExecContext(ctx, fmt.Sprintf(
			"GRANT PROCESS ON *.* TO '%s'@'%%'",
			newUsername,
		))
		if err != nil {
			rollback()
			return "", "", fmt.Errorf("failed to grant PROCESS privilege: %w", err)
		}

		if _, err = tx.ExecContext(ctx, "FLUSH PRIVILEGES"); err != nil {
			rollback()
			return "", "", fmt.Errorf("failed to flush privileges: %w", err)
		}

		if err := tx.Commit(); err != nil {
			return "", "", fmt.Errorf("failed to commit transaction: %w", err)
		}

		logger.Info(
			"Read-only MySQL user created successfully",
			"username",
			newUsername,
		)
		return newUsername, newPassword, nil
	}

	return "", "", errors.New("failed to generate unique username after 3 attempts")
}
// buildDSN assembles a go-sql-driver/mysql DSN for the given plaintext
// password and database. TLS is toggled by m.IsHttps; a 15s dial
// timeout and utf8mb4 charset are always applied.
//
// NOTE(review): the password is interpolated verbatim — confirm that
// stored passwords cannot contain characters that break the DSN grammar.
func (m *MysqlDatabase) buildDSN(password string, database string) string {
	tlsMode := "false"
	if m.IsHttps {
		tlsMode = "true"
	}

	return fmt.Sprintf(
		"%s:%s@tcp(%s:%d)/%s?parseTime=true&timeout=15s&tls=%s&charset=utf8mb4",
		m.Username,
		password,
		m.Host,
		m.Port,
		database,
		tlsMode,
	)
}
// detectMysqlVersion queries SELECT VERSION() and maps the major.minor
// prefix onto one of the supported tools.MysqlVersion constants.
// Unsupported or unparseable versions return an error.
func detectMysqlVersion(ctx context.Context, db *sql.DB) (tools.MysqlVersion, error) {
	var reported string
	if err := db.QueryRowContext(ctx, "SELECT VERSION()").Scan(&reported); err != nil {
		return "", fmt.Errorf("failed to query MySQL version: %w", err)
	}

	// Extract the leading "major.minor" pair from strings like
	// "8.0.36" or "5.7.44-log".
	parts := regexp.MustCompile(`^(\d+)\.(\d+)`).FindStringSubmatch(reported)
	if len(parts) < 3 {
		return "", fmt.Errorf("could not parse MySQL version: %s", reported)
	}

	supported := map[string]tools.MysqlVersion{
		"5.7": tools.MysqlVersion57,
		"8.0": tools.MysqlVersion80,
		"8.4": tools.MysqlVersion84,
	}
	if version, ok := supported[parts[1]+"."+parts[2]]; ok {
		return version, nil
	}
	return "", fmt.Errorf("unsupported MySQL version: %s.%s", parts[1], parts[2])
}
// decryptPasswordIfNeeded returns the plaintext password. When no
// encryptor is configured (nil), the stored value is assumed to already
// be plaintext and is returned unchanged.
func decryptPasswordIfNeeded(
	password string,
	encryptor encryption.FieldEncryptor,
	databaseID uuid.UUID,
) (string, error) {
	if encryptor != nil {
		return encryptor.Decrypt(databaseID, password)
	}
	return password, nil
}

View File

@@ -0,0 +1,366 @@
package mysql
import (
"context"
"fmt"
"log/slog"
"os"
"strconv"
"strings"
"testing"
_ "github.com/go-sql-driver/mysql"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"postgresus-backend/internal/config"
"postgresus-backend/internal/util/tools"
)
// Test_IsUserReadOnly_AdminUser_ReturnsFalse verifies that IsUserReadOnly
// reports false for the container's root account (which has full write
// privileges) on every supported MySQL version.
func Test_IsUserReadOnly_AdminUser_ReturnsFalse(t *testing.T) {
	env := config.GetEnv()

	// One subtest per supported server version; each targets its own
	// container port and is skipped when that port is unconfigured.
	cases := []struct {
		name    string
		version tools.MysqlVersion
		port    string
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToMysqlContainer(t, tc.port, tc.version)
			defer container.DB.Close()

			mysqlModel := createMysqlModel(container)
			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
			ctx := context.Background()

			// nil encryptor: the test model stores a plaintext password.
			isReadOnly, err := mysqlModel.IsUserReadOnly(ctx, logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.False(t, isReadOnly, "Root user should not be read-only")
		})
	}
}
// Test_CreateReadOnlyUser_UserCanReadButNotWrite provisions a read-only
// user on each supported MySQL version and verifies it can SELECT but is
// denied INSERT, UPDATE, DELETE and CREATE.
func Test_CreateReadOnlyUser_UserCanReadButNotWrite(t *testing.T) {
	env := config.GetEnv()

	cases := []struct {
		name    string
		version tools.MysqlVersion
		port    string
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			container := connectToMysqlContainer(t, tc.port, tc.version)
			defer container.DB.Close()

			// Start from a clean slate: drop leftovers from earlier runs.
			_, err := container.DB.Exec(`DROP TABLE IF EXISTS readonly_test`)
			assert.NoError(t, err)
			_, err = container.DB.Exec(`DROP TABLE IF EXISTS hack_table`)
			assert.NoError(t, err)
			_, err = container.DB.Exec(`DROP TABLE IF EXISTS future_table`)
			assert.NoError(t, err)

			// Seed a table for the read-only user to query.
			_, err = container.DB.Exec(`
CREATE TABLE readonly_test (
id INT AUTO_INCREMENT PRIMARY KEY,
data VARCHAR(255) NOT NULL
)
`)
			assert.NoError(t, err)
			_, err = container.DB.Exec(
				`INSERT INTO readonly_test (data) VALUES ('test1'), ('test2')`,
			)
			assert.NoError(t, err)

			mysqlModel := createMysqlModel(container)
			logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
			ctx := context.Background()

			// Provision the account (nil encryptor: plaintext password).
			username, password, err := mysqlModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.NotEmpty(t, username)
			assert.NotEmpty(t, password)
			assert.True(t, strings.HasPrefix(username, "postgresus-"))

			// The freshly created user must self-report as read-only.
			readOnlyModel := &MysqlDatabase{
				Version:  mysqlModel.Version,
				Host:     mysqlModel.Host,
				Port:     mysqlModel.Port,
				Username: username,
				Password: password,
				Database: mysqlModel.Database,
				IsHttps:  false,
			}
			isReadOnly, err := readOnlyModel.IsUserReadOnly(ctx, logger, nil, uuid.New())
			assert.NoError(t, err)
			assert.True(t, isReadOnly, "Created user should be read-only")

			// Connect directly as the read-only user.
			readOnlyDSN := fmt.Sprintf(
				"%s:%s@tcp(%s:%d)/%s?parseTime=true",
				username,
				password,
				container.Host,
				container.Port,
				container.Database,
			)
			readOnlyConn, err := sqlx.Connect("mysql", readOnlyDSN)
			assert.NoError(t, err)
			defer readOnlyConn.Close()

			// Reads succeed.
			var count int
			err = readOnlyConn.Get(&count, "SELECT COUNT(*) FROM readonly_test")
			assert.NoError(t, err)
			assert.Equal(t, 2, count)

			// Every write path must be denied.
			_, err = readOnlyConn.Exec("INSERT INTO readonly_test (data) VALUES ('should-fail')")
			assert.Error(t, err)
			assert.Contains(t, strings.ToLower(err.Error()), "denied")

			_, err = readOnlyConn.Exec("UPDATE readonly_test SET data = 'hacked' WHERE id = 1")
			assert.Error(t, err)
			assert.Contains(t, strings.ToLower(err.Error()), "denied")

			_, err = readOnlyConn.Exec("DELETE FROM readonly_test WHERE id = 1")
			assert.Error(t, err)
			assert.Contains(t, strings.ToLower(err.Error()), "denied")

			_, err = readOnlyConn.Exec("CREATE TABLE hack_table (id INT)")
			assert.Error(t, err)
			assert.Contains(t, strings.ToLower(err.Error()), "denied")

			// Cleanup: remove the provisioned user.
			_, err = container.DB.Exec(fmt.Sprintf("DROP USER IF EXISTS '%s'@'%%'", username))
			assert.NoError(t, err)
		})
	}
}
// Test_ReadOnlyUser_FutureTables_NoSelectPermission creates the
// read-only user BEFORE the table exists and then verifies the user can
// still SELECT from it — the grant is database-wide (`db`.*), so it
// covers tables created later.
//
// NOTE(review): the function name suggests SELECT should be denied, but
// the assertions verify SELECT succeeds — consider renaming.
func Test_ReadOnlyUser_FutureTables_NoSelectPermission(t *testing.T) {
	env := config.GetEnv()

	container := connectToMysqlContainer(t, env.TestMysql80Port, tools.MysqlVersion80)
	defer container.DB.Close()

	mysqlModel := createMysqlModel(container)
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	ctx := context.Background()

	// User first, table second.
	username, password, err := mysqlModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
	assert.NoError(t, err)

	_, err = container.DB.Exec(`DROP TABLE IF EXISTS future_table`)
	assert.NoError(t, err)
	_, err = container.DB.Exec(`
CREATE TABLE future_table (
id INT AUTO_INCREMENT PRIMARY KEY,
data VARCHAR(255) NOT NULL
)
`)
	assert.NoError(t, err)
	_, err = container.DB.Exec(`INSERT INTO future_table (data) VALUES ('future_data')`)
	assert.NoError(t, err)

	readOnlyDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		username, password, container.Host, container.Port, container.Database)
	readOnlyConn, err := sqlx.Connect("mysql", readOnlyDSN)
	assert.NoError(t, err)
	defer readOnlyConn.Close()

	// The database-level grant covers the newly created table.
	var data string
	err = readOnlyConn.Get(&data, "SELECT data FROM future_table LIMIT 1")
	assert.NoError(t, err)
	assert.Equal(t, "future_data", data)

	// Cleanup: remove the provisioned user.
	_, err = container.DB.Exec(fmt.Sprintf("DROP USER IF EXISTS '%s'@'%%'", username))
	assert.NoError(t, err)
}
// Test_CreateReadOnlyUser_DatabaseNameWithDash_Success verifies that
// user provisioning works for database names containing dashes, which
// require backtick quoting in the GRANT statement.
func Test_CreateReadOnlyUser_DatabaseNameWithDash_Success(t *testing.T) {
	env := config.GetEnv()

	container := connectToMysqlContainer(t, env.TestMysql80Port, tools.MysqlVersion80)
	defer container.DB.Close()

	// Create a dedicated database whose name needs quoting.
	dashDbName := "test-db-with-dash"
	_, err := container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", dashDbName))
	assert.NoError(t, err)
	_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE `%s`", dashDbName))
	assert.NoError(t, err)
	defer func() {
		_, _ = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", dashDbName))
	}()

	// Root connection scoped to the dashed database.
	dashDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		container.Username, container.Password, container.Host, container.Port, dashDbName)
	dashDB, err := sqlx.Connect("mysql", dashDSN)
	assert.NoError(t, err)
	defer dashDB.Close()

	_, err = dashDB.Exec(`
CREATE TABLE dash_test (
id INT AUTO_INCREMENT PRIMARY KEY,
data VARCHAR(255) NOT NULL
)
`)
	assert.NoError(t, err)
	_, err = dashDB.Exec(`INSERT INTO dash_test (data) VALUES ('test1'), ('test2')`)
	assert.NoError(t, err)

	// Model built by hand: createMysqlModel targets the default testdb.
	mysqlModel := &MysqlDatabase{
		Version:  tools.MysqlVersion80,
		Host:     container.Host,
		Port:     container.Port,
		Username: container.Username,
		Password: container.Password,
		Database: &dashDbName,
		IsHttps:  false,
	}

	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	ctx := context.Background()

	username, password, err := mysqlModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
	assert.NoError(t, err)
	assert.NotEmpty(t, username)
	assert.NotEmpty(t, password)
	assert.True(t, strings.HasPrefix(username, "postgresus-"))

	readOnlyDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		username, password, container.Host, container.Port, dashDbName)
	readOnlyConn, err := sqlx.Connect("mysql", readOnlyDSN)
	assert.NoError(t, err)
	defer readOnlyConn.Close()

	// Read works, write is denied — the grant on the dashed name held.
	var count int
	err = readOnlyConn.Get(&count, "SELECT COUNT(*) FROM dash_test")
	assert.NoError(t, err)
	assert.Equal(t, 2, count)

	_, err = readOnlyConn.Exec("INSERT INTO dash_test (data) VALUES ('should-fail')")
	assert.Error(t, err)
	assert.Contains(t, strings.ToLower(err.Error()), "denied")

	// Cleanup: remove the provisioned user.
	_, err = dashDB.Exec(fmt.Sprintf("DROP USER IF EXISTS '%s'@'%%'", username))
	assert.NoError(t, err)
}
// Test_ReadOnlyUser_CannotDropOrAlterTables verifies that a provisioned
// read-only user is denied DDL: DROP, ALTER and TRUNCATE.
func Test_ReadOnlyUser_CannotDropOrAlterTables(t *testing.T) {
	env := config.GetEnv()

	container := connectToMysqlContainer(t, env.TestMysql80Port, tools.MysqlVersion80)
	defer container.DB.Close()

	// Seed a table for the DDL attempts.
	_, err := container.DB.Exec(`DROP TABLE IF EXISTS drop_test`)
	assert.NoError(t, err)
	_, err = container.DB.Exec(`
CREATE TABLE drop_test (
id INT AUTO_INCREMENT PRIMARY KEY,
data VARCHAR(255) NOT NULL
)
`)
	assert.NoError(t, err)
	_, err = container.DB.Exec(`INSERT INTO drop_test (data) VALUES ('test1')`)
	assert.NoError(t, err)

	mysqlModel := createMysqlModel(container)
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	ctx := context.Background()

	username, password, err := mysqlModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
	assert.NoError(t, err)

	readOnlyDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		username, password, container.Host, container.Port, container.Database)
	readOnlyConn, err := sqlx.Connect("mysql", readOnlyDSN)
	assert.NoError(t, err)
	defer readOnlyConn.Close()

	// Every DDL statement must fail with an access-denied error.
	_, err = readOnlyConn.Exec("DROP TABLE drop_test")
	assert.Error(t, err)
	assert.Contains(t, strings.ToLower(err.Error()), "denied")

	_, err = readOnlyConn.Exec("ALTER TABLE drop_test ADD COLUMN new_col VARCHAR(100)")
	assert.Error(t, err)
	assert.Contains(t, strings.ToLower(err.Error()), "denied")

	_, err = readOnlyConn.Exec("TRUNCATE TABLE drop_test")
	assert.Error(t, err)
	assert.Contains(t, strings.ToLower(err.Error()), "denied")

	// Cleanup: remove the provisioned user.
	_, err = container.DB.Exec(fmt.Sprintf("DROP USER IF EXISTS '%s'@'%%'", username))
	assert.NoError(t, err)
}
// MysqlContainer describes a running MySQL test container and holds an
// open root connection to it.
type MysqlContainer struct {
	Host     string             // container host (127.0.0.1 in these tests)
	Port     int                // mapped host port
	Username string             // root account name
	Password string             // root account password
	Database string             // default schema the tests operate on
	Version  tools.MysqlVersion // server version running in the container
	DB       *sqlx.DB           // open root connection
}
// connectToMysqlContainer opens a root connection to the MySQL test
// container on the given port. The test is skipped when the port is
// unset or the server is unreachable, so missing containers never fail
// the suite.
func connectToMysqlContainer(
	t *testing.T,
	port string,
	version tools.MysqlVersion,
) *MysqlContainer {
	if port == "" {
		t.Skipf("MySQL port not configured for version %s", version)
	}

	const (
		dbName   = "testdb"
		host     = "127.0.0.1"
		username = "root"
		password = "rootpassword"
	)

	portInt, err := strconv.Atoi(port)
	assert.NoError(t, err)

	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		username, password, host, portInt, dbName)
	db, err := sqlx.Connect("mysql", dsn)
	if err != nil {
		t.Skipf("Failed to connect to MySQL %s: %v", version, err)
	}

	return &MysqlContainer{
		Host:     host,
		Port:     portInt,
		Username: username,
		Password: password,
		Database: dbName,
		Version:  version,
		DB:       db,
	}
}
// createMysqlModel builds a MysqlDatabase model pointing at the given
// test container, using its root credentials with a plaintext password.
func createMysqlModel(container *MysqlContainer) *MysqlDatabase {
	model := &MysqlDatabase{
		Version:  container.Version,
		Host:     container.Host,
		Port:     container.Port,
		Username: container.Username,
		Password: container.Password,
		IsHttps:  false,
	}
	model.Database = &container.Database
	return model
}

View File

@@ -34,6 +34,9 @@ type PostgresqlDatabase struct {
// backup settings
IncludeSchemas []string `json:"includeSchemas" gorm:"-"`
IncludeSchemasString string `json:"-" gorm:"column:include_schemas;type:text;not null;default:''"`
// restore settings (not saved to DB)
IsExcludeExtensions bool `json:"isExcludeExtensions" gorm:"-"`
}
func (p *PostgresqlDatabase) TableName() string {

View File

@@ -4,6 +4,7 @@ type DatabaseType string
const (
DatabaseTypePostgres DatabaseType = "POSTGRES"
DatabaseTypeMysql DatabaseType = "MYSQL"
)
type HealthStatus string

View File

@@ -3,6 +3,7 @@ package databases
import (
"errors"
"log/slog"
"postgresus-backend/internal/features/databases/databases/mysql"
"postgresus-backend/internal/features/databases/databases/postgresql"
"postgresus-backend/internal/features/notifiers"
"postgresus-backend/internal/util/encryption"
@@ -21,6 +22,7 @@ type Database struct {
Type DatabaseType `json:"type" gorm:"column:type;type:text;not null"`
Postgresql *postgresql.PostgresqlDatabase `json:"postgresql,omitempty" gorm:"foreignKey:DatabaseID"`
Mysql *mysql.MysqlDatabase `json:"mysql,omitempty" gorm:"foreignKey:DatabaseID"`
Notifiers []notifiers.Notifier `json:"notifiers" gorm:"many2many:database_notifiers;"`
@@ -42,8 +44,12 @@ func (d *Database) Validate() error {
if d.Postgresql == nil {
return errors.New("postgresql database is required")
}
return d.Postgresql.Validate()
case DatabaseTypeMysql:
if d.Mysql == nil {
return errors.New("mysql database is required")
}
return d.Mysql.Validate()
default:
return errors.New("invalid database type: " + string(d.Type))
}
@@ -72,6 +78,9 @@ func (d *Database) EncryptSensitiveFields(encryptor encryption.FieldEncryptor) e
if d.Postgresql != nil {
return d.Postgresql.EncryptSensitiveFields(d.ID, encryptor)
}
if d.Mysql != nil {
return d.Mysql.EncryptSensitiveFields(d.ID, encryptor)
}
return nil
}
@@ -82,6 +91,9 @@ func (d *Database) PopulateVersionIfEmpty(
if d.Postgresql != nil {
return d.Postgresql.PopulateVersionIfEmpty(logger, encryptor, d.ID)
}
if d.Mysql != nil {
return d.Mysql.PopulateVersionIfEmpty(logger, encryptor, d.ID)
}
return nil
}
@@ -95,6 +107,10 @@ func (d *Database) Update(incoming *Database) {
if d.Postgresql != nil && incoming.Postgresql != nil {
d.Postgresql.Update(incoming.Postgresql)
}
case DatabaseTypeMysql:
if d.Mysql != nil && incoming.Mysql != nil {
d.Mysql.Update(incoming.Mysql)
}
}
}
@@ -102,6 +118,8 @@ func (d *Database) getSpecificDatabase() DatabaseConnector {
switch d.Type {
case DatabaseTypePostgres:
return d.Postgresql
case DatabaseTypeMysql:
return d.Mysql
}
panic("invalid database type: " + string(d.Type))

View File

@@ -2,6 +2,7 @@ package databases
import (
"errors"
"postgresus-backend/internal/features/databases/databases/mysql"
"postgresus-backend/internal/features/databases/databases/postgresql"
"postgresus-backend/internal/storage"
@@ -25,26 +26,28 @@ func (r *DatabaseRepository) Save(database *Database) (*Database, error) {
if database.Postgresql == nil {
return errors.New("postgresql configuration is required for PostgreSQL database")
}
// Ensure DatabaseID is always set and never nil
database.Postgresql.DatabaseID = &database.ID
case DatabaseTypeMysql:
if database.Mysql == nil {
return errors.New("mysql configuration is required for MySQL database")
}
database.Mysql.DatabaseID = &database.ID
}
if isNew {
if err := tx.Create(database).
Omit("Postgresql", "Notifiers").
Omit("Postgresql", "Mysql", "Notifiers").
Error; err != nil {
return err
}
} else {
if err := tx.Save(database).
Omit("Postgresql", "Notifiers").
Omit("Postgresql", "Mysql", "Notifiers").
Error; err != nil {
return err
}
}
// Save the specific database type
switch database.Type {
case DatabaseTypePostgres:
database.Postgresql.DatabaseID = &database.ID
@@ -58,6 +61,18 @@ func (r *DatabaseRepository) Save(database *Database) (*Database, error) {
return err
}
}
case DatabaseTypeMysql:
database.Mysql.DatabaseID = &database.ID
if database.Mysql.ID == uuid.Nil {
database.Mysql.ID = uuid.New()
if err := tx.Create(database.Mysql).Error; err != nil {
return err
}
} else {
if err := tx.Save(database.Mysql).Error; err != nil {
return err
}
}
}
if err := tx.
@@ -83,6 +98,7 @@ func (r *DatabaseRepository) FindByID(id uuid.UUID) (*Database, error) {
if err := storage.
GetDb().
Preload("Postgresql").
Preload("Mysql").
Preload("Notifiers").
Where("id = ?", id).
First(&database).Error; err != nil {
@@ -98,6 +114,7 @@ func (r *DatabaseRepository) FindByWorkspaceID(workspaceID uuid.UUID) ([]*Databa
if err := storage.
GetDb().
Preload("Postgresql").
Preload("Mysql").
Preload("Notifiers").
Where("workspace_id = ?", workspaceID).
Order("CASE WHEN health_status = 'UNAVAILABLE' THEN 1 WHEN health_status = 'AVAILABLE' THEN 2 WHEN health_status IS NULL THEN 3 ELSE 4 END, name ASC").
@@ -128,6 +145,12 @@ func (r *DatabaseRepository) Delete(id uuid.UUID) error {
Delete(&postgresql.PostgresqlDatabase{}).Error; err != nil {
return err
}
case DatabaseTypeMysql:
if err := tx.
Where("database_id = ?", id).
Delete(&mysql.MysqlDatabase{}).Error; err != nil {
return err
}
}
if err := tx.Delete(&Database{}, id).Error; err != nil {
@@ -158,6 +181,7 @@ func (r *DatabaseRepository) GetAllDatabases() ([]*Database, error) {
if err := storage.
GetDb().
Preload("Postgresql").
Preload("Mysql").
Preload("Notifiers").
Find(&databases).Error; err != nil {
return nil, err

View File

@@ -8,6 +8,7 @@ import (
"time"
audit_logs "postgresus-backend/internal/features/audit_logs"
"postgresus-backend/internal/features/databases/databases/mysql"
"postgresus-backend/internal/features/databases/databases/postgresql"
"postgresus-backend/internal/features/notifiers"
users_models "postgresus-backend/internal/features/users/models"
@@ -404,6 +405,20 @@ func (s *DatabaseService) CopyDatabase(
IsHttps: existingDatabase.Postgresql.IsHttps,
}
}
case DatabaseTypeMysql:
if existingDatabase.Mysql != nil {
newDatabase.Mysql = &mysql.MysqlDatabase{
ID: uuid.Nil,
DatabaseID: nil,
Version: existingDatabase.Mysql.Version,
Host: existingDatabase.Mysql.Host,
Port: existingDatabase.Mysql.Port,
Username: existingDatabase.Mysql.Username,
Password: existingDatabase.Mysql.Password,
Database: existingDatabase.Mysql.Database,
IsHttps: existingDatabase.Mysql.IsHttps,
}
}
}
if err := newDatabase.Validate(); err != nil {
@@ -518,19 +533,27 @@ func (s *DatabaseService) IsUserReadOnly(
usingDatabase = database
}
if usingDatabase.Type != DatabaseTypePostgres {
return false, errors.New("read-only check only supported for PostgreSQL databases")
}
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
return usingDatabase.Postgresql.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
switch usingDatabase.Type {
case DatabaseTypePostgres:
return usingDatabase.Postgresql.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
case DatabaseTypeMysql:
return usingDatabase.Mysql.IsUserReadOnly(
ctx,
s.logger,
s.fieldEncryptor,
usingDatabase.ID,
)
default:
return false, errors.New("read-only check not supported for this database type")
}
}
func (s *DatabaseService) CreateReadOnlyUser(
@@ -582,16 +605,25 @@ func (s *DatabaseService) CreateReadOnlyUser(
usingDatabase = database
}
if usingDatabase.Type != DatabaseTypePostgres {
return "", "", errors.New("read-only user creation only supported for PostgreSQL")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
username, password, err := usingDatabase.Postgresql.CreateReadOnlyUser(
ctx, s.logger, s.fieldEncryptor, usingDatabase.ID,
)
var username, password string
var err error
switch usingDatabase.Type {
case DatabaseTypePostgres:
username, password, err = usingDatabase.Postgresql.CreateReadOnlyUser(
ctx, s.logger, s.fieldEncryptor, usingDatabase.ID,
)
case DatabaseTypeMysql:
username, password, err = usingDatabase.Mysql.CreateReadOnlyUser(
ctx, s.logger, s.fieldEncryptor, usingDatabase.ID,
)
default:
return "", "", errors.New("read-only user creation not supported for this database type")
}
if err != nil {
return "", "", err
}

View File

@@ -7,4 +7,5 @@ const (
IntervalDaily IntervalType = "DAILY"
IntervalWeekly IntervalType = "WEEKLY"
IntervalMonthly IntervalType = "MONTHLY"
IntervalCron IntervalType = "CRON"
)

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/robfig/cron/v3"
"gorm.io/gorm"
)
@@ -12,11 +13,13 @@ type Interval struct {
ID uuid.UUID `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"`
Interval IntervalType `json:"interval" gorm:"type:text;not null"`
TimeOfDay *string `json:"timeOfDay" gorm:"type:text;"`
TimeOfDay *string `json:"timeOfDay" gorm:"type:text;"`
// only for WEEKLY
Weekday *int `json:"weekday,omitempty" gorm:"type:int"`
Weekday *int `json:"weekday,omitempty" gorm:"type:int"`
// only for MONTHLY
DayOfMonth *int `json:"dayOfMonth,omitempty" gorm:"type:int"`
DayOfMonth *int `json:"dayOfMonth,omitempty" gorm:"type:int"`
// only for CRON
CronExpression *string `json:"cronExpression,omitempty" gorm:"type:text"`
}
func (i *Interval) BeforeSave(tx *gorm.DB) error {
@@ -40,6 +43,16 @@ func (i *Interval) Validate() error {
return errors.New("day of month is required for monthly intervals")
}
// for cron interval cron expression is required and must be valid
if i.Interval == IntervalCron {
if i.CronExpression == nil || *i.CronExpression == "" {
return errors.New("cron expression is required for cron intervals")
}
if err := i.validateCronExpression(*i.CronExpression); err != nil {
return err
}
}
return nil
}
@@ -59,6 +72,8 @@ func (i *Interval) ShouldTriggerBackup(now time.Time, lastBackupTime *time.Time)
return i.shouldTriggerWeekly(now, *lastBackupTime)
case IntervalMonthly:
return i.shouldTriggerMonthly(now, *lastBackupTime)
case IntervalCron:
return i.shouldTriggerCron(now, *lastBackupTime)
default:
return false
}
@@ -66,11 +81,12 @@ func (i *Interval) ShouldTriggerBackup(now time.Time, lastBackupTime *time.Time)
func (i *Interval) Copy() *Interval {
return &Interval{
ID: uuid.Nil,
Interval: i.Interval,
TimeOfDay: i.TimeOfDay,
Weekday: i.Weekday,
DayOfMonth: i.DayOfMonth,
ID: uuid.Nil,
Interval: i.Interval,
TimeOfDay: i.TimeOfDay,
Weekday: i.Weekday,
DayOfMonth: i.DayOfMonth,
CronExpression: i.CronExpression,
}
}
@@ -204,3 +220,31 @@ func getStartOfWeek(t time.Time) time.Time {
func getStartOfMonth(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
}
// shouldTriggerCron reports whether a cron-scheduled slot has elapsed
// between the last backup and now. Missing or malformed expressions
// never trigger.
func (i *Interval) shouldTriggerCron(now, lastBackup time.Time) bool {
	if i.CronExpression == nil || *i.CronExpression == "" {
		return false
	}

	// Standard 5-field cron grammar: minute hour dom month dow.
	spec := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
	schedule, err := spec.Parse(*i.CronExpression)
	if err != nil {
		return false
	}

	// First scheduled moment strictly after the previous backup.
	dueAt := schedule.Next(lastBackup)

	// Trigger once that moment has been reached or passed.
	return !now.Before(dueAt)
}
// validateCronExpression checks expr against the standard 5-field cron
// grammar (minute hour dom month dow), returning a descriptive error
// when it does not parse.
func (i *Interval) validateCronExpression(expr string) error {
	spec := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
	if _, err := spec.Parse(expr); err != nil {
		return errors.New("invalid cron expression: " + err.Error())
	}
	return nil
}

View File

@@ -457,6 +457,144 @@ func TestInterval_ShouldTriggerBackup_Monthly(t *testing.T) {
)
}
// TestInterval_ShouldTriggerBackup_Cron covers the CRON interval type:
// immediate trigger when no backup exists yet, at/after/before a
// scheduled slot, weekly / multi-day / step expressions, and the
// invalid, empty and nil expression fallbacks (which never trigger).
func TestInterval_ShouldTriggerBackup_Cron(t *testing.T) {
	cronExpr := "0 2 * * *" // Daily at 2:00 AM
	interval := &Interval{
		ID:             uuid.New(),
		Interval:       IntervalCron,
		CronExpression: &cronExpr,
	}

	t.Run("No previous backup: Trigger backup immediately", func(t *testing.T) {
		now := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
		should := interval.ShouldTriggerBackup(now, nil)
		assert.True(t, should)
	})

	t.Run("Before scheduled cron time: Do not trigger backup", func(t *testing.T) {
		now := time.Date(2024, 1, 15, 1, 59, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 14, 2, 0, 0, 0, time.UTC) // Yesterday at 2 AM
		should := interval.ShouldTriggerBackup(now, &lastBackup)
		assert.False(t, should)
	})

	t.Run("Exactly at scheduled cron time: Trigger backup", func(t *testing.T) {
		now := time.Date(2024, 1, 15, 2, 0, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 14, 2, 0, 0, 0, time.UTC) // Yesterday at 2 AM
		should := interval.ShouldTriggerBackup(now, &lastBackup)
		assert.True(t, should)
	})

	t.Run("After scheduled cron time: Trigger backup", func(t *testing.T) {
		now := time.Date(2024, 1, 15, 3, 0, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 14, 2, 0, 0, 0, time.UTC) // Yesterday at 2 AM
		should := interval.ShouldTriggerBackup(now, &lastBackup)
		assert.True(t, should)
	})

	t.Run("Backup already done after scheduled time: Do not trigger again", func(t *testing.T) {
		now := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 15, 2, 5, 0, 0, time.UTC) // Today at 2:05 AM
		should := interval.ShouldTriggerBackup(now, &lastBackup)
		assert.False(t, should)
	})

	t.Run("Weekly cron expression: 0 3 * * 1 (Monday at 3 AM)", func(t *testing.T) {
		weeklyCron := "0 3 * * 1" // Every Monday at 3 AM
		weeklyInterval := &Interval{
			ID:             uuid.New(),
			Interval:       IntervalCron,
			CronExpression: &weeklyCron,
		}
		// Monday Jan 15, 2024 at 3:00 AM
		monday := time.Date(2024, 1, 15, 3, 0, 0, 0, time.UTC)
		// Last backup was previous Monday
		lastBackup := time.Date(2024, 1, 8, 3, 0, 0, 0, time.UTC)
		should := weeklyInterval.ShouldTriggerBackup(monday, &lastBackup)
		assert.True(t, should)
	})

	t.Run("Complex cron expression: 30 4 1,15 * * (1st and 15th at 4:30 AM)", func(t *testing.T) {
		complexCron := "30 4 1,15 * *" // 1st and 15th of each month at 4:30 AM
		complexInterval := &Interval{
			ID:             uuid.New(),
			Interval:       IntervalCron,
			CronExpression: &complexCron,
		}
		// Jan 15, 2024 at 4:30 AM
		now := time.Date(2024, 1, 15, 4, 30, 0, 0, time.UTC)
		// Last backup was Jan 1
		lastBackup := time.Date(2024, 1, 1, 4, 30, 0, 0, time.UTC)
		should := complexInterval.ShouldTriggerBackup(now, &lastBackup)
		assert.True(t, should)
	})

	t.Run("Every 6 hours cron expression: 0 */6 * * *", func(t *testing.T) {
		sixHourlyCron := "0 */6 * * *" // Every 6 hours (0:00, 6:00, 12:00, 18:00)
		sixHourlyInterval := &Interval{
			ID:             uuid.New(),
			Interval:       IntervalCron,
			CronExpression: &sixHourlyCron,
		}
		// 12:00 - next trigger after 6:00
		now := time.Date(2024, 1, 15, 12, 0, 0, 0, time.UTC)
		// Last backup was at 6:00
		lastBackup := time.Date(2024, 1, 15, 6, 0, 0, 0, time.UTC)
		should := sixHourlyInterval.ShouldTriggerBackup(now, &lastBackup)
		assert.True(t, should)
	})

	// Malformed expressions must fail closed (never trigger).
	t.Run("Invalid cron expression returns false", func(t *testing.T) {
		invalidCron := "invalid cron"
		invalidInterval := &Interval{
			ID:             uuid.New(),
			Interval:       IntervalCron,
			CronExpression: &invalidCron,
		}
		now := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 14, 10, 0, 0, 0, time.UTC)
		should := invalidInterval.ShouldTriggerBackup(now, &lastBackup)
		assert.False(t, should)
	})

	t.Run("Empty cron expression returns false", func(t *testing.T) {
		emptyCron := ""
		emptyInterval := &Interval{
			ID:             uuid.New(),
			Interval:       IntervalCron,
			CronExpression: &emptyCron,
		}
		now := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 14, 10, 0, 0, 0, time.UTC)
		should := emptyInterval.ShouldTriggerBackup(now, &lastBackup)
		assert.False(t, should)
	})

	t.Run("Nil cron expression returns false", func(t *testing.T) {
		nilInterval := &Interval{
			ID:             uuid.New(),
			Interval:       IntervalCron,
			CronExpression: nil,
		}
		now := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
		lastBackup := time.Date(2024, 1, 14, 10, 0, 0, 0, time.UTC)
		should := nilInterval.ShouldTriggerBackup(now, &lastBackup)
		assert.False(t, should)
	})
}
func TestInterval_Validate(t *testing.T) {
t.Run("Daily interval requires time of day", func(t *testing.T) {
interval := &Interval{
@@ -526,4 +664,60 @@ func TestInterval_Validate(t *testing.T) {
err := interval.Validate()
assert.NoError(t, err)
})
t.Run("Cron interval requires cron expression", func(t *testing.T) {
interval := &Interval{
ID: uuid.New(),
Interval: IntervalCron,
}
err := interval.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "cron expression is required")
})
t.Run("Cron interval with empty expression is invalid", func(t *testing.T) {
emptyCron := ""
interval := &Interval{
ID: uuid.New(),
Interval: IntervalCron,
CronExpression: &emptyCron,
}
err := interval.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "cron expression is required")
})
t.Run("Cron interval with invalid expression is invalid", func(t *testing.T) {
invalidCron := "invalid cron"
interval := &Interval{
ID: uuid.New(),
Interval: IntervalCron,
CronExpression: &invalidCron,
}
err := interval.Validate()
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid cron expression")
})
t.Run("Valid cron interval with daily expression", func(t *testing.T) {
cronExpr := "0 2 * * *" // Daily at 2 AM
interval := &Interval{
ID: uuid.New(),
Interval: IntervalCron,
CronExpression: &cronExpr,
}
err := interval.Validate()
assert.NoError(t, err)
})
t.Run("Valid cron interval with complex expression", func(t *testing.T) {
cronExpr := "30 4 1,15 * *" // 1st and 15th of each month at 4:30 AM
interval := &Interval{
ID: uuid.New(),
Interval: IntervalCron,
CronExpression: &cronExpr,
}
err := interval.Validate()
assert.NoError(t, err)
})
}

View File

@@ -171,6 +171,36 @@ func Test_RestoreBackup_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *testing
assert.Contains(t, string(testResp.Body), "insufficient permissions")
}
// Verifies that a restore request carrying IsExcludeExtensions=true is
// accepted by the restore endpoint for a backup in the owner's workspace.
func Test_RestoreBackup_WithIsExcludeExtensions_FlagPassedCorrectly(t *testing.T) {
	testRouter := createTestRouter()

	// Arrange: an owner with a workspace whose database already has a backup.
	owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, testRouter)
	_, backup := createTestDatabaseWithBackupForRestore(workspace, owner, testRouter)

	restoreRequest := RestoreBackupRequest{
		PostgresqlDatabase: &postgresql.PostgresqlDatabase{
			Version:             tools.PostgresqlVersion16,
			Host:                "localhost",
			Port:                5432,
			Username:            "postgres",
			Password:            "postgres",
			IsExcludeExtensions: true,
		},
	}

	// Act: trigger the restore as the workspace owner.
	response := test_utils.MakePostRequest(
		t,
		testRouter,
		fmt.Sprintf("/api/v1/restores/%s/restore", backup.ID.String()),
		"Bearer "+owner.Token,
		restoreRequest,
		http.StatusOK,
	)

	// Assert: the API acknowledges that the restore was kicked off.
	assert.Contains(t, string(response.Body), "restore started successfully")
}
func Test_RestoreBackup_AuditLogWritten(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)

View File

@@ -1,9 +1,11 @@
package restores
import (
"postgresus-backend/internal/features/databases/databases/mysql"
"postgresus-backend/internal/features/databases/databases/postgresql"
)
// RestoreBackupRequest carries the connection details of the target database
// a backup should be restored into. Exactly one field is expected to be set,
// matching the backup's database type (the service rejects a missing match).
type RestoreBackupRequest struct {
	// Target PostgreSQL connection; required when restoring a Postgres backup.
	PostgresqlDatabase *postgresql.PostgresqlDatabase `json:"postgresqlDatabase"`
	// Target MySQL connection; required when restoring a MySQL backup.
	MysqlDatabase *mysql.MysqlDatabase `json:"mysqlDatabase"`
}

View File

@@ -122,13 +122,8 @@ func (s *RestoreService) RestoreBackupWithAuth(
return err
}
if tools.IsBackupDbVersionHigherThanRestoreDbVersion(
backupDatabase.Postgresql.Version,
requestDTO.PostgresqlDatabase.Version,
) {
return errors.New(`backup database version is higher than restore database version. ` +
`Should be restored to the same version as the backup database or higher. ` +
`For example, you can restore PG 15 backup to PG 15, 16 or higher. But cannot restore to 14 and lower`)
if err := s.validateVersionCompatibility(backupDatabase, requestDTO); err != nil {
return err
}
go func() {
@@ -163,10 +158,15 @@ func (s *RestoreService) RestoreBackup(
return err
}
if database.Type == databases.DatabaseTypePostgres {
switch database.Type {
case databases.DatabaseTypePostgres:
if requestDTO.PostgresqlDatabase == nil {
return errors.New("postgresql database is required")
}
case databases.DatabaseTypeMysql:
if requestDTO.MysqlDatabase == nil {
return errors.New("mysql database is required")
}
}
restore := models.Restore{
@@ -207,13 +207,20 @@ func (s *RestoreService) RestoreBackup(
start := time.Now().UTC()
restoringToDB := &databases.Database{
Type: database.Type,
Postgresql: requestDTO.PostgresqlDatabase,
Mysql: requestDTO.MysqlDatabase,
}
if err := restoringToDB.PopulateVersionIfEmpty(s.logger, s.fieldEncryptor); err != nil {
return fmt.Errorf("failed to auto-detect database version: %w", err)
}
isExcludeExtensions := false
if requestDTO.PostgresqlDatabase != nil {
isExcludeExtensions = requestDTO.PostgresqlDatabase.IsExcludeExtensions
}
err = s.restoreBackupUsecase.Execute(
backupConfig,
restore,
@@ -221,6 +228,7 @@ func (s *RestoreService) RestoreBackup(
restoringToDB,
backup,
storage,
isExcludeExtensions,
)
if err != nil {
errMsg := err.Error()
@@ -244,3 +252,36 @@ func (s *RestoreService) RestoreBackup(
return nil
}
// validateVersionCompatibility checks that the request supplies a target
// database matching the backup's engine, and that the target version is not
// older than the version the backup was taken from. Database types other
// than PostgreSQL and MySQL pass through without validation.
func (s *RestoreService) validateVersionCompatibility(
	backupDatabase *databases.Database,
	requestDTO RestoreBackupRequest,
) error {
	switch backupDatabase.Type {
	case databases.DatabaseTypePostgres:
		target := requestDTO.PostgresqlDatabase
		if target == nil {
			return errors.New("postgresql database configuration is required for restore")
		}
		if !tools.IsBackupDbVersionHigherThanRestoreDbVersion(
			backupDatabase.Postgresql.Version,
			target.Version,
		) {
			return nil
		}
		return errors.New(`backup database version is higher than restore database version. ` +
			`Should be restored to the same version as the backup database or higher. ` +
			`For example, you can restore PG 15 backup to PG 15, 16 or higher. But cannot restore to 14 and lower`)
	case databases.DatabaseTypeMysql:
		target := requestDTO.MysqlDatabase
		if target == nil {
			return errors.New("mysql database configuration is required for restore")
		}
		if !tools.IsMysqlBackupVersionHigherThanRestoreVersion(
			backupDatabase.Mysql.Version,
			target.Version,
		) {
			return nil
		}
		return errors.New(`backup database version is higher than restore database version. ` +
			`Should be restored to the same version as the backup database or higher. ` +
			`For example, you can restore MySQL 8.0 backup to MySQL 8.0, 8.4 or higher. But cannot restore to 5.7`)
	}
	// Unknown engine types are not validated here.
	return nil
}

View File

@@ -1,11 +1,13 @@
package usecases
import (
usecases_mysql "postgresus-backend/internal/features/restores/usecases/mysql"
usecases_postgresql "postgresus-backend/internal/features/restores/usecases/postgresql"
)
// Package-level singleton combining the engine-specific restore usecases
// (PostgreSQL and MySQL) behind the generic dispatcher.
var restoreBackupUsecase = &RestoreBackupUsecase{
	usecases_postgresql.GetRestorePostgresqlBackupUsecase(),
	usecases_mysql.GetRestoreMysqlBackupUsecase(),
}
func GetRestoreBackupUsecase() *RestoreBackupUsecase {

View File

@@ -0,0 +1,15 @@
package usecases_mysql
import (
"postgresus-backend/internal/features/encryption/secrets"
"postgresus-backend/internal/util/logger"
)
// Package-level singleton wired with the default logger and secret key
// service; exposed through GetRestoreMysqlBackupUsecase.
var restoreMysqlBackupUsecase = &RestoreMysqlBackupUsecase{
	logger.GetLogger(),
	secrets.GetSecretKeyService(),
}
func GetRestoreMysqlBackupUsecase() *RestoreMysqlBackupUsecase {
return restoreMysqlBackupUsecase
}

View File

@@ -0,0 +1,463 @@
package usecases_mysql
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"log/slog"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/klauspost/compress/zstd"
"postgresus-backend/internal/config"
"postgresus-backend/internal/features/backups/backups"
"postgresus-backend/internal/features/backups/backups/encryption"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
mysqltypes "postgresus-backend/internal/features/databases/databases/mysql"
encryption_secrets "postgresus-backend/internal/features/encryption/secrets"
"postgresus-backend/internal/features/restores/models"
"postgresus-backend/internal/features/storages"
util_encryption "postgresus-backend/internal/util/encryption"
files_utils "postgresus-backend/internal/util/files"
"postgresus-backend/internal/util/tools"
)
// RestoreMysqlBackupUsecase restores a MySQL logical backup (a
// zstd-compressed, optionally encrypted SQL dump) by streaming it into the
// mysql client binary.
type RestoreMysqlBackupUsecase struct {
	logger *slog.Logger
	// Supplies the master key used to decrypt encrypted backups.
	secretKeyService *encryption_secrets.SecretKeyService
}
// Execute restores a MySQL backup into the database described by
// restoringToDB. It validates that the backup really is a MySQL backup and
// that the target carries a connection config plus a database name, then
// delegates the download/decrypt/stream pipeline to restoreFromStorage.
//
// backupConfig is currently unused here but kept for signature parity with
// the other engine usecases — confirm before removing.
// Returns an error when validation fails or any restore stage fails.
func (uc *RestoreMysqlBackupUsecase) Execute(
	originalDB *databases.Database,
	restoringToDB *databases.Database,
	backupConfig *backups_config.BackupConfig,
	restore models.Restore,
	backup *backups.Backup,
	storage *storages.Storage,
) error {
	if originalDB.Type != databases.DatabaseTypeMysql {
		return errors.New("database type not supported")
	}

	uc.logger.Info(
		"Restoring MySQL backup via mysql client",
		"restoreId", restore.ID,
		"backupId", backup.ID,
	)

	my := restoringToDB.Mysql
	if my == nil {
		return errors.New("mysql configuration is required for restore")
	}
	if my.Database == nil || *my.Database == "" {
		return errors.New("target database name is required for mysql restore")
	}

	args := []string{
		"--host=" + my.Host,
		"--port=" + strconv.Itoa(my.Port),
		"--user=" + my.Username,
		"--verbose",
	}
	if my.IsHttps {
		args = append(args, "--ssl-mode=REQUIRED")
	}
	// The guard above guarantees a non-empty database name, so append it
	// unconditionally (previously this condition was checked twice).
	args = append(args, *my.Database)

	return uc.restoreFromStorage(
		originalDB,
		tools.GetMysqlExecutable(
			my.Version,
			tools.MysqlExecutableMysql,
			config.GetEnv().EnvMode,
			config.GetEnv().MysqlInstallDir,
		),
		args,
		my.Password,
		backup,
		storage,
		my,
	)
}
// restoreFromStorage orchestrates one restore run: decrypts the stored DB
// password, writes a throwaway .my.cnf with the credentials, downloads the
// backup into a temp file, and streams it into the mysql client. The whole
// run is bounded by a 60-minute timeout and aborted early when the service
// is shutting down.
func (uc *RestoreMysqlBackupUsecase) restoreFromStorage(
	database *databases.Database,
	mysqlBin string,
	args []string,
	password string,
	backup *backups.Backup,
	storage *storages.Storage,
	myConfig *mysqltypes.MysqlDatabase,
) error {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute)
	defer cancel()
	// Watchdog: poll the shutdown flag once per second and cancel the
	// context so the child process and copies stop promptly. Exits when the
	// context ends for any reason.
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				if config.IsShouldShutdown() {
					cancel()
					return
				}
			}
		}
	}()
	// The password is stored encrypted, keyed by the database ID.
	fieldEncryptor := util_encryption.GetFieldEncryptor()
	decryptedPassword, err := fieldEncryptor.Decrypt(database.ID, password)
	if err != nil {
		return fmt.Errorf("failed to decrypt password: %w", err)
	}
	myCnfFile, err := uc.createTempMyCnfFile(myConfig, decryptedPassword)
	if err != nil {
		return fmt.Errorf("failed to create .my.cnf: %w", err)
	}
	// Remove the whole temp dir holding the credentials file, not just the file.
	defer func() { _ = os.RemoveAll(filepath.Dir(myCnfFile)) }()
	tempBackupFile, cleanupFunc, err := uc.downloadBackupToTempFile(ctx, backup, storage)
	if err != nil {
		return fmt.Errorf("failed to download backup: %w", err)
	}
	defer cleanupFunc()
	return uc.executeMysqlRestore(ctx, database, mysqlBin, args, myCnfFile, tempBackupFile, backup)
}
// executeMysqlRestore runs the mysql client and feeds it the backup file via
// stdin through a zstd decompressor (and a decryption layer when the backup
// is encrypted). Credentials come from the temporary defaults file. Failures
// are classified into user-actionable errors by handleMysqlRestoreError.
func (uc *RestoreMysqlBackupUsecase) executeMysqlRestore(
	ctx context.Context,
	database *databases.Database,
	mysqlBin string,
	args []string,
	myCnfFile string,
	backupFile string,
	backup *backups.Backup,
) error {
	// --defaults-file must come before every other mysql argument.
	fullArgs := append([]string{"--defaults-file=" + myCnfFile}, args...)
	cmd := exec.CommandContext(ctx, mysqlBin, fullArgs...)
	uc.logger.Info("Executing MySQL restore command", "command", cmd.String())
	backupFileHandle, err := os.Open(backupFile)
	if err != nil {
		return fmt.Errorf("failed to open backup file: %w", err)
	}
	defer func() { _ = backupFileHandle.Close() }()
	// Read pipeline: file -> [decrypt] -> zstd decompress -> mysql stdin.
	var inputReader io.Reader = backupFileHandle
	if backup.Encryption == backups_config.BackupEncryptionEncrypted {
		decryptReader, err := uc.setupDecryption(backupFileHandle, backup)
		if err != nil {
			return fmt.Errorf("failed to setup decryption: %w", err)
		}
		inputReader = decryptReader
	}
	zstdReader, err := zstd.NewReader(inputReader)
	if err != nil {
		return fmt.Errorf("failed to create zstd reader: %w", err)
	}
	defer zstdReader.Close()
	cmd.Stdin = zstdReader
	cmd.Env = os.Environ()
	// Blank MYSQL_PWD so only the defaults-file credentials apply, and pin a
	// UTF-8 locale for consistent client behavior.
	cmd.Env = append(cmd.Env,
		"MYSQL_PWD=",
		"LC_ALL=C.UTF-8",
		"LANG=C.UTF-8",
	)
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("stderr pipe: %w", err)
	}
	// Drain stderr concurrently so the child never blocks on a full pipe;
	// the result is collected after Wait.
	stderrCh := make(chan []byte, 1)
	go func() {
		output, _ := io.ReadAll(stderrPipe)
		stderrCh <- output
	}()
	if err = cmd.Start(); err != nil {
		return fmt.Errorf("start mysql: %w", err)
	}
	waitErr := cmd.Wait()
	stderrOutput := <-stderrCh
	// A shutdown-triggered kill is reported as cancellation, not a mysql error.
	if config.IsShouldShutdown() {
		return fmt.Errorf("restore cancelled due to shutdown")
	}
	if waitErr != nil {
		return uc.handleMysqlRestoreError(database, waitErr, stderrOutput, mysqlBin)
	}
	return nil
}
// downloadBackupToTempFile copies the backup object from storage into a
// fresh per-restore temp directory and returns the local file path together
// with a cleanup func that removes the directory. The cleanup func is only
// returned on success; error paths clean up before returning.
func (uc *RestoreMysqlBackupUsecase) downloadBackupToTempFile(
	ctx context.Context,
	backup *backups.Backup,
	storage *storages.Storage,
) (string, func(), error) {
	err := files_utils.EnsureDirectories([]string{
		config.GetEnv().TempFolder,
	})
	if err != nil {
		return "", nil, fmt.Errorf("failed to ensure directories: %w", err)
	}
	tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "restore_"+uuid.New().String())
	if err != nil {
		return "", nil, fmt.Errorf("failed to create temporary directory: %w", err)
	}
	cleanupFunc := func() {
		_ = os.RemoveAll(tempDir)
	}
	tempBackupFile := filepath.Join(tempDir, "backup.sql.zst")
	uc.logger.Info(
		"Downloading backup file from storage to temporary file",
		"backupId", backup.ID,
		"tempFile", tempBackupFile,
		"encrypted", backup.Encryption == backups_config.BackupEncryptionEncrypted,
	)
	fieldEncryptor := util_encryption.GetFieldEncryptor()
	rawReader, err := storage.GetFile(fieldEncryptor, backup.ID)
	if err != nil {
		cleanupFunc()
		return "", nil, fmt.Errorf("failed to get backup file from storage: %w", err)
	}
	defer func() {
		if err := rawReader.Close(); err != nil {
			uc.logger.Error("Failed to close backup reader", "error", err)
		}
	}()
	tempFile, err := os.Create(tempBackupFile)
	if err != nil {
		cleanupFunc()
		return "", nil, fmt.Errorf("failed to create temporary backup file: %w", err)
	}
	// Closed via defer before this function returns; the restore step later
	// reopens the file by path.
	defer func() {
		if err := tempFile.Close(); err != nil {
			uc.logger.Error("Failed to close temporary file", "error", err)
		}
	}()
	// Chunked copy that honors context cancellation and shutdown requests.
	_, err = uc.copyWithShutdownCheck(ctx, tempFile, rawReader)
	if err != nil {
		cleanupFunc()
		return "", nil, fmt.Errorf("failed to write backup to temporary file: %w", err)
	}
	uc.logger.Info("Backup file written to temporary location", "tempFile", tempBackupFile)
	return tempBackupFile, cleanupFunc, nil
}
// setupDecryption wraps reader in a decryption layer built from the backup's
// stored salt/IV (base64) and the service master key. The backup must carry
// both pieces of encryption metadata.
func (uc *RestoreMysqlBackupUsecase) setupDecryption(
	reader io.Reader,
	backup *backups.Backup,
) (io.Reader, error) {
	if backup.EncryptionSalt == nil || backup.EncryptionIV == nil {
		return nil, fmt.Errorf("backup is encrypted but missing encryption metadata")
	}

	masterKey, err := uc.secretKeyService.GetSecretKey()
	if err != nil {
		return nil, fmt.Errorf("failed to get master key for decryption: %w", err)
	}

	decodedSalt, err := base64.StdEncoding.DecodeString(*backup.EncryptionSalt)
	if err != nil {
		return nil, fmt.Errorf("failed to decode encryption salt: %w", err)
	}

	decodedIV, err := base64.StdEncoding.DecodeString(*backup.EncryptionIV)
	if err != nil {
		return nil, fmt.Errorf("failed to decode encryption IV: %w", err)
	}

	decrypted, err := encryption.NewDecryptionReader(
		reader,
		masterKey,
		backup.ID,
		decodedSalt,
		decodedIV,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create decryption reader: %w", err)
	}

	uc.logger.Info("Using decryption for encrypted backup", "backupId", backup.ID)
	return decrypted, nil
}
// createTempMyCnfFile writes MySQL client credentials into a .my.cnf inside
// a fresh 0600 temp file under a private temp directory and returns the file
// path. The caller is responsible for removing the containing directory on
// success (the restore path does so via os.RemoveAll(filepath.Dir(...))).
//
// Fix: previously the temp directory leaked when writing the file failed;
// it is now removed on that error path.
func (uc *RestoreMysqlBackupUsecase) createTempMyCnfFile(
	myConfig *mysqltypes.MysqlDatabase,
	password string,
) (string, error) {
	tempDir, err := os.MkdirTemp("", "mycnf")
	if err != nil {
		return "", fmt.Errorf("failed to create temp directory: %w", err)
	}
	myCnfFile := filepath.Join(tempDir, ".my.cnf")
	// Password is quoted and escaped so special characters survive the
	// option-file parser.
	content := fmt.Sprintf(`[client]
user=%s
password="%s"
host=%s
port=%d
`, myConfig.Username, tools.EscapeMysqlPassword(password), myConfig.Host, myConfig.Port)
	if myConfig.IsHttps {
		content += "ssl-mode=REQUIRED\n"
	}
	// 0600: the file holds credentials.
	if err = os.WriteFile(myCnfFile, []byte(content), 0600); err != nil {
		// Don't leak the temp directory when the write fails.
		_ = os.RemoveAll(tempDir)
		return "", fmt.Errorf("failed to write .my.cnf: %w", err)
	}
	return myCnfFile, nil
}
// copyWithShutdownCheck streams src into dst in fixed-size chunks, checking
// before each chunk whether the context was cancelled or a service shutdown
// was requested. It returns the number of bytes successfully written.
func (uc *RestoreMysqlBackupUsecase) copyWithShutdownCheck(
	ctx context.Context,
	dst io.Writer,
	src io.Reader,
) (int64, error) {
	const chunkSize = 16 * 1024 * 1024
	chunk := make([]byte, chunkSize)

	var written int64
	for {
		// Abort between chunks on cancellation or shutdown.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return written, fmt.Errorf("copy cancelled: %w", ctxErr)
		}
		if config.IsShouldShutdown() {
			return written, fmt.Errorf("copy cancelled due to shutdown")
		}

		n, readErr := src.Read(chunk)
		if n > 0 {
			// Guard against misbehaving writers, mirroring io.Copy's checks.
			w, writeErr := dst.Write(chunk[:n])
			if w < 0 || n < w {
				w = 0
				if writeErr == nil {
					writeErr = fmt.Errorf("invalid write result")
				}
			}
			if writeErr != nil {
				return written, writeErr
			}
			if w != n {
				return written, io.ErrShortWrite
			}
			written += int64(w)
		}
		if readErr == io.EOF {
			return written, nil
		}
		if readErr != nil {
			return written, readErr
		}
	}
}
// handleMysqlRestoreError turns a failed mysql invocation into a
// user-actionable error by scanning stderr for well-known failure markers
// (auth, connectivity, missing database, SSL, timeout). Unrecognized
// failures fall back to the raw exit error plus stderr.
func (uc *RestoreMysqlBackupUsecase) handleMysqlRestoreError(
	database *databases.Database,
	waitErr error,
	stderrOutput []byte,
	mysqlBin string,
) error {
	stderrStr := string(stderrOutput)
	// Fallback message used when no known pattern matches below.
	errorMsg := fmt.Sprintf(
		"%s failed: %v stderr: %s",
		filepath.Base(mysqlBin),
		waitErr,
		stderrStr,
	)
	// Bad credentials.
	if containsIgnoreCase(stderrStr, "access denied") {
		return fmt.Errorf(
			"MySQL access denied. Check username and password. stderr: %s",
			stderrStr,
		)
	}
	// Server unreachable.
	if containsIgnoreCase(stderrStr, "can't connect") ||
		containsIgnoreCase(stderrStr, "connection refused") {
		return fmt.Errorf(
			"MySQL connection refused. Check if the server is running and accessible. stderr: %s",
			stderrStr,
		)
	}
	// Target schema missing — include the backup's db name for context.
	if containsIgnoreCase(stderrStr, "unknown database") {
		backupDbName := "unknown"
		if database.Mysql != nil && database.Mysql.Database != nil {
			backupDbName = *database.Mysql.Database
		}
		return fmt.Errorf(
			"target database does not exist (backup db %s). Create the database before restoring. stderr: %s",
			backupDbName,
			stderrStr,
		)
	}
	// TLS negotiation problems.
	if containsIgnoreCase(stderrStr, "ssl") {
		return fmt.Errorf(
			"MySQL SSL connection failed. stderr: %s",
			stderrStr,
		)
	}
	if containsIgnoreCase(stderrStr, "timeout") {
		return fmt.Errorf(
			"MySQL connection timeout. stderr: %s",
			stderrStr,
		)
	}
	return errors.New(errorMsg)
}
// containsIgnoreCase reports whether substr occurs within str, comparing
// case-insensitively by lowercasing both operands first.
func containsIgnoreCase(str, substr string) bool {
	haystack := strings.ToLower(str)
	needle := strings.ToLower(substr)
	return strings.Contains(haystack, needle)
}

View File

@@ -42,6 +42,7 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
restore models.Restore,
backup *backups.Backup,
storage *storages.Storage,
isExcludeExtensions bool,
) error {
if originalDB.Type != databases.DatabaseTypePostgres {
return errors.New("database type not supported")
@@ -96,6 +97,7 @@ func (uc *RestorePostgresqlBackupUsecase) Execute(
backup,
storage,
pg,
isExcludeExtensions,
)
}
@@ -108,6 +110,7 @@ func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
backup *backups.Backup,
storage *storages.Storage,
pgConfig *pgtypes.PostgresqlDatabase,
isExcludeExtensions bool,
) error {
uc.logger.Info(
"Restoring PostgreSQL backup from storage via temporary file",
@@ -115,6 +118,8 @@ func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
pgBin,
"args",
args,
"isExcludeExtensions",
isExcludeExtensions,
)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute)
@@ -171,6 +176,26 @@ func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
}
defer cleanupFunc()
// If excluding extensions, generate filtered TOC list and use it
if isExcludeExtensions {
tocListFile, err := uc.generateFilteredTocList(
ctx,
pgBin,
tempBackupFile,
pgpassFile,
pgConfig,
)
if err != nil {
return fmt.Errorf("failed to generate filtered TOC list: %w", err)
}
defer func() {
_ = os.Remove(tocListFile)
}()
// Add -L flag to use the filtered list
args = append(args, "-L", tocListFile)
}
// Add the temporary backup file as the last argument to pg_restore
args = append(args, tempBackupFile)
@@ -554,6 +579,75 @@ func containsIgnoreCase(str, substr string) bool {
return strings.Contains(strings.ToLower(str), strings.ToLower(substr))
}
// generateFilteredTocList generates a pg_restore TOC list file with extensions filtered out.
// This is used when isExcludeExtensions is true to skip CREATE EXTENSION statements.
// It returns the path of a temporary list file suitable for pg_restore's -L
// flag; the caller is responsible for removing that file.
func (uc *RestorePostgresqlBackupUsecase) generateFilteredTocList(
	ctx context.Context,
	pgBin string,
	backupFile string,
	pgpassFile string,
	pgConfig *pgtypes.PostgresqlDatabase,
) (string, error) {
	uc.logger.Info("Generating filtered TOC list to exclude extensions", "backupFile", backupFile)
	// Run pg_restore -l to get the TOC list
	listCmd := exec.CommandContext(ctx, pgBin, "-l", backupFile)
	uc.setupPgRestoreEnvironment(listCmd, pgpassFile, pgConfig)
	tocOutput, err := listCmd.Output()
	if err != nil {
		return "", fmt.Errorf("failed to generate TOC list: %w", err)
	}
	// Filter out EXTENSION-related lines (both CREATE EXTENSION and COMMENT ON EXTENSION)
	var filteredLines []string
	for line := range strings.SplitSeq(string(tocOutput), "\n") {
		trimmedLine := strings.TrimSpace(line)
		if trimmedLine == "" {
			continue
		}
		upperLine := strings.ToUpper(trimmedLine)
		// Skip lines that contain " EXTENSION " - this catches both:
		// - CREATE EXTENSION entries: "3420; 0 0 EXTENSION - uuid-ossp"
		// - COMMENT ON EXTENSION entries: "3462; 0 0 COMMENT - EXTENSION "uuid-ossp""
		if strings.Contains(upperLine, " EXTENSION ") {
			uc.logger.Info("Excluding extension-related entry from restore", "tocLine", trimmedLine)
			continue
		}
		filteredLines = append(filteredLines, line)
	}
	// Write filtered TOC to temporary file
	tocFile, err := os.CreateTemp("", "pg_restore_toc_*.list")
	if err != nil {
		return "", fmt.Errorf("failed to create TOC list file: %w", err)
	}
	tocFilePath := tocFile.Name()
	filteredContent := strings.Join(filteredLines, "\n")
	// Remove the partially-written file on any failure below.
	if _, err := tocFile.WriteString(filteredContent); err != nil {
		_ = tocFile.Close()
		_ = os.Remove(tocFilePath)
		return "", fmt.Errorf("failed to write TOC list file: %w", err)
	}
	if err := tocFile.Close(); err != nil {
		_ = os.Remove(tocFilePath)
		return "", fmt.Errorf("failed to close TOC list file: %w", err)
	}
	uc.logger.Info("Generated filtered TOC list file",
		"tocFile", tocFilePath,
		"originalLines", len(strings.Split(string(tocOutput), "\n")),
		"filteredLines", len(filteredLines),
	)
	return tocFilePath, nil
}
// createTempPgpassFile creates a temporary .pgpass file with the given password
func (uc *RestorePostgresqlBackupUsecase) createTempPgpassFile(
pgConfig *pgtypes.PostgresqlDatabase,

View File

@@ -2,16 +2,19 @@ package usecases
import (
"errors"
"postgresus-backend/internal/features/backups/backups"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/restores/models"
usecases_mysql "postgresus-backend/internal/features/restores/usecases/mysql"
usecases_postgresql "postgresus-backend/internal/features/restores/usecases/postgresql"
"postgresus-backend/internal/features/storages"
)
// RestoreBackupUsecase dispatches a restore request to the engine-specific
// usecase selected by the backup's original database type.
type RestoreBackupUsecase struct {
	restorePostgresqlBackupUsecase *usecases_postgresql.RestorePostgresqlBackupUsecase
	restoreMysqlBackupUsecase      *usecases_mysql.RestoreMysqlBackupUsecase
}
func (uc *RestoreBackupUsecase) Execute(
@@ -21,8 +24,10 @@ func (uc *RestoreBackupUsecase) Execute(
restoringToDB *databases.Database,
backup *backups.Backup,
storage *storages.Storage,
isExcludeExtensions bool,
) error {
if originalDB.Type == databases.DatabaseTypePostgres {
switch originalDB.Type {
case databases.DatabaseTypePostgres:
return uc.restorePostgresqlBackupUsecase.Execute(
originalDB,
restoringToDB,
@@ -30,8 +35,18 @@ func (uc *RestoreBackupUsecase) Execute(
restore,
backup,
storage,
isExcludeExtensions,
)
case databases.DatabaseTypeMysql:
return uc.restoreMysqlBackupUsecase.Execute(
originalDB,
restoringToDB,
backupConfig,
restore,
backup,
storage,
)
default:
return errors.New("database type not supported")
}
return errors.New("database type not supported")
}

View File

@@ -12,7 +12,9 @@ import (
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
rclone_storage "postgresus-backend/internal/features/storages/models/rclone"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
sftp_storage "postgresus-backend/internal/features/storages/models/sftp"
users_enums "postgresus-backend/internal/features/users/enums"
users_middleware "postgresus-backend/internal/features/users/middleware"
users_services "postgresus-backend/internal/features/users/services"
@@ -786,6 +788,108 @@ func Test_StorageSensitiveDataLifecycle_AllTypes(t *testing.T) {
assert.Equal(t, "", storage.FTPStorage.Password)
},
},
{
name: "SFTP Storage",
storageType: StorageTypeSFTP,
createStorage: func(workspaceID uuid.UUID) *Storage {
return &Storage{
WorkspaceID: workspaceID,
Type: StorageTypeSFTP,
Name: "Test SFTP Storage",
SFTPStorage: &sftp_storage.SFTPStorage{
Host: "sftp.example.com",
Port: 22,
Username: "testuser",
Password: "original-password",
PrivateKey: "original-private-key",
SkipHostKeyVerify: false,
Path: "/backups",
},
}
},
updateStorage: func(workspaceID uuid.UUID, storageID uuid.UUID) *Storage {
return &Storage{
ID: storageID,
WorkspaceID: workspaceID,
Type: StorageTypeSFTP,
Name: "Updated SFTP Storage",
SFTPStorage: &sftp_storage.SFTPStorage{
Host: "sftp2.example.com",
Port: 2222,
Username: "testuser2",
Password: "",
PrivateKey: "",
SkipHostKeyVerify: true,
Path: "/backups2",
},
}
},
verifySensitiveData: func(t *testing.T, storage *Storage) {
assert.True(t, strings.HasPrefix(storage.SFTPStorage.Password, "enc:"),
"Password should be encrypted with 'enc:' prefix")
assert.True(t, strings.HasPrefix(storage.SFTPStorage.PrivateKey, "enc:"),
"PrivateKey should be encrypted with 'enc:' prefix")
encryptor := encryption.GetFieldEncryptor()
password, err := encryptor.Decrypt(storage.ID, storage.SFTPStorage.Password)
assert.NoError(t, err)
assert.Equal(t, "original-password", password)
privateKey, err := encryptor.Decrypt(storage.ID, storage.SFTPStorage.PrivateKey)
assert.NoError(t, err)
assert.Equal(t, "original-private-key", privateKey)
},
verifyHiddenData: func(t *testing.T, storage *Storage) {
assert.Equal(t, "", storage.SFTPStorage.Password)
assert.Equal(t, "", storage.SFTPStorage.PrivateKey)
},
},
{
name: "Rclone Storage",
storageType: StorageTypeRclone,
createStorage: func(workspaceID uuid.UUID) *Storage {
return &Storage{
WorkspaceID: workspaceID,
Type: StorageTypeRclone,
Name: "Test Rclone Storage",
RcloneStorage: &rclone_storage.RcloneStorage{
ConfigContent: "[myremote]\ntype = s3\nprovider = AWS\naccess_key_id = test\nsecret_access_key = secret\n",
RemotePath: "/backups",
},
}
},
updateStorage: func(workspaceID uuid.UUID, storageID uuid.UUID) *Storage {
return &Storage{
ID: storageID,
WorkspaceID: workspaceID,
Type: StorageTypeRclone,
Name: "Updated Rclone Storage",
RcloneStorage: &rclone_storage.RcloneStorage{
ConfigContent: "",
RemotePath: "/backups2",
},
}
},
verifySensitiveData: func(t *testing.T, storage *Storage) {
assert.True(t, strings.HasPrefix(storage.RcloneStorage.ConfigContent, "enc:"),
"ConfigContent should be encrypted with 'enc:' prefix")
encryptor := encryption.GetFieldEncryptor()
configContent, err := encryptor.Decrypt(
storage.ID,
storage.RcloneStorage.ConfigContent,
)
assert.NoError(t, err)
assert.Equal(
t,
"[myremote]\ntype = s3\nprovider = AWS\naccess_key_id = test\nsecret_access_key = secret\n",
configContent,
)
},
verifyHiddenData: func(t *testing.T, storage *Storage) {
assert.Equal(t, "", storage.RcloneStorage.ConfigContent)
},
},
}
for _, tc := range testCases {

View File

@@ -9,4 +9,6 @@ const (
StorageTypeNAS StorageType = "NAS"
StorageTypeAzureBlob StorageType = "AZURE_BLOB"
StorageTypeFTP StorageType = "FTP"
StorageTypeSFTP StorageType = "SFTP"
StorageTypeRclone StorageType = "RCLONE"
)

View File

@@ -10,7 +10,9 @@ import (
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
rclone_storage "postgresus-backend/internal/features/storages/models/rclone"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
sftp_storage "postgresus-backend/internal/features/storages/models/sftp"
"postgresus-backend/internal/util/encryption"
"github.com/google/uuid"
@@ -30,6 +32,8 @@ type Storage struct {
NASStorage *nas_storage.NASStorage `json:"nasStorage" gorm:"foreignKey:StorageID"`
AzureBlobStorage *azure_blob_storage.AzureBlobStorage `json:"azureBlobStorage" gorm:"foreignKey:StorageID"`
FTPStorage *ftp_storage.FTPStorage `json:"ftpStorage" gorm:"foreignKey:StorageID"`
SFTPStorage *sftp_storage.SFTPStorage `json:"sftpStorage" gorm:"foreignKey:StorageID"`
RcloneStorage *rclone_storage.RcloneStorage `json:"rcloneStorage" gorm:"foreignKey:StorageID"`
}
func (s *Storage) SaveFile(
@@ -115,6 +119,14 @@ func (s *Storage) Update(incoming *Storage) {
if s.FTPStorage != nil && incoming.FTPStorage != nil {
s.FTPStorage.Update(incoming.FTPStorage)
}
case StorageTypeSFTP:
if s.SFTPStorage != nil && incoming.SFTPStorage != nil {
s.SFTPStorage.Update(incoming.SFTPStorage)
}
case StorageTypeRclone:
if s.RcloneStorage != nil && incoming.RcloneStorage != nil {
s.RcloneStorage.Update(incoming.RcloneStorage)
}
}
}
@@ -132,6 +144,10 @@ func (s *Storage) getSpecificStorage() StorageFileSaver {
return s.AzureBlobStorage
case StorageTypeFTP:
return s.FTPStorage
case StorageTypeSFTP:
return s.SFTPStorage
case StorageTypeRclone:
return s.RcloneStorage
default:
panic("invalid storage type: " + string(s.Type))
}

View File

@@ -13,7 +13,9 @@ import (
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
rclone_storage "postgresus-backend/internal/features/storages/models/rclone"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
sftp_storage "postgresus-backend/internal/features/storages/models/sftp"
"postgresus-backend/internal/util/encryption"
"postgresus-backend/internal/util/logger"
"strconv"
@@ -79,6 +81,14 @@ func Test_Storage_BasicOperations(t *testing.T) {
}
}
// Setup SFTP port
sftpPort := 22
if portStr := config.GetEnv().TestSFTPPort; portStr != "" {
if port, err := strconv.Atoi(portStr); err == nil {
sftpPort = port
}
}
// Run tests
testCases := []struct {
name string
@@ -145,6 +155,32 @@ func Test_Storage_BasicOperations(t *testing.T) {
Path: "test-files",
},
},
{
name: "SFTPStorage",
storage: &sftp_storage.SFTPStorage{
StorageID: uuid.New(),
Host: "localhost",
Port: sftpPort,
Username: "testuser",
Password: "testpassword",
SkipHostKeyVerify: true,
Path: "upload",
},
},
{
name: "RcloneStorage",
storage: &rclone_storage.RcloneStorage{
StorageID: uuid.New(),
ConfigContent: fmt.Sprintf(`[minio]
type = s3
provider = Other
access_key_id = %s
secret_access_key = %s
endpoint = http://%s
acl = private`, s3Container.accessKey, s3Container.secretKey, s3Container.endpoint),
RemotePath: s3Container.bucketName,
},
},
}
// Add Google Drive storage test only if environment variables are available

View File

@@ -0,0 +1,293 @@
package rclone_storage
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"log/slog"
"postgresus-backend/internal/util/encryption"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/operations"
_ "github.com/rclone/rclone/backend/all"
)
const (
	// rcloneOperationTimeout bounds individual rclone operations.
	// NOTE(review): not referenced in the code visible here — confirm it is
	// used by the operations further down this file.
	rcloneOperationTimeout = 30 * time.Second
)

// rcloneConfigMu serializes access to rclone's process-global configuration.
// NOTE(review): presumably guards config mutation inside getFs — confirm.
var rcloneConfigMu sync.Mutex
// RcloneStorage stores backup files on any rclone-supported remote. The raw
// rclone config file content is persisted alongside the remote path used as
// the upload root (ConfigContent is encrypted at the service layer).
type RcloneStorage struct {
	StorageID     uuid.UUID `json:"storageId" gorm:"primaryKey;type:uuid;column:storage_id"`
	ConfigContent string    `json:"configContent" gorm:"not null;type:text;column:config_content"`
	RemotePath    string    `json:"remotePath" gorm:"type:text;column:remote_path"`
}
// TableName tells GORM which database table backs this model.
func (r *RcloneStorage) TableName() string {
	return "rclone_storages"
}
// SaveFile streams the given reader to the rclone remote under the file's
// UUID-derived path. When ctx is cancelled, the cancellation is reported as
// ctx.Err() rather than as a wrapped rclone error.
func (r *RcloneStorage) SaveFile(
	ctx context.Context,
	encryptor encryption.FieldEncryptor,
	logger *slog.Logger,
	fileID uuid.UUID,
	file io.Reader,
) error {
	// Bail out immediately if the caller already cancelled.
	if err := ctx.Err(); err != nil {
		return err
	}

	id := fileID.String()
	logger.Info("Starting to save file to rclone storage", "fileId", id)

	backend, err := r.getFs(ctx, encryptor)
	if err != nil {
		logger.Error("Failed to create rclone filesystem", "fileId", id, "error", err)
		return fmt.Errorf("failed to create rclone filesystem: %w", err)
	}

	target := r.getFilePath(id)
	logger.Debug("Uploading file via rclone", "fileId", id, "filePath", target)

	if _, err = operations.Rcat(ctx, backend, target, io.NopCloser(file), time.Now().UTC(), nil); err != nil {
		// Distinguish caller cancellation from a genuine upload failure.
		if ctx.Err() != nil {
			logger.Info("Rclone upload cancelled", "fileId", id)
			return ctx.Err()
		}
		logger.Error(
			"Failed to upload file via rclone",
			"fileId",
			id,
			"error",
			err,
		)
		return fmt.Errorf("failed to upload file via rclone: %w", err)
	}

	logger.Info(
		"Successfully saved file to rclone storage",
		"fileId",
		id,
		"filePath",
		target,
	)
	return nil
}
// GetFile opens the stored object for reading. The caller owns the returned
// ReadCloser and must close it when done.
func (r *RcloneStorage) GetFile(
	encryptor encryption.FieldEncryptor,
	fileID uuid.UUID,
) (io.ReadCloser, error) {
	ctx := context.Background()

	backend, err := r.getFs(ctx, encryptor)
	if err != nil {
		return nil, fmt.Errorf("failed to create rclone filesystem: %w", err)
	}

	object, err := backend.NewObject(ctx, r.getFilePath(fileID.String()))
	if err != nil {
		return nil, fmt.Errorf("failed to get object from rclone: %w", err)
	}

	stream, err := object.Open(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to open object from rclone: %w", err)
	}
	return stream, nil
}
// DeleteFile removes the backup file identified by fileID from the remote.
// A missing file is treated as success (idempotent delete); any other lookup
// or removal failure is reported to the caller.
func (r *RcloneStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
	ctx := context.Background()
	remoteFs, err := r.getFs(ctx, encryptor)
	if err != nil {
		return fmt.Errorf("failed to create rclone filesystem: %w", err)
	}
	filePath := r.getFilePath(fileID.String())
	obj, err := remoteFs.NewObject(ctx, filePath)
	if err != nil {
		// Only swallow "object not found": deleting an already-absent file is
		// a no-op. The previous code returned nil for EVERY lookup error, so
		// transient auth/network failures were mistaken for successful deletes
		// and the file was silently left behind on the remote.
		if errors.Is(err, fs.ErrorObjectNotFound) {
			return nil
		}
		return fmt.Errorf("failed to look up file in rclone: %w", err)
	}
	if err := obj.Remove(ctx); err != nil {
		return fmt.Errorf("failed to delete file from rclone: %w", err)
	}
	return nil
}
// Validate checks the storage is configured well enough to attempt a
// connection: the plaintext config must be present and must parse into at
// least one remote section. Previously any non-empty string was accepted and
// garbage configs only failed later, deep inside getFs.
//
// The encryptor parameter is unused: Validate runs on the plaintext config
// before encryption (mirroring SFTPStorage.Validate).
func (r *RcloneStorage) Validate(encryptor encryption.FieldEncryptor) error {
	if r.ConfigContent == "" {
		return errors.New("rclone config content is required")
	}
	parsed, err := parseConfigContent(r.ConfigContent)
	if err != nil {
		return fmt.Errorf("rclone config is not parseable: %w", err)
	}
	if len(parsed) == 0 {
		return errors.New("rclone config must contain at least one remote section")
	}
	return nil
}
// TestConnection verifies the config end-to-end by uploading a tiny probe
// file to the remote, looking it up, and deleting it again. The whole round
// trip is bounded by rcloneOperationTimeout.
func (r *RcloneStorage) TestConnection(encryptor encryption.FieldEncryptor) error {
	ctx, cancel := context.WithTimeout(context.Background(), rcloneOperationTimeout)
	defer cancel()

	backend, err := r.getFs(ctx, encryptor)
	if err != nil {
		return fmt.Errorf("failed to create rclone filesystem: %w", err)
	}

	// Unique probe name so concurrent connection tests never collide.
	probePath := r.getFilePath(uuid.New().String() + "-test")
	probeBody := io.NopCloser(strings.NewReader("test connection"))

	if _, err = operations.Rcat(ctx, backend, probePath, probeBody, time.Now().UTC(), nil); err != nil {
		return fmt.Errorf("failed to upload test file via rclone: %w", err)
	}

	probe, err := backend.NewObject(ctx, probePath)
	if err != nil {
		return fmt.Errorf("failed to get test file from rclone: %w", err)
	}
	if err = probe.Remove(ctx); err != nil {
		return fmt.Errorf("failed to delete test file from rclone: %w", err)
	}
	return nil
}
// HideSensitiveData blanks the config before the model is returned to API
// clients, so the secret never leaves the backend.
func (r *RcloneStorage) HideSensitiveData() {
	r.ConfigContent = ""
}
// EncryptSensitiveData replaces the plaintext config with its encrypted
// form, keyed by the storage ID. A blank config is left untouched.
func (r *RcloneStorage) EncryptSensitiveData(encryptor encryption.FieldEncryptor) error {
	if r.ConfigContent == "" {
		return nil
	}
	ciphertext, err := encryptor.Encrypt(r.StorageID, r.ConfigContent)
	if err != nil {
		return fmt.Errorf("failed to encrypt rclone config content: %w", err)
	}
	r.ConfigContent = ciphertext
	return nil
}
// Update copies mutable settings from incoming. An empty incoming config is
// ignored so that API payloads with the secret hidden (HideSensitiveData)
// do not wipe the stored, encrypted config.
func (r *RcloneStorage) Update(incoming *RcloneStorage) {
	r.RemotePath = incoming.RemotePath
	if cfg := incoming.ConfigContent; cfg != "" {
		r.ConfigContent = cfg
	}
}
// getFs decrypts the stored config, loads every section into rclone's
// process-global in-memory config, and builds an fs.Fs rooted at
// "<remote>:<RemotePath>".
//
// When the config contains several remote sections, the first one in
// lexicographic order is used. The previous code took whichever section Go's
// randomized map iteration visited last, so multi-remote configs picked a
// remote nondeterministically between calls.
func (r *RcloneStorage) getFs(
	ctx context.Context,
	encryptor encryption.FieldEncryptor,
) (fs.Fs, error) {
	configContent, err := encryptor.Decrypt(r.StorageID, r.ConfigContent)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt rclone config content: %w", err)
	}

	// rclone's in-memory config is shared process state; serialize writers so
	// concurrent storages do not interleave FileSetValue calls.
	rcloneConfigMu.Lock()
	defer rcloneConfigMu.Unlock()

	parsedConfig, err := parseConfigContent(configContent)
	if err != nil {
		return nil, fmt.Errorf("failed to parse rclone config: %w", err)
	}
	if len(parsedConfig) == 0 {
		return nil, errors.New("rclone config must contain at least one remote section")
	}

	// Deterministic remote selection: sort the section names and pick the first.
	sectionNames := make([]string, 0, len(parsedConfig))
	for section := range parsedConfig {
		sectionNames = append(sectionNames, section)
	}
	sort.Strings(sectionNames)
	for _, section := range sectionNames {
		for key, value := range parsedConfig[section] {
			config.FileSetValue(section, key, value)
		}
	}
	remoteName := sectionNames[0]

	remotePath := remoteName + ":"
	if r.RemotePath != "" {
		remotePath = remoteName + ":" + strings.TrimPrefix(r.RemotePath, "/")
	}
	remoteFs, err := fs.NewFs(ctx, remotePath)
	if err != nil {
		return nil, fmt.Errorf(
			"failed to create rclone filesystem for remote '%s': %w",
			remoteName,
			err,
		)
	}
	return remoteFs, nil
}
// getFilePath maps a stored file ID to its object path on the remote. The
// base directory is already baked into the fs root by getFs (via RemotePath),
// so the object path is just the bare file name.
func (r *RcloneStorage) getFilePath(filename string) string {
	return filename
}
// parseConfigContent parses an INI-style rclone config into a
// section -> key -> value map. Blank lines and lines starting with '#' or
// ';' are ignored; key/value pairs appearing before the first [section]
// header are dropped. The scanner's error, if any, is returned alongside
// whatever was parsed.
func parseConfigContent(content string) (map[string]map[string]string, error) {
	sections := make(map[string]map[string]string)
	section := ""

	scanner := bufio.NewScanner(strings.NewReader(content))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case line == "", strings.HasPrefix(line, "#"), strings.HasPrefix(line, ";"):
			// Comment or blank line — skip.
		case strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]"):
			section = strings.TrimSuffix(strings.TrimPrefix(line, "["), "]")
			if _, ok := sections[section]; !ok {
				sections[section] = make(map[string]string)
			}
		case section != "" && strings.Contains(line, "="):
			// SplitN with a guaranteed "=" always yields exactly two parts.
			kv := strings.SplitN(line, "=", 2)
			sections[section][strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
	}
	return sections, scanner.Err()
}

View File

@@ -0,0 +1,430 @@
package sftp_storage
import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"net"
"postgresus-backend/internal/util/encryption"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
const (
	// sftpConnectTimeout bounds dialing + the SSH handshake for regular
	// backup/restore operations.
	sftpConnectTimeout = 30 * time.Second
	// sftpTestConnectTimeout is a shorter bound used by TestConnection so
	// interactive connection checks fail fast.
	sftpTestConnectTimeout = 10 * time.Second
)
// SFTPStorage stores backup files on a remote host over SFTP (SSH).
// Authentication is by password and/or private key; both are encrypted at
// rest via EncryptSensitiveData and decrypted at connect time.
type SFTPStorage struct {
	StorageID uuid.UUID `json:"storageId" gorm:"primaryKey;type:uuid;column:storage_id"`
	Host      string    `json:"host" gorm:"not null;type:text;column:host"`
	Port      int       `json:"port" gorm:"not null;default:22;column:port"`
	Username  string    `json:"username" gorm:"not null;type:text;column:username"`
	// Password is optional when PrivateKey is set (and vice versa); Validate
	// requires at least one of the two.
	Password   string `json:"password" gorm:"type:text;column:password"`
	PrivateKey string `json:"privateKey" gorm:"type:text;column:private_key"`
	// Path is the base directory on the server; files land under "/<Path>/".
	Path string `json:"path" gorm:"type:text;column:path"`
	// SkipHostKeyVerify disables SSH host key verification when true.
	SkipHostKeyVerify bool `json:"skipHostKeyVerify" gorm:"not null;default:false;column:skip_host_key_verify"`
}

// TableName sets the GORM table name for this model.
func (s *SFTPStorage) TableName() string {
	return "sftp_storages"
}
// SaveFile uploads the reader's contents to "/<Path>/<fileID>" on the SFTP
// server, creating the base directory first if one is configured.
// Cancellation of ctx is checked before the upload and between reads (via
// contextReader) and is reported as ctx.Err() rather than a wrapped error.
func (s *SFTPStorage) SaveFile(
	ctx context.Context,
	encryptor encryption.FieldEncryptor,
	logger *slog.Logger,
	fileID uuid.UUID,
	file io.Reader,
) error {
	// Fast-path exit if the caller already cancelled.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	logger.Info("Starting to save file to SFTP storage", "fileId", fileID.String(), "host", s.Host)
	client, sshConn, err := s.connect(encryptor, sftpConnectTimeout)
	if err != nil {
		logger.Error("Failed to connect to SFTP", "fileId", fileID.String(), "error", err)
		return fmt.Errorf("failed to connect to SFTP: %w", err)
	}
	// Close the SFTP client before the SSH connection that carries it; close
	// failures are logged but do not override the function's return value.
	defer func() {
		if closeErr := client.Close(); closeErr != nil {
			logger.Error(
				"Failed to close SFTP client",
				"fileId",
				fileID.String(),
				"error",
				closeErr,
			)
		}
		if closeErr := sshConn.Close(); closeErr != nil {
			logger.Error(
				"Failed to close SSH connection",
				"fileId",
				fileID.String(),
				"error",
				closeErr,
			)
		}
	}()
	// Make sure the configured base directory exists before writing into it.
	if s.Path != "" {
		if err := s.ensureDirectory(client, s.Path); err != nil {
			logger.Error(
				"Failed to ensure directory",
				"fileId",
				fileID.String(),
				"path",
				s.Path,
				"error",
				err,
			)
			return fmt.Errorf("failed to ensure directory: %w", err)
		}
	}
	filePath := s.getFilePath(fileID.String())
	logger.Debug("Uploading file to SFTP", "fileId", fileID.String(), "filePath", filePath)
	remoteFile, err := client.Create(filePath)
	if err != nil {
		logger.Error("Failed to create remote file", "fileId", fileID.String(), "error", err)
		return fmt.Errorf("failed to create remote file: %w", err)
	}
	defer func() {
		_ = remoteFile.Close()
	}()
	// contextReader aborts the copy between reads once ctx is cancelled.
	ctxReader := &contextReader{ctx: ctx, reader: file}
	_, err = io.Copy(remoteFile, ctxReader)
	if err != nil {
		// Distinguish caller cancellation from a genuine transfer failure.
		select {
		case <-ctx.Done():
			logger.Info("SFTP upload cancelled", "fileId", fileID.String())
			return ctx.Err()
		default:
			logger.Error("Failed to upload file to SFTP", "fileId", fileID.String(), "error", err)
			return fmt.Errorf("failed to upload file to SFTP: %w", err)
		}
	}
	logger.Info(
		"Successfully saved file to SFTP storage",
		"fileId",
		fileID.String(),
		"filePath",
		filePath,
	)
	return nil
}
// GetFile opens the remote backup file for streaming. The returned
// ReadCloser owns the SFTP client and SSH connection backing it and releases
// both on Close, so the caller must always close it.
func (s *SFTPStorage) GetFile(
	encryptor encryption.FieldEncryptor,
	fileID uuid.UUID,
) (io.ReadCloser, error) {
	client, sshConn, err := s.connect(encryptor, sftpConnectTimeout)
	if err != nil {
		return nil, fmt.Errorf("failed to connect to SFTP: %w", err)
	}

	remoteFile, err := client.Open(s.getFilePath(fileID.String()))
	if err != nil {
		// Nothing to hand to the caller: release the connections here.
		_ = client.Close()
		_ = sshConn.Close()
		return nil, fmt.Errorf("failed to open file from SFTP: %w", err)
	}

	return &sftpFileReader{file: remoteFile, client: client, sshConn: sshConn}, nil
}
// DeleteFile removes the backup file from the SFTP server. A file that
// cannot be Stat-ed is treated as already gone and reported as success.
// NOTE(review): Stat failures other than "not found" (permissions, network)
// are also swallowed here — presumably intentional best-effort semantics;
// confirm before tightening.
func (s *SFTPStorage) DeleteFile(encryptor encryption.FieldEncryptor, fileID uuid.UUID) error {
	client, sshConn, err := s.connect(encryptor, sftpConnectTimeout)
	if err != nil {
		return fmt.Errorf("failed to connect to SFTP: %w", err)
	}
	defer func() {
		_ = client.Close()
		_ = sshConn.Close()
	}()

	target := s.getFilePath(fileID.String())
	if _, statErr := client.Stat(target); statErr != nil {
		return nil
	}
	if removeErr := client.Remove(target); removeErr != nil {
		return fmt.Errorf("failed to delete file from SFTP: %w", removeErr)
	}
	return nil
}
// Validate performs static checks on the connection settings; it does not
// contact the server. At least one auth method (password or private key)
// must be configured and the port must be a valid TCP port.
func (s *SFTPStorage) Validate(encryptor encryption.FieldEncryptor) error {
	switch {
	case s.Host == "":
		return errors.New("SFTP host is required")
	case s.Username == "":
		return errors.New("SFTP username is required")
	case s.Password == "" && s.PrivateKey == "":
		return errors.New("SFTP password or private key is required")
	case s.Port <= 0 || s.Port > 65535:
		return errors.New("SFTP port must be between 1 and 65535")
	}
	return nil
}
// TestConnection dials the server with the short test timeout and, when a
// base path is configured, makes sure that path exists (creating it if
// necessary).
func (s *SFTPStorage) TestConnection(encryptor encryption.FieldEncryptor) error {
	ctx, cancel := context.WithTimeout(context.Background(), sftpTestConnectTimeout)
	defer cancel()

	client, sshConn, err := s.connectWithContext(ctx, encryptor, sftpTestConnectTimeout)
	if err != nil {
		return fmt.Errorf("failed to connect to SFTP: %w", err)
	}
	defer func() {
		_ = client.Close()
		_ = sshConn.Close()
	}()

	if s.Path == "" {
		return nil
	}
	if err := s.ensureDirectory(client, s.Path); err != nil {
		return fmt.Errorf("failed to access or create path '%s': %w", s.Path, err)
	}
	return nil
}
// HideSensitiveData blanks both secrets before the model is returned to API
// clients; Update restores them from the stored copy when the incoming
// payload leaves them empty.
func (s *SFTPStorage) HideSensitiveData() {
	s.Password = ""
	s.PrivateKey = ""
}
// EncryptSensitiveData encrypts the password and private key in place,
// keyed by the storage ID. Empty fields are left untouched.
func (s *SFTPStorage) EncryptSensitiveData(encryptor encryption.FieldEncryptor) error {
	if s.Password != "" {
		ciphertext, err := encryptor.Encrypt(s.StorageID, s.Password)
		if err != nil {
			return fmt.Errorf("failed to encrypt SFTP password: %w", err)
		}
		s.Password = ciphertext
	}
	if s.PrivateKey != "" {
		ciphertext, err := encryptor.Encrypt(s.StorageID, s.PrivateKey)
		if err != nil {
			return fmt.Errorf("failed to encrypt SFTP private key: %w", err)
		}
		s.PrivateKey = ciphertext
	}
	return nil
}
// Update copies settings from incoming. Secrets (password, private key) are
// only overwritten when the incoming payload actually carries them, so API
// requests with hidden secrets keep the stored values.
func (s *SFTPStorage) Update(incoming *SFTPStorage) {
	s.Host = incoming.Host
	s.Port = incoming.Port
	s.Username = incoming.Username
	s.SkipHostKeyVerify = incoming.SkipHostKeyVerify
	s.Path = incoming.Path

	if pw := incoming.Password; pw != "" {
		s.Password = pw
	}
	if key := incoming.PrivateKey; key != "" {
		s.PrivateKey = key
	}
}
// connect is a convenience wrapper around connectWithContext using a
// background context; the timeout still bounds dialing and the SSH
// handshake via ssh.ClientConfig.Timeout.
func (s *SFTPStorage) connect(
	encryptor encryption.FieldEncryptor,
	timeout time.Duration,
) (*sftp.Client, *ssh.Client, error) {
	return s.connectWithContext(context.Background(), encryptor, timeout)
}
// connectWithContext dials the SFTP server and returns an SFTP client plus
// the underlying SSH client; the caller must close both (SFTP client first).
// Auth methods are built from whichever of password / private key are set;
// both are stored encrypted and decrypted here.
//
// Returns an error when host key verification is requested, because no
// known-hosts mechanism is implemented yet — see the SECURITY note below.
func (s *SFTPStorage) connectWithContext(
	ctx context.Context,
	encryptor encryption.FieldEncryptor,
	timeout time.Duration,
) (*sftp.Client, *ssh.Client, error) {
	var authMethods []ssh.AuthMethod
	if s.Password != "" {
		password, err := encryptor.Decrypt(s.StorageID, s.Password)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to decrypt SFTP password: %w", err)
		}
		authMethods = append(authMethods, ssh.Password(password))
	}
	if s.PrivateKey != "" {
		privateKey, err := encryptor.Decrypt(s.StorageID, s.PrivateKey)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to decrypt SFTP private key: %w", err)
		}
		signer, err := ssh.ParsePrivateKey([]byte(privateKey))
		if err != nil {
			return nil, nil, fmt.Errorf("failed to parse private key: %w", err)
		}
		authMethods = append(authMethods, ssh.PublicKeys(signer))
	}

	// SECURITY: the original code assigned ssh.InsecureIgnoreHostKey() in
	// BOTH branches of this check, silently skipping verification even when
	// the user left SkipHostKeyVerify=false. Until real known-hosts pinning
	// is implemented we fail closed instead of pretending to verify.
	if !s.SkipHostKeyVerify {
		return nil, nil, errors.New(
			"SFTP host key verification is not implemented; enable skipHostKeyVerify to connect without verification",
		)
	}
	hostKeyCallback := ssh.InsecureIgnoreHostKey()

	config := &ssh.ClientConfig{
		User:            s.Username,
		Auth:            authMethods,
		HostKeyCallback: hostKeyCallback,
		Timeout:         timeout,
	}

	address := fmt.Sprintf("%s:%d", s.Host, s.Port)
	dialer := net.Dialer{Timeout: timeout}
	conn, err := dialer.DialContext(ctx, "tcp", address)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to dial SFTP server: %w", err)
	}

	sshConn, chans, reqs, err := ssh.NewClientConn(conn, address, config)
	if err != nil {
		_ = conn.Close()
		return nil, nil, fmt.Errorf("failed to create SSH connection: %w", err)
	}
	sshClient := ssh.NewClient(sshConn, chans, reqs)

	sftpClient, err := sftp.NewClient(sshClient)
	if err != nil {
		_ = sshClient.Close()
		return nil, nil, fmt.Errorf("failed to create SFTP client: %w", err)
	}
	return sftpClient, sshClient, nil
}
// ensureDirectory walks the given path segment by segment, creating any
// missing directories. The walk is always anchored at the server root ("/"),
// matching getFilePath's absolute-path convention, even when the configured
// path has no leading slash.
func (s *SFTPStorage) ensureDirectory(client *sftp.Client, path string) error {
	cleaned := strings.Trim(path, "/")
	if cleaned == "" {
		return nil
	}

	current := ""
	for _, segment := range strings.Split(cleaned, "/") {
		if segment == "" || segment == "." {
			continue
		}
		current += "/" + segment
		// A Stat failure is taken to mean "missing" and we try to create the
		// directory; Mkdir then surfaces real errors (permissions etc.).
		if _, err := client.Stat(current); err != nil {
			if mkErr := client.Mkdir(current); mkErr != nil {
				return fmt.Errorf("failed to create directory '%s': %w", current, mkErr)
			}
		}
	}
	return nil
}
// getFilePath builds the remote path for a stored file: the absolute
// "/<Path>/<name>" when a base path is configured, otherwise just the bare
// file name (relative to the SSH user's working directory).
func (s *SFTPStorage) getFilePath(filename string) string {
	if s.Path == "" {
		return filename
	}
	base := strings.TrimSuffix(strings.TrimPrefix(s.Path, "/"), "/")
	return "/" + base + "/" + filename
}
// sftpFileReader is an io.ReadCloser bundling a remote file handle with the
// SFTP client and SSH connection backing it, so closing the reader tears the
// whole stack down.
type sftpFileReader struct {
	file    *sftp.File
	client  *sftp.Client
	sshConn *ssh.Client
}

// Read delegates to the underlying remote file.
func (r *sftpFileReader) Read(p []byte) (int, error) {
	return r.file.Read(p)
}

// Close closes the file, then the SFTP client, then the SSH connection.
// Every close is attempted even when an earlier one fails; the first error
// encountered is the one returned.
func (r *sftpFileReader) Close() error {
	var firstErr error
	record := func(err error) {
		if firstErr == nil {
			firstErr = err
		}
	}
	if r.file != nil {
		if err := r.file.Close(); err != nil {
			record(fmt.Errorf("failed to close file: %w", err))
		}
	}
	if r.client != nil {
		if err := r.client.Close(); err != nil {
			record(fmt.Errorf("failed to close SFTP client: %w", err))
		}
	}
	if r.sshConn != nil {
		if err := r.sshConn.Close(); err != nil {
			record(fmt.Errorf("failed to close SSH connection: %w", err))
		}
	}
	return firstErr
}
type contextReader struct {
ctx context.Context
reader io.Reader
}
func (r *contextReader) Read(p []byte) (n int, err error) {
select {
case <-r.ctx.Done():
return 0, r.ctx.Err()
default:
return r.reader.Read(p)
}
}

View File

@@ -38,17 +38,25 @@ func (r *StorageRepository) Save(storage *Storage) (*Storage, error) {
if storage.FTPStorage != nil {
storage.FTPStorage.StorageID = storage.ID
}
case StorageTypeSFTP:
if storage.SFTPStorage != nil {
storage.SFTPStorage.StorageID = storage.ID
}
case StorageTypeRclone:
if storage.RcloneStorage != nil {
storage.RcloneStorage.StorageID = storage.ID
}
}
if storage.ID == uuid.Nil {
if err := tx.Create(storage).
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage", "AzureBlobStorage", "FTPStorage").
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage", "AzureBlobStorage", "FTPStorage", "SFTPStorage", "RcloneStorage").
Error; err != nil {
return err
}
} else {
if err := tx.Save(storage).
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage", "AzureBlobStorage", "FTPStorage").
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage", "AzureBlobStorage", "FTPStorage", "SFTPStorage", "RcloneStorage").
Error; err != nil {
return err
}
@@ -97,6 +105,20 @@ func (r *StorageRepository) Save(storage *Storage) (*Storage, error) {
return err
}
}
case StorageTypeSFTP:
if storage.SFTPStorage != nil {
storage.SFTPStorage.StorageID = storage.ID // Ensure ID is set
if err := tx.Save(storage.SFTPStorage).Error; err != nil {
return err
}
}
case StorageTypeRclone:
if storage.RcloneStorage != nil {
storage.RcloneStorage.StorageID = storage.ID // Ensure ID is set
if err := tx.Save(storage.RcloneStorage).Error; err != nil {
return err
}
}
}
return nil
@@ -120,6 +142,8 @@ func (r *StorageRepository) FindByID(id uuid.UUID) (*Storage, error) {
Preload("NASStorage").
Preload("AzureBlobStorage").
Preload("FTPStorage").
Preload("SFTPStorage").
Preload("RcloneStorage").
Where("id = ?", id).
First(&s).Error; err != nil {
return nil, err
@@ -139,6 +163,8 @@ func (r *StorageRepository) FindByWorkspaceID(workspaceID uuid.UUID) ([]*Storage
Preload("NASStorage").
Preload("AzureBlobStorage").
Preload("FTPStorage").
Preload("SFTPStorage").
Preload("RcloneStorage").
Where("workspace_id = ?", workspaceID).
Order("name ASC").
Find(&storages).Error; err != nil {
@@ -188,6 +214,18 @@ func (r *StorageRepository) Delete(s *Storage) error {
return err
}
}
case StorageTypeSFTP:
if s.SFTPStorage != nil {
if err := tx.Delete(s.SFTPStorage).Error; err != nil {
return err
}
}
case StorageTypeRclone:
if s.RcloneStorage != nil {
if err := tx.Delete(s.RcloneStorage).Error; err != nil {
return err
}
}
}
// Delete the main storage

View File

@@ -0,0 +1,675 @@
package tests
import (
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"postgresus-backend/internal/config"
"postgresus-backend/internal/features/backups/backups"
backups_config "postgresus-backend/internal/features/backups/config"
"postgresus-backend/internal/features/databases"
mysqltypes "postgresus-backend/internal/features/databases/databases/mysql"
"postgresus-backend/internal/features/restores"
restores_enums "postgresus-backend/internal/features/restores/enums"
restores_models "postgresus-backend/internal/features/restores/models"
"postgresus-backend/internal/features/storages"
users_enums "postgresus-backend/internal/features/users/enums"
users_testing "postgresus-backend/internal/features/users/testing"
workspaces_testing "postgresus-backend/internal/features/workspaces/testing"
test_utils "postgresus-backend/internal/util/testing"
"postgresus-backend/internal/util/tools"
)
// dropMysqlTestTableQuery removes the fixture table so each test run starts
// from a clean slate.
const dropMysqlTestTableQuery = `DROP TABLE IF EXISTS test_data`

// createMysqlTestTableQuery creates the fixture table that gets backed up
// and restored by the tests.
const createMysqlTestTableQuery = `
CREATE TABLE test_data (
id INT AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
value INT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
)`

// insertMysqlTestDataQuery seeds three known rows; verifyMysqlDataIntegrity
// compares them against the restored copy.
const insertMysqlTestDataQuery = `
INSERT INTO test_data (name, value) VALUES
('test1', 100),
('test2', 200),
('test3', 300)`
// MysqlContainer describes a running MySQL test container plus an open
// connection to its test database.
type MysqlContainer struct {
	Host     string
	Port     int
	Username string
	Password string
	Database string
	Version  tools.MysqlVersion
	// DB is an open connection to Database; the caller is responsible for
	// closing it.
	DB *sqlx.DB
}

// MysqlTestDataItem mirrors one row of the test_data fixture table.
type MysqlTestDataItem struct {
	ID        int       `db:"id"`
	Name      string    `db:"name"`
	Value     int       `db:"value"`
	CreatedAt time.Time `db:"created_at"`
}
// Test_BackupAndRestoreMysql_RestoreIsSuccessful runs the unencrypted
// backup+restore round trip against every supported MySQL version whose
// test container port is configured.
func Test_BackupAndRestoreMysql_RestoreIsSuccessful(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version tools.MysqlVersion
		port    string
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port},
	}
	for _, tc := range cases {
		// Capture a per-iteration copy: with t.Parallel() the subtest closure
		// runs after the loop advances, and before Go 1.22 every parallel
		// subtest would otherwise observe the last case only.
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			testMysqlBackupRestoreForVersion(t, tc.version, tc.port)
		})
	}
}
// Test_BackupAndRestoreMysqlWithEncryption_RestoreIsSuccessful runs the
// encrypted backup+restore round trip against every supported MySQL version
// whose test container port is configured.
func Test_BackupAndRestoreMysqlWithEncryption_RestoreIsSuccessful(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version tools.MysqlVersion
		port    string
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port},
	}
	for _, tc := range cases {
		// Capture a per-iteration copy: with t.Parallel() the subtest closure
		// runs after the loop advances, and before Go 1.22 every parallel
		// subtest would otherwise observe the last case only.
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			testMysqlBackupRestoreWithEncryptionForVersion(t, tc.version, tc.port)
		})
	}
}
// Test_BackupAndRestoreMysql_WithReadOnlyUser_RestoreIsSuccessful verifies
// that backups taken with an API-created read-only user restore correctly,
// for every supported MySQL version with a configured test container port.
func Test_BackupAndRestoreMysql_WithReadOnlyUser_RestoreIsSuccessful(t *testing.T) {
	env := config.GetEnv()
	cases := []struct {
		name    string
		version tools.MysqlVersion
		port    string
	}{
		{"MySQL 5.7", tools.MysqlVersion57, env.TestMysql57Port},
		{"MySQL 8.0", tools.MysqlVersion80, env.TestMysql80Port},
		{"MySQL 8.4", tools.MysqlVersion84, env.TestMysql84Port},
	}
	for _, tc := range cases {
		// Capture a per-iteration copy: with t.Parallel() the subtest closure
		// runs after the loop advances, and before Go 1.22 every parallel
		// subtest would otherwise observe the last case only.
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			testMysqlBackupRestoreWithReadOnlyUserForVersion(t, tc.version, tc.port)
		})
	}
}
// testMysqlBackupRestoreForVersion exercises the full happy path for one
// MySQL version: seed fixture data, create workspace/storage/database via
// the API, take an unencrypted backup, restore it into a fresh database on
// the same container, and verify the restored rows match the originals.
// Skipped when no container is reachable on the given port.
func testMysqlBackupRestoreForVersion(t *testing.T, mysqlVersion tools.MysqlVersion, port string) {
	container, err := connectToMysqlContainer(mysqlVersion, port)
	if err != nil {
		t.Skipf("Skipping MySQL %s test: %v", mysqlVersion, err)
		return
	}
	defer func() {
		if container.DB != nil {
			container.DB.Close()
		}
	}()
	// Seed the known three-row fixture in the source database.
	setupMysqlTestData(t, container.DB)
	// Wire up workspace, storage and database through the public API.
	router := createTestRouter()
	user := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace("MySQL Test Workspace", user, router)
	storage := storages.CreateTestStorage(workspace.ID)
	database := createMysqlDatabaseViaAPI(
		t, router, "MySQL Test Database", workspace.ID,
		container.Host, container.Port,
		container.Username, container.Password, container.Database,
		container.Version,
		user.Token,
	)
	// Take an unencrypted backup and wait for it to complete.
	enableBackupsViaAPI(
		t, router, database.ID, storage.ID,
		backups_config.BackupEncryptionNone, user.Token,
	)
	createBackupViaAPI(t, router, database.ID, user.Token)
	backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute)
	assert.Equal(t, backups.BackupStatusCompleted, backup.Status)
	// Prepare an empty target database on the same container.
	newDBName := "restoreddb_mysql"
	_, err = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", newDBName))
	assert.NoError(t, err)
	_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE %s;", newDBName))
	assert.NoError(t, err)
	newDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		container.Username, container.Password, container.Host, container.Port, newDBName)
	newDB, err := sqlx.Connect("mysql", newDSN)
	assert.NoError(t, err)
	defer newDB.Close()
	// Restore the backup into the new database and wait for completion.
	createMysqlRestoreViaAPI(
		t, router, backup.ID,
		container.Host, container.Port,
		container.Username, container.Password, newDBName,
		container.Version,
		user.Token,
	)
	restore := waitForMysqlRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute)
	assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status)
	// The fixture table must exist and hold identical data.
	var tableExists int
	err = newDB.Get(
		&tableExists,
		"SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = ? AND table_name = 'test_data'",
		newDBName,
	)
	assert.NoError(t, err)
	assert.Equal(t, 1, tableExists, "Table 'test_data' should exist in restored database")
	verifyMysqlDataIntegrity(t, container.DB, newDB)
	// Best-effort cleanup of the backup artifact and API entities.
	err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
	if err != nil {
		t.Logf("Warning: Failed to delete backup file: %v", err)
	}
	test_utils.MakeDeleteRequest(
		t,
		router,
		"/api/v1/databases/"+database.ID.String(),
		"Bearer "+user.Token,
		http.StatusNoContent,
	)
	storages.RemoveTestStorage(storage.ID)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// testMysqlBackupRestoreWithEncryptionForVersion is the encrypted variant of
// testMysqlBackupRestoreForVersion: the backup is taken with
// BackupEncryptionEncrypted and the test additionally asserts the backup
// record reports encryption. Skipped when no container is reachable.
func testMysqlBackupRestoreWithEncryptionForVersion(
	t *testing.T,
	mysqlVersion tools.MysqlVersion,
	port string,
) {
	container, err := connectToMysqlContainer(mysqlVersion, port)
	if err != nil {
		t.Skipf("Skipping MySQL %s test: %v", mysqlVersion, err)
		return
	}
	defer func() {
		if container.DB != nil {
			container.DB.Close()
		}
	}()
	// Seed the known three-row fixture in the source database.
	setupMysqlTestData(t, container.DB)
	// Wire up workspace, storage and database through the public API.
	router := createTestRouter()
	user := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace(
		"MySQL Encrypted Test Workspace",
		user,
		router,
	)
	storage := storages.CreateTestStorage(workspace.ID)
	database := createMysqlDatabaseViaAPI(
		t, router, "MySQL Encrypted Test Database", workspace.ID,
		container.Host, container.Port,
		container.Username, container.Password, container.Database,
		container.Version,
		user.Token,
	)
	// Take an encrypted backup and wait for it to complete.
	enableBackupsViaAPI(
		t, router, database.ID, storage.ID,
		backups_config.BackupEncryptionEncrypted, user.Token,
	)
	createBackupViaAPI(t, router, database.ID, user.Token)
	backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute)
	assert.Equal(t, backups.BackupStatusCompleted, backup.Status)
	assert.Equal(t, backups_config.BackupEncryptionEncrypted, backup.Encryption)
	// Prepare an empty target database on the same container.
	newDBName := "restoreddb_mysql_encrypted"
	_, err = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", newDBName))
	assert.NoError(t, err)
	_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE %s;", newDBName))
	assert.NoError(t, err)
	newDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		container.Username, container.Password, container.Host, container.Port, newDBName)
	newDB, err := sqlx.Connect("mysql", newDSN)
	assert.NoError(t, err)
	defer newDB.Close()
	// Restore the (encrypted) backup and wait for completion.
	createMysqlRestoreViaAPI(
		t, router, backup.ID,
		container.Host, container.Port,
		container.Username, container.Password, newDBName,
		container.Version,
		user.Token,
	)
	restore := waitForMysqlRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute)
	assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status)
	// The fixture table must exist and hold identical data.
	var tableExists int
	err = newDB.Get(
		&tableExists,
		"SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = ? AND table_name = 'test_data'",
		newDBName,
	)
	assert.NoError(t, err)
	assert.Equal(t, 1, tableExists, "Table 'test_data' should exist in restored database")
	verifyMysqlDataIntegrity(t, container.DB, newDB)
	// Best-effort cleanup of the backup artifact and API entities.
	err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
	if err != nil {
		t.Logf("Warning: Failed to delete backup file: %v", err)
	}
	test_utils.MakeDeleteRequest(
		t,
		router,
		"/api/v1/databases/"+database.ID.String(),
		"Bearer "+user.Token,
		http.StatusNoContent,
	)
	storages.RemoveTestStorage(storage.ID)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// testMysqlBackupRestoreWithReadOnlyUserForVersion verifies the read-only
// user flow: create a read-only user via the API, switch the database
// credentials to it, back up with those restricted credentials, then restore
// (with the original root credentials) and verify the data. Skipped when no
// container is reachable on the given port.
func testMysqlBackupRestoreWithReadOnlyUserForVersion(
	t *testing.T,
	mysqlVersion tools.MysqlVersion,
	port string,
) {
	container, err := connectToMysqlContainer(mysqlVersion, port)
	if err != nil {
		t.Skipf("Skipping MySQL %s test: %v", mysqlVersion, err)
		return
	}
	defer func() {
		if container.DB != nil {
			container.DB.Close()
		}
	}()
	// Seed the known three-row fixture in the source database.
	setupMysqlTestData(t, container.DB)
	// Wire up workspace, storage and database through the public API.
	router := createTestRouter()
	user := users_testing.CreateTestUser(users_enums.UserRoleMember)
	workspace := workspaces_testing.CreateTestWorkspace(
		"MySQL ReadOnly Test Workspace",
		user,
		router,
	)
	storage := storages.CreateTestStorage(workspace.ID)
	database := createMysqlDatabaseViaAPI(
		t, router, "MySQL ReadOnly Test Database", workspace.ID,
		container.Host, container.Port,
		container.Username, container.Password, container.Database,
		container.Version,
		user.Token,
	)
	// Provision a read-only user and re-point the database at it.
	readOnlyUser := createMysqlReadOnlyUserViaAPI(t, router, database.ID, user.Token)
	assert.NotEmpty(t, readOnlyUser.Username)
	assert.NotEmpty(t, readOnlyUser.Password)
	updatedDatabase := updateMysqlDatabaseCredentialsViaAPI(
		t, router, database,
		readOnlyUser.Username, readOnlyUser.Password,
		user.Token,
	)
	// Back up using the read-only credentials.
	enableBackupsViaAPI(
		t, router, updatedDatabase.ID, storage.ID,
		backups_config.BackupEncryptionNone, user.Token,
	)
	createBackupViaAPI(t, router, updatedDatabase.ID, user.Token)
	backup := waitForBackupCompletion(t, router, updatedDatabase.ID, user.Token, 5*time.Minute)
	assert.Equal(t, backups.BackupStatusCompleted, backup.Status)
	// Prepare an empty target database on the same container.
	newDBName := "restoreddb_mysql_readonly"
	_, err = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", newDBName))
	assert.NoError(t, err)
	_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE %s;", newDBName))
	assert.NoError(t, err)
	newDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true",
		container.Username, container.Password, container.Host, container.Port, newDBName)
	newDB, err := sqlx.Connect("mysql", newDSN)
	assert.NoError(t, err)
	defer newDB.Close()
	// Restore with the original (root) credentials and wait for completion.
	createMysqlRestoreViaAPI(
		t, router, backup.ID,
		container.Host, container.Port,
		container.Username, container.Password, newDBName,
		container.Version,
		user.Token,
	)
	restore := waitForMysqlRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute)
	assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status)
	// The fixture table must exist and hold identical data.
	var tableExists int
	err = newDB.Get(
		&tableExists,
		"SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = ? AND table_name = 'test_data'",
		newDBName,
	)
	assert.NoError(t, err)
	assert.Equal(t, 1, tableExists, "Table 'test_data' should exist in restored database")
	verifyMysqlDataIntegrity(t, container.DB, newDB)
	// Best-effort cleanup of the backup artifact and API entities.
	err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
	if err != nil {
		t.Logf("Warning: Failed to delete backup file: %v", err)
	}
	test_utils.MakeDeleteRequest(
		t,
		router,
		"/api/v1/databases/"+updatedDatabase.ID.String(),
		"Bearer "+user.Token,
		http.StatusNoContent,
	)
	storages.RemoveTestStorage(storage.ID)
	workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// createMysqlDatabaseViaAPI registers a MySQL database through the public
// API and fails the test on any non-201 response. Returns the created
// database decoded from the response body.
func createMysqlDatabaseViaAPI(
	t *testing.T,
	router *gin.Engine,
	name string,
	workspaceID uuid.UUID,
	host string,
	port int,
	username string,
	password string,
	database string,
	version tools.MysqlVersion,
	token string,
) *databases.Database {
	payload := databases.Database{
		Name:        name,
		WorkspaceID: &workspaceID,
		Type:        databases.DatabaseTypeMysql,
		Mysql: &mysqltypes.MysqlDatabase{
			Host:     host,
			Port:     port,
			Username: username,
			Password: password,
			Database: &database,
			Version:  version,
		},
	}

	resp := workspaces_testing.MakeAPIRequest(
		router,
		"POST",
		"/api/v1/databases/create",
		"Bearer "+token,
		payload,
	)
	if resp.Code != http.StatusCreated {
		t.Fatalf("Failed to create MySQL database. Status: %d, Body: %s", resp.Code, resp.Body.String())
	}

	var created databases.Database
	if err := json.Unmarshal(resp.Body.Bytes(), &created); err != nil {
		t.Fatalf("Failed to unmarshal database response: %v", err)
	}
	return &created
}
// createMysqlRestoreViaAPI kicks off a restore of the given backup into the
// target MySQL database via the public API, expecting HTTP 200.
func createMysqlRestoreViaAPI(
	t *testing.T,
	router *gin.Engine,
	backupID uuid.UUID,
	host string,
	port int,
	username string,
	password string,
	database string,
	version tools.MysqlVersion,
	token string,
) {
	payload := restores.RestoreBackupRequest{
		MysqlDatabase: &mysqltypes.MysqlDatabase{
			Host:     host,
			Port:     port,
			Username: username,
			Password: password,
			Database: &database,
			Version:  version,
		},
	}
	url := fmt.Sprintf("/api/v1/restores/%s/restore", backupID.String())
	test_utils.MakePostRequest(t, router, url, "Bearer "+token, payload, http.StatusOK)
}
// waitForMysqlRestoreCompletion polls the restores endpoint every 500ms
// until a restore for the backup completes, fails, or the timeout elapses.
// A failed restore or a timeout aborts the test via t.Fatalf.
func waitForMysqlRestoreCompletion(
	t *testing.T,
	router *gin.Engine,
	backupID uuid.UUID,
	token string,
	timeout time.Duration,
) *restores_models.Restore {
	const pollInterval = 500 * time.Millisecond
	deadline := time.Now().Add(timeout)
	url := fmt.Sprintf("/api/v1/restores/%s", backupID.String())

	for {
		if time.Now().After(deadline) {
			t.Fatalf("Timeout waiting for MySQL restore completion after %v", timeout)
		}

		var restoresList []*restores_models.Restore
		test_utils.MakeGetRequestAndUnmarshal(
			t,
			router,
			url,
			"Bearer "+token,
			http.StatusOK,
			&restoresList,
		)

		for _, restore := range restoresList {
			switch restore.Status {
			case restores_enums.RestoreStatusCompleted:
				return restore
			case restores_enums.RestoreStatusFailed:
				failMsg := "unknown error"
				if restore.FailMessage != nil {
					failMsg = *restore.FailMessage
				}
				t.Fatalf("MySQL restore failed: %s", failMsg)
			}
		}
		time.Sleep(pollInterval)
	}
}
// verifyMysqlDataIntegrity reads every row of test_data from both databases
// and asserts the restored copy matches the original row-for-row on ID,
// Name and Value (timestamps are deliberately not compared).
func verifyMysqlDataIntegrity(t *testing.T, originalDB *sqlx.DB, restoredDB *sqlx.DB) {
	const query = "SELECT id, name, value, created_at FROM test_data ORDER BY id"

	var original, restored []MysqlTestDataItem
	assert.NoError(t, originalDB.Select(&original, query))
	assert.NoError(t, restoredDB.Select(&restored, query))

	assert.Equal(t, len(original), len(restored), "Should have same number of rows")
	if len(original) == 0 || len(restored) == 0 {
		return
	}
	for i := range original {
		assert.Equal(t, original[i].ID, restored[i].ID, "ID should match")
		assert.Equal(t, original[i].Name, restored[i].Name, "Name should match")
		assert.Equal(t, original[i].Value, restored[i].Value, "Value should match")
	}
}
// connectToMysqlContainer opens a sqlx connection to the local MySQL test
// container listening on the given port and returns its connection details.
// It returns an error when the port is empty or unparsable, or when the
// connection cannot be established.
func connectToMysqlContainer(version tools.MysqlVersion, port string) (*MysqlContainer, error) {
	if port == "" {
		return nil, fmt.Errorf("MySQL %s port not configured", version)
	}

	// Fixed credentials used by the MySQL test containers.
	const (
		dbName   = "testdb"
		password = "rootpassword"
		username = "root"
		host     = "127.0.0.1"
	)

	portNumber, err := strconv.Atoi(port)
	if err != nil {
		return nil, fmt.Errorf("failed to parse port: %w", err)
	}

	dsn := fmt.Sprintf(
		"%s:%s@tcp(%s:%d)/%s?parseTime=true",
		username, password, host, portNumber, dbName,
	)
	conn, err := sqlx.Connect("mysql", dsn)
	if err != nil {
		return nil, fmt.Errorf("failed to connect to MySQL database: %w", err)
	}

	return &MysqlContainer{
		Host:     host,
		Port:     portNumber,
		Username: username,
		Password: password,
		Database: dbName,
		Version:  version,
		DB:       conn,
	}, nil
}
// setupMysqlTestData resets the test_data table: drops it if present,
// recreates it, and seeds it with the fixture rows.
func setupMysqlTestData(t *testing.T, db *sqlx.DB) {
	for _, query := range []string{
		dropMysqlTestTableQuery,
		createMysqlTestTableQuery,
		insertMysqlTestDataQuery,
	} {
		_, err := db.Exec(query)
		assert.NoError(t, err)
	}
}
// createMysqlReadOnlyUserViaAPI fetches the database record by ID via the
// API and asks the backend to provision a read-only user for it, returning
// the generated credentials.
func createMysqlReadOnlyUserViaAPI(
	t *testing.T,
	router *gin.Engine,
	databaseID uuid.UUID,
	token string,
) *databases.CreateReadOnlyUserResponse {
	authHeader := "Bearer " + token

	var databaseRecord databases.Database
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		router,
		fmt.Sprintf("/api/v1/databases/%s", databaseID.String()),
		authHeader,
		http.StatusOK,
		&databaseRecord,
	)

	var credentials databases.CreateReadOnlyUserResponse
	test_utils.MakePostRequestAndUnmarshal(
		t,
		router,
		"/api/v1/databases/create-readonly-user",
		authHeader,
		databaseRecord,
		http.StatusOK,
		&credentials,
	)

	return &credentials
}
// updateMysqlDatabaseCredentialsViaAPI overwrites the MySQL username and
// password on the given database record and persists the change through the
// update endpoint, returning the server's view of the updated record.
// NOTE(review): assumes database.Mysql is non-nil — callers must pass a
// MySQL-typed database.
func updateMysqlDatabaseCredentialsViaAPI(
	t *testing.T,
	router *gin.Engine,
	database *databases.Database,
	username string,
	password string,
	token string,
) *databases.Database {
	database.Mysql.Username = username
	database.Mysql.Password = password

	response := workspaces_testing.MakeAPIRequest(
		router,
		"POST",
		"/api/v1/databases/update",
		"Bearer "+token,
		database,
	)
	if response.Code != http.StatusOK {
		t.Fatalf(
			"Failed to update MySQL database. Status: %d, Body: %s",
			response.Code,
			response.Body.String(),
		)
	}

	var updated databases.Database
	if err := json.Unmarshal(response.Body.Bytes(), &updated); err != nil {
		t.Fatalf("Failed to unmarshal database response: %v", err)
	}

	return &updated
}

View File

@@ -0,0 +1,214 @@
package tools
import (
"fmt"
"log/slog"
"os"
"path/filepath"
"runtime"
"strings"
env_utils "postgresus-backend/internal/util/env"
)
// MysqlVersion identifies a supported MySQL major version ("5.7", "8.0", "8.4").
type MysqlVersion string

const (
	MysqlVersion57 MysqlVersion = "5.7"
	MysqlVersion80 MysqlVersion = "8.0"
	MysqlVersion84 MysqlVersion = "8.4"
)

// MysqlExecutable names a MySQL client binary bundled with each version.
type MysqlExecutable string

const (
	MysqlExecutableMysqldump MysqlExecutable = "mysqldump"
	MysqlExecutableMysql     MysqlExecutable = "mysql"
)
// GetMysqlExecutable returns the full path to a specific MySQL executable
// for the given version. Common executables include: mysqldump, mysql.
// On Windows, automatically appends .exe extension.
func GetMysqlExecutable(
	version MysqlVersion,
	executable MysqlExecutable,
	envMode env_utils.EnvMode,
	mysqlInstallDir string,
) string {
	binaryName := string(executable)
	if runtime.GOOS == "windows" {
		binaryName += ".exe"
	}

	return filepath.Join(
		getMysqlBasePath(version, envMode, mysqlInstallDir),
		binaryName,
	)
}
// VerifyMysqlInstallation verifies that MySQL versions 5.7, 8.0, 8.4 are installed
// in the current environment. Each version should be installed with the required
// client tools (mysqldump, mysql) available.
// In development: ./tools/mysql/mysql-{VERSION}/bin
// In production: /usr/local/mysql-{VERSION}/bin
//
// Missing directories or binaries only produce warnings (MySQL support is
// disabled for that version); nothing is fatal here.
func VerifyMysqlInstallation(
	logger *slog.Logger,
	envMode env_utils.EnvMode,
	mysqlInstallDir string,
) {
	versions := []MysqlVersion{MysqlVersion57, MysqlVersion80, MysqlVersion84}
	requiredCommands := []MysqlExecutable{MysqlExecutableMysqldump, MysqlExecutableMysql}

	// In development the warning additionally points at the setup docs.
	isDev := envMode == env_utils.EnvModeDevelopment

	for _, version := range versions {
		binDir := getMysqlBasePath(version, envMode, mysqlInstallDir)
		logger.Info(
			"Verifying MySQL installation",
			"version", string(version),
			"path", binDir,
		)

		if _, err := os.Stat(binDir); os.IsNotExist(err) {
			msg := "MySQL bin directory not found. MySQL support will be disabled."
			if isDev {
				msg = "MySQL bin directory not found. MySQL support will be disabled. Read ./tools/readme.md for details"
			}
			logger.Warn(msg, "version", string(version), "path", binDir)
			continue
		}

		for _, cmd := range requiredCommands {
			cmdPath := GetMysqlExecutable(version, cmd, envMode, mysqlInstallDir)
			logger.Info(
				"Checking for MySQL command",
				"command", cmd,
				"version", string(version),
				"path", cmdPath,
			)

			if _, err := os.Stat(cmdPath); os.IsNotExist(err) {
				msg := "MySQL command not found. MySQL support for this version will be disabled."
				if isDev {
					msg = "MySQL command not found. MySQL support for this version will be disabled. Read ./tools/readme.md for details"
				}
				logger.Warn(msg, "command", cmd, "version", string(version), "path", cmdPath)
				continue
			}

			logger.Info(
				"MySQL command found",
				"command", cmd,
				"version", string(version),
			)
		}

		logger.Info(
			"Installation of MySQL verified",
			"version", string(version),
			"path", binDir,
		)
	}

	logger.Info("MySQL version-specific client tools verification completed!")
}
// IsMysqlBackupVersionHigherThanRestoreVersion checks if backup was made with
// a newer MySQL version than the restore target.
// Unknown versions rank lowest (0), matching the zero value of the original
// map-based lookup.
func IsMysqlBackupVersionHigherThanRestoreVersion(
	backupVersion, restoreVersion MysqlVersion,
) bool {
	rank := func(v MysqlVersion) int {
		switch v {
		case MysqlVersion57:
			return 1
		case MysqlVersion80:
			return 2
		case MysqlVersion84:
			return 3
		default:
			return 0
		}
	}

	return rank(backupVersion) > rank(restoreVersion)
}
// EscapeMysqlPassword escapes special characters for MySQL .my.cnf file format.
// In .my.cnf, passwords with special chars should be quoted.
// Escape backslash and quote characters.
func EscapeMysqlPassword(password string) string {
	// A single left-to-right pass doubling backslashes and escaping double
	// quotes is equivalent to the two sequential ReplaceAll calls: the
	// replacement for '\' never introduces a '"' and vice versa.
	escaper := strings.NewReplacer(
		`\`, `\\`,
		`"`, `\"`,
	)
	return escaper.Replace(password)
}
// GetMysqlVersionEnum converts a version string to MysqlVersion enum.
// Panics on any version other than "5.7", "8.0" or "8.4".
func GetMysqlVersionEnum(version string) MysqlVersion {
	known := map[string]MysqlVersion{
		"5.7": MysqlVersion57,
		"8.0": MysqlVersion80,
		"8.4": MysqlVersion84,
	}

	v, ok := known[version]
	if !ok {
		panic(fmt.Sprintf("invalid mysql version: %s", version))
	}
	return v
}
// getMysqlBasePath resolves the bin directory holding MySQL client tools
// for a version: {mysqlInstallDir}/mysql-{version}/bin in development,
// /usr/local/mysql-{version}/bin otherwise.
func getMysqlBasePath(
	version MysqlVersion,
	envMode env_utils.EnvMode,
	mysqlInstallDir string,
) string {
	versionDir := fmt.Sprintf("mysql-%s", string(version))

	if envMode == env_utils.EnvModeDevelopment {
		return filepath.Join(mysqlInstallDir, versionDir, "bin")
	}
	return "/usr/local/" + versionDir + "/bin"
}

View File

@@ -0,0 +1,23 @@
-- +goose Up
-- +goose StatementBegin
-- Per-storage settings for rclone-backed storages; one row per storage
-- of the rclone type, keyed by the parent storage id.
CREATE TABLE rclone_storages (
    storage_id UUID PRIMARY KEY,
    -- raw rclone configuration content (presumably rclone.conf text — confirm)
    config_content TEXT NOT NULL,
    -- optional path within the configured remote
    remote_path TEXT
);
-- Deleting the parent storage cascades to its rclone settings; the
-- constraint is deferred so both rows can be inserted in one transaction.
ALTER TABLE rclone_storages
ADD CONSTRAINT fk_rclone_storages_storage
FOREIGN KEY (storage_id)
REFERENCES storages (id)
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS rclone_storages;
-- +goose StatementEnd

View File

@@ -0,0 +1,28 @@
-- +goose Up
-- +goose StatementBegin
-- Connection settings for SFTP-backed storages; one row per storage of the
-- SFTP type, keyed by the parent storage id.
CREATE TABLE sftp_storages (
    storage_id UUID PRIMARY KEY,
    host TEXT NOT NULL,
    port INTEGER NOT NULL DEFAULT 22,
    username TEXT NOT NULL,
    -- password and private_key are both nullable; presumably at least one
    -- auth method is required — enforced in application code, confirm
    password TEXT,
    private_key TEXT,
    -- optional remote base path for uploads
    path TEXT,
    -- when TRUE, host key verification is skipped on connect
    skip_host_key_verify BOOLEAN NOT NULL DEFAULT FALSE
);
-- Deleting the parent storage cascades to its SFTP settings; the
-- constraint is deferred so both rows can be inserted in one transaction.
ALTER TABLE sftp_storages
ADD CONSTRAINT fk_sftp_storages_storage
FOREIGN KEY (storage_id)
REFERENCES storages (id)
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS sftp_storages;
-- +goose StatementEnd

View File

@@ -0,0 +1,5 @@
-- +goose Up
-- Optional cron expression for interval scheduling; nullable, so existing
-- rows keep working — presumably NULL means the non-cron schedule applies,
-- confirm against the scheduler code.
ALTER TABLE intervals ADD COLUMN cron_expression TEXT;
-- +goose Down
ALTER TABLE intervals DROP COLUMN cron_expression;

View File

@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin
-- Connection settings for MySQL databases, linked 1:N to the generic
-- databases table (cascade-deleted with the parent).
CREATE TABLE mysql_databases (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    database_id UUID REFERENCES databases(id) ON DELETE CASCADE,
    -- MySQL major version string, e.g. '5.7', '8.0', '8.4'
    version TEXT NOT NULL,
    host TEXT NOT NULL,
    port INT NOT NULL,
    username TEXT NOT NULL,
    password TEXT NOT NULL,
    -- optional specific database name; NULL presumably means all databases — confirm
    database TEXT,
    -- NOTE(review): 'is_https' is an unusual flag name for a MySQL
    -- connection (possibly means TLS/SSL, or copied from another database
    -- type) — confirm intended semantics
    is_https BOOLEAN NOT NULL DEFAULT FALSE
);
-- +goose StatementEnd
-- +goose StatementBegin
-- Speeds up lookups of MySQL settings by parent database id.
CREATE INDEX idx_mysql_databases_database_id ON mysql_databases(database_id);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP INDEX IF EXISTS idx_mysql_databases_database_id;
-- +goose StatementEnd
-- +goose StatementBegin
DROP TABLE IF EXISTS mysql_databases;
-- +goose StatementEnd

View File

@@ -1,2 +1,3 @@
postgresql
mysql
downloads

View File

@@ -5,7 +5,7 @@ set -e # Exit on any error
# Ensure non-interactive mode for apt
export DEBIAN_FRONTEND=noninteractive
echo "Installing PostgreSQL client tools versions 12-18 for Linux (Debian/Ubuntu)..."
echo "Installing PostgreSQL and MySQL client tools for Linux (Debian/Ubuntu)..."
echo
# Check if running on supported system
@@ -22,19 +22,27 @@ else
echo "This script requires sudo privileges to install packages."
fi
# Create postgresql directory
# Create directories
mkdir -p postgresql
mkdir -p mysql
# Get absolute path
# Get absolute paths
POSTGRES_DIR="$(pwd)/postgresql"
MYSQL_DIR="$(pwd)/mysql"
echo "Installing PostgreSQL client tools to: $POSTGRES_DIR"
echo "Installing MySQL client tools to: $MYSQL_DIR"
echo
# ========== PostgreSQL Installation ==========
echo "========================================"
echo "Installing PostgreSQL client tools (versions 12-18)..."
echo "========================================"
# Add PostgreSQL official APT repository
echo "Adding PostgreSQL official APT repository..."
$SUDO apt-get update -qq -y
$SUDO apt-get install -y -qq wget ca-certificates
$SUDO apt-get install -y -qq wget ca-certificates gnupg lsb-release
# Add GPG key
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | $SUDO apt-key add - 2>/dev/null
@@ -46,10 +54,10 @@ echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main"
echo "Updating package list..."
$SUDO apt-get update -qq -y
# Install client tools for each version
versions="12 13 14 15 16 17 18"
# Install PostgreSQL client tools for each version
pg_versions="12 13 14 15 16 17 18"
for version in $versions; do
for version in $pg_versions; do
echo "Installing PostgreSQL $version client tools..."
# Install client tools only
@@ -85,22 +93,100 @@ for version in $versions; do
echo
done
# ========== MySQL Installation ==========
echo "========================================"
echo "Installing MySQL client tools (versions 5.7, 8.0, 8.4)..."
echo "========================================"
# Download and extract MySQL client tools
mysql_versions="5.7 8.0 8.4"
for version in $mysql_versions; do
echo "Installing MySQL $version client tools..."
version_dir="$MYSQL_DIR/mysql-$version"
mkdir -p "$version_dir/bin"
# Download MySQL client tools from official CDN
# Note: 5.7 is in Downloads, 8.0 and 8.4 specific versions are in archives
case $version in
"5.7")
MYSQL_URL="https://cdn.mysql.com/Downloads/MySQL-5.7/mysql-5.7.44-linux-glibc2.12-x86_64.tar.gz"
;;
"8.0")
MYSQL_URL="https://cdn.mysql.com/archives/mysql-8.0/mysql-8.0.40-linux-glibc2.17-x86_64-minimal.tar.xz"
;;
"8.4")
MYSQL_URL="https://cdn.mysql.com/archives/mysql-8.4/mysql-8.4.3-linux-glibc2.17-x86_64-minimal.tar.xz"
;;
esac
TEMP_DIR="/tmp/mysql_install_$version"
mkdir -p "$TEMP_DIR"
cd "$TEMP_DIR"
echo " Downloading MySQL $version..."
wget -q "$MYSQL_URL" -O "mysql-$version.tar.gz" || wget -q "$MYSQL_URL" -O "mysql-$version.tar.xz"
echo " Extracting MySQL $version..."
if [[ "$MYSQL_URL" == *.xz ]]; then
tar -xJf "mysql-$version.tar.xz" 2>/dev/null || tar -xJf "mysql-$version.tar.gz" 2>/dev/null
else
tar -xzf "mysql-$version.tar.gz" 2>/dev/null || tar -xzf "mysql-$version.tar.xz" 2>/dev/null
fi
# Find extracted directory
EXTRACTED_DIR=$(ls -d mysql-*/ 2>/dev/null | head -1)
if [ -d "$EXTRACTED_DIR" ] && [ -f "$EXTRACTED_DIR/bin/mysqldump" ]; then
# Copy client binaries
cp "$EXTRACTED_DIR/bin/mysql" "$version_dir/bin/" 2>/dev/null || true
cp "$EXTRACTED_DIR/bin/mysqldump" "$version_dir/bin/" 2>/dev/null || true
chmod +x "$version_dir/bin/"*
echo " MySQL $version client tools installed successfully"
else
echo " Warning: Could not extract MySQL $version binaries"
echo " You may need to install MySQL $version client tools manually"
fi
# Cleanup
cd - >/dev/null
rm -rf "$TEMP_DIR"
echo
done
echo "========================================"
echo "Installation completed!"
echo "========================================"
echo
echo "PostgreSQL client tools are available in: $POSTGRES_DIR"
echo "MySQL client tools are available in: $MYSQL_DIR"
echo
# List installed versions
# List installed PostgreSQL versions
echo "Installed PostgreSQL client versions:"
for version in $versions; do
for version in $pg_versions; do
version_dir="$POSTGRES_DIR/postgresql-$version"
if [ -f "$version_dir/bin/pg_dump" ]; then
echo " postgresql-$version: $version_dir/bin/"
# Verify the correct version
version_output=$("$version_dir/bin/pg_dump" --version 2>/dev/null | grep -o "pg_dump (PostgreSQL) [0-9]\+\.[0-9]\+")
echo " Version check: $version_output"
fi
done
echo
echo "Usage example:"
echo " $POSTGRES_DIR/postgresql-15/bin/pg_dump --version"
echo "Installed MySQL client versions:"
for version in $mysql_versions; do
version_dir="$MYSQL_DIR/mysql-$version"
if [ -f "$version_dir/bin/mysqldump" ]; then
echo " mysql-$version: $version_dir/bin/"
version_output=$("$version_dir/bin/mysqldump" --version 2>/dev/null | head -1)
echo " Version check: $version_output"
fi
done
echo
echo "Usage examples:"
echo " $POSTGRES_DIR/postgresql-15/bin/pg_dump --version"
echo " $MYSQL_DIR/mysql-8.0/bin/mysqldump --version"

View File

@@ -2,7 +2,7 @@
set -e # Exit on any error
echo "Installing PostgreSQL client tools versions 12-18 for MacOS..."
echo "Installing PostgreSQL and MySQL client tools for MacOS..."
echo
# Check if Homebrew is installed
@@ -12,13 +12,16 @@ if ! command -v brew &> /dev/null; then
exit 1
fi
# Create postgresql directory
# Create directories
mkdir -p postgresql
mkdir -p mysql
# Get absolute path
# Get absolute paths
POSTGRES_DIR="$(pwd)/postgresql"
MYSQL_DIR="$(pwd)/mysql"
echo "Installing PostgreSQL client tools to: $POSTGRES_DIR"
echo "Installing MySQL client tools to: $MYSQL_DIR"
echo
# Update Homebrew
@@ -27,7 +30,12 @@ brew update
# Install build dependencies
echo "Installing build dependencies..."
brew install wget openssl readline zlib
brew install wget openssl readline zlib cmake
# ========== PostgreSQL Installation ==========
echo "========================================"
echo "Building PostgreSQL client tools (versions 12-18)..."
echo "========================================"
# PostgreSQL source URLs
declare -A PG_URLS=(
@@ -41,7 +49,7 @@ declare -A PG_URLS=(
)
# Create temporary build directory
BUILD_DIR="/tmp/postgresql_build_$$"
BUILD_DIR="/tmp/db_tools_build_$$"
mkdir -p "$BUILD_DIR"
echo "Using temporary build directory: $BUILD_DIR"
@@ -107,10 +115,10 @@ build_postgresql_client() {
echo
}
# Build each version
versions="12 13 14 15 16 17 18"
# Build each PostgreSQL version
pg_versions="12 13 14 15 16 17 18"
for version in $versions; do
for version in $pg_versions; do
url=${PG_URLS[$version]}
if [ -n "$url" ]; then
build_postgresql_client "$version" "$url"
@@ -119,17 +127,108 @@ for version in $versions; do
fi
done
# ========== MySQL Installation ==========
echo "========================================"
echo "Installing MySQL client tools (versions 5.7, 8.0, 8.4)..."
echo "========================================"
# Detect architecture
ARCH=$(uname -m)
if [ "$ARCH" = "arm64" ]; then
MYSQL_ARCH="arm64"
else
MYSQL_ARCH="x86_64"
fi
# MySQL download URLs for macOS (using CDN)
# Note: 5.7 is in Downloads, 8.0 and 8.4 specific versions are in archives
declare -A MYSQL_URLS=(
["5.7"]="https://cdn.mysql.com/Downloads/MySQL-5.7/mysql-5.7.44-macos10.14-x86_64.tar.gz"
["8.0"]="https://cdn.mysql.com/archives/mysql-8.0/mysql-8.0.40-macos14-${MYSQL_ARCH}.tar.gz"
["8.4"]="https://cdn.mysql.com/archives/mysql-8.4/mysql-8.4.3-macos14-${MYSQL_ARCH}.tar.gz"
)
# Function to install MySQL client tools
install_mysql_client() {
local version=$1
local url=$2
local version_dir="$MYSQL_DIR/mysql-$version"
echo "Installing MySQL $version client tools..."
# Skip if already exists
if [ -f "$version_dir/bin/mysqldump" ]; then
echo "MySQL $version already installed, skipping..."
return
fi
mkdir -p "$version_dir/bin"
cd "$BUILD_DIR"
# Download
echo " Downloading MySQL $version..."
wget -q "$url" -O "mysql-$version.tar.gz" || {
echo " Warning: Could not download MySQL $version for $MYSQL_ARCH"
echo " You may need to install MySQL $version client tools manually"
return
}
# Extract
echo " Extracting MySQL $version..."
tar -xzf "mysql-$version.tar.gz"
# Find extracted directory
EXTRACTED_DIR=$(ls -d mysql-*/ 2>/dev/null | head -1)
if [ -d "$EXTRACTED_DIR" ] && [ -f "$EXTRACTED_DIR/bin/mysqldump" ]; then
# Copy client binaries
cp "$EXTRACTED_DIR/bin/mysql" "$version_dir/bin/" 2>/dev/null || true
cp "$EXTRACTED_DIR/bin/mysqldump" "$version_dir/bin/" 2>/dev/null || true
chmod +x "$version_dir/bin/"*
echo " MySQL $version client tools installed successfully"
# Test the installation
local mysql_version=$("$version_dir/bin/mysqldump" --version 2>/dev/null | head -1)
echo " Verified: $mysql_version"
else
echo " Warning: Could not extract MySQL $version binaries"
echo " You may need to install MySQL $version client tools manually"
fi
# Clean up
rm -rf "mysql-$version.tar.gz" mysql-*/
echo
}
# Install each MySQL version
mysql_versions="5.7 8.0 8.4"
for version in $mysql_versions; do
url=${MYSQL_URLS[$version]}
if [ -n "$url" ]; then
install_mysql_client "$version" "$url"
else
echo "Warning: No URL defined for MySQL $version"
fi
done
# Clean up build directory
echo "Cleaning up build directory..."
rm -rf "$BUILD_DIR"
echo "========================================"
echo "Installation completed!"
echo "========================================"
echo
echo "PostgreSQL client tools are available in: $POSTGRES_DIR"
echo "MySQL client tools are available in: $MYSQL_DIR"
echo
# List installed versions
# List installed PostgreSQL versions
echo "Installed PostgreSQL client versions:"
for version in $versions; do
for version in $pg_versions; do
version_dir="$POSTGRES_DIR/postgresql-$version"
if [ -f "$version_dir/bin/pg_dump" ]; then
pg_version=$("$version_dir/bin/pg_dump" --version | cut -d' ' -f3)
@@ -138,8 +237,21 @@ for version in $versions; do
done
echo
echo "Usage example:"
echo " $POSTGRES_DIR/postgresql-15/bin/pg_dump --version"
echo "Installed MySQL client versions:"
for version in $mysql_versions; do
version_dir="$MYSQL_DIR/mysql-$version"
if [ -f "$version_dir/bin/mysqldump" ]; then
mysql_version=$("$version_dir/bin/mysqldump" --version 2>/dev/null | head -1)
echo " mysql-$version: $version_dir/bin/"
echo " $mysql_version"
fi
done
echo
echo "To add a specific version to your PATH temporarily:"
echo " export PATH=\"$POSTGRES_DIR/postgresql-15/bin:\$PATH\""
echo "Usage examples:"
echo " $POSTGRES_DIR/postgresql-15/bin/pg_dump --version"
echo " $MYSQL_DIR/mysql-8.0/bin/mysqldump --version"
echo
echo "To add specific versions to your PATH temporarily:"
echo " export PATH=\"$POSTGRES_DIR/postgresql-15/bin:\$PATH\""
echo " export PATH=\"$MYSQL_DIR/mysql-8.0/bin:\$PATH\""

View File

@@ -1,22 +1,34 @@
@echo off
setlocal enabledelayedexpansion
echo Downloading and installing PostgreSQL versions 12-18 for Windows...
echo Downloading and installing PostgreSQL and MySQL client tools for Windows...
echo.
:: Create downloads and postgresql directories if they don't exist
:: Create directories if they don't exist
if not exist "downloads" mkdir downloads
if not exist "postgresql" mkdir postgresql
if not exist "mysql" mkdir mysql
:: Get the absolute path to the postgresql directory
:: Get the absolute paths
set "POSTGRES_DIR=%cd%\postgresql"
set "MYSQL_DIR=%cd%\mysql"
echo PostgreSQL will be installed to: %POSTGRES_DIR%
echo MySQL will be installed to: %MYSQL_DIR%
echo.
cd downloads
:: ========== PostgreSQL Installation ==========
echo ========================================
echo Installing PostgreSQL client tools (versions 12-18)...
echo ========================================
echo.
:: PostgreSQL download URLs for Windows x64
set "BASE_URL=https://get.enterprisedb.com/postgresql"
:: Define versions and their corresponding download URLs
:: Define PostgreSQL versions and their corresponding download URLs
set "PG12_URL=%BASE_URL%/postgresql-12.20-1-windows-x64.exe"
set "PG13_URL=%BASE_URL%/postgresql-13.16-1-windows-x64.exe"
set "PG14_URL=%BASE_URL%/postgresql-14.13-1-windows-x64.exe"
@@ -25,11 +37,11 @@ set "PG16_URL=%BASE_URL%/postgresql-16.4-1-windows-x64.exe"
set "PG17_URL=%BASE_URL%/postgresql-17.0-1-windows-x64.exe"
set "PG18_URL=%BASE_URL%/postgresql-18.0-1-windows-x64.exe"
:: Array of versions
set "versions=12 13 14 15 16 17 18"
:: PostgreSQL versions
set "pg_versions=12 13 14 15 16 17 18"
:: Download and install each version
for %%v in (%versions%) do (
:: Download and install each PostgreSQL version
for %%v in (%pg_versions%) do (
echo Processing PostgreSQL %%v...
set "filename=postgresql-%%v-windows-x64.exe"
set "install_dir=%POSTGRES_DIR%\postgresql-%%v"
@@ -45,7 +57,7 @@ for %%v in (%versions%) do (
if !errorlevel! neq 0 (
echo Failed to download PostgreSQL %%v
goto :next_version
goto :next_pg_version
)
echo PostgreSQL %%v downloaded successfully
) else (
@@ -83,13 +95,132 @@ for %%v in (%versions%) do (
)
)
:next_version
:next_pg_version
echo.
)
:: ========== MySQL Installation ==========
echo ========================================
echo Installing MySQL client tools (versions 5.7, 8.0, 8.4)...
echo ========================================
echo.
:: MySQL download URLs for Windows x64 (ZIP archives) - using CDN
:: Note: 5.7 is in Downloads, 8.0 and 8.4 specific versions are in archives
set "MYSQL57_URL=https://cdn.mysql.com/Downloads/MySQL-5.7/mysql-5.7.44-winx64.zip"
set "MYSQL80_URL=https://cdn.mysql.com/archives/mysql-8.0/mysql-8.0.40-winx64.zip"
set "MYSQL84_URL=https://cdn.mysql.com/archives/mysql-8.4/mysql-8.4.3-winx64.zip"
:: MySQL versions
set "mysql_versions=5.7 8.0 8.4"
:: Download and install each MySQL version
for %%v in (%mysql_versions%) do (
echo Processing MySQL %%v...
set "version_underscore=%%v"
set "version_underscore=!version_underscore:.=!"
set "filename=mysql-%%v-winx64.zip"
set "install_dir=%MYSQL_DIR%\mysql-%%v"
:: Build the URL variable name and get its value
call set "current_url=%%MYSQL!version_underscore!_URL%%"
:: Check if already installed
if exist "!install_dir!\bin\mysqldump.exe" (
echo MySQL %%v already installed, skipping...
) else (
:: Download if not exists
if not exist "!filename!" (
echo Downloading MySQL %%v...
echo Downloading from: !current_url!
curl -L -o "!filename!" -A "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" "!current_url!"
if !errorlevel! neq 0 (
echo ERROR: Download request failed
goto :next_mysql_version
)
if not exist "!filename!" (
echo ERROR: Download failed - file not created
goto :next_mysql_version
)
for %%s in ("!filename!") do if %%~zs LSS 1000000 (
echo ERROR: Download failed - file too small, likely error page
del "!filename!" 2>nul
goto :next_mysql_version
)
echo MySQL %%v downloaded successfully
) else (
echo MySQL %%v already downloaded
)
:: Verify file exists before extraction
if not exist "!filename!" (
echo Download file not found, skipping extraction...
goto :next_mysql_version
)
:: Extract MySQL
echo Extracting MySQL %%v...
mkdir "!install_dir!" 2>nul
powershell -Command "Expand-Archive -Path '!filename!' -DestinationPath '!install_dir!_temp' -Force"
:: Move files from nested directory to install_dir
for /d %%d in ("!install_dir!_temp\mysql-*") do (
if exist "%%d\bin\mysqldump.exe" (
mkdir "!install_dir!\bin" 2>nul
copy "%%d\bin\mysql.exe" "!install_dir!\bin\" >nul 2>&1
copy "%%d\bin\mysqldump.exe" "!install_dir!\bin\" >nul 2>&1
)
)
:: Cleanup temp directory
rmdir /s /q "!install_dir!_temp" 2>nul
:: Verify installation
if exist "!install_dir!\bin\mysqldump.exe" (
echo MySQL %%v client tools installed successfully
) else (
echo Failed to install MySQL %%v - mysqldump.exe not found
)
)
:next_mysql_version
echo.
)
cd ..
echo.
echo ========================================
echo Installation process completed!
echo ========================================
echo.
echo PostgreSQL versions are installed in: %POSTGRES_DIR%
echo MySQL versions are installed in: %MYSQL_DIR%
echo.
:: List installed PostgreSQL versions
echo Installed PostgreSQL client versions:
for %%v in (%pg_versions%) do (
set "version_dir=%POSTGRES_DIR%\postgresql-%%v"
if exist "!version_dir!\bin\pg_dump.exe" (
echo postgresql-%%v: !version_dir!\bin\
)
)
echo.
echo Installed MySQL client versions:
for %%v in (%mysql_versions%) do (
set "version_dir=%MYSQL_DIR%\mysql-%%v"
if exist "!version_dir!\bin\mysqldump.exe" (
echo mysql-%%v: !version_dir!\bin\
)
)
echo.
echo Usage examples:
echo %POSTGRES_DIR%\postgresql-15\bin\pg_dump.exe --version
echo %MYSQL_DIR%\mysql-8.0\bin\mysqldump.exe --version
echo.
pause

View File

@@ -1,12 +1,14 @@
This directory is needed only for development and CI\CD.
We have to download and install all the PostgreSQL versions from 12 to 18 locally.
This is needed so we can call pg_dump, pg_dumpall, etc. on each version of the PostgreSQL database.
We have to download and install all the PostgreSQL versions from 12 to 18 and MySQL versions 5.7, 8.0, 8.4 locally.
This is needed so we can call pg_dump, pg_restore, mysqldump, mysql, etc. on each version of the database.
You do not need to install PostgreSQL fully with all the components.
We only need the client tools (pg_dump, pg_dumpall, psql, etc.) for each version.
You do not need to install the databases fully with all the components.
We only need the client tools for each version.
We have to install the following:
## Required Versions
### PostgreSQL
- PostgreSQL 12
- PostgreSQL 13
@@ -16,6 +18,12 @@ We have to install the following:
- PostgreSQL 17
- PostgreSQL 18
### MySQL
- MySQL 5.7
- MySQL 8.0
- MySQL 8.4
## Installation
Run the appropriate download script for your platform:
@@ -45,12 +53,14 @@ chmod +x download_macos.sh
### Windows
- Downloads official PostgreSQL installers from EnterpriseDB
- Downloads official MySQL ZIP archives from dev.mysql.com
- Installs client tools only (no server components)
- May require administrator privileges during installation
- May require administrator privileges during PostgreSQL installation
### Linux (Debian/Ubuntu)
- Uses the official PostgreSQL APT repository
- Downloads MySQL client tools from official archives
- Requires sudo privileges to install packages
- Creates symlinks in version-specific directories for consistency
@@ -58,17 +68,22 @@ chmod +x download_macos.sh
- Requires Homebrew to be installed
- Compiles PostgreSQL from source (client tools only)
- Takes longer than other platforms due to compilation
- Downloads pre-built MySQL binaries from dev.mysql.com
- Takes longer than other platforms due to PostgreSQL compilation
- Supports both Intel (x86_64) and Apple Silicon (arm64)
## Manual Installation
If something goes wrong with the automated scripts, install manually.
The final directory structure should match:
### PostgreSQL
```
./tools/postgresql/postgresql-{version}/bin/pg_dump
./tools/postgresql/postgresql-{version}/bin/pg_dumpall
./tools/postgresql/postgresql-{version}/bin/psql
./tools/postgresql/postgresql-{version}/bin/pg_restore
```
For example:
@@ -81,14 +96,69 @@ For example:
- `./tools/postgresql/postgresql-17/bin/pg_dump`
- `./tools/postgresql/postgresql-18/bin/pg_dump`
### MySQL
```
./tools/mysql/mysql-{version}/bin/mysqldump
./tools/mysql/mysql-{version}/bin/mysql
```
For example:
- `./tools/mysql/mysql-5.7/bin/mysqldump`
- `./tools/mysql/mysql-8.0/bin/mysqldump`
- `./tools/mysql/mysql-8.4/bin/mysqldump`
## Usage
After installation, you can use version-specific tools:
```bash
# Windows
# Windows - PostgreSQL
./postgresql/postgresql-15/bin/pg_dump.exe --version
# Linux/MacOS
# Windows - MySQL
./mysql/mysql-8.0/bin/mysqldump.exe --version
# Linux/MacOS - PostgreSQL
./postgresql/postgresql-15/bin/pg_dump --version
# Linux/MacOS - MySQL
./mysql/mysql-8.0/bin/mysqldump --version
```
## Environment Variables
The application expects these environment variables to be set (or uses defaults):
```env
# PostgreSQL tools directory (default: ./tools/postgresql)
POSTGRES_INSTALL_DIR=C:\path\to\tools\postgresql
# MySQL tools directory (default: ./tools/mysql)
MYSQL_INSTALL_DIR=C:\path\to\tools\mysql
```
## Troubleshooting
### MySQL 5.7 on Apple Silicon (M1/M2/M3)
MySQL 5.7 does not have native ARM64 binaries for macOS. The script will attempt to download the x86_64 version, which may work under Rosetta 2. If you encounter issues:
1. Ensure Rosetta 2 is installed: `softwareupdate --install-rosetta`
2. Or skip MySQL 5.7 if you don't need to support that version
### Permission Errors on Linux
If you encounter permission errors, ensure you have sudo privileges:
```bash
sudo ./download_linux.sh
```
### Download Failures
If downloads fail, you can manually download the files:
- PostgreSQL: https://www.postgresql.org/ftp/source/
- MySQL: https://dev.mysql.com/downloads/mysql/

View File

@@ -10,6 +10,7 @@
"dependencies": {
"@tailwindcss/vite": "^4.1.7",
"antd": "^5.25.1",
"cron-parser": "^5.4.0",
"dayjs": "^1.11.13",
"react": "^19.1.0",
"react-dom": "^19.1.0",
@@ -3138,6 +3139,18 @@
"toggle-selection": "^1.0.6"
}
},
"node_modules/cron-parser": {
"version": "5.4.0",
"resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-5.4.0.tgz",
"integrity": "sha512-HxYB8vTvnQFx4dLsZpGRa0uHp6X3qIzS3ZJgJ9v6l/5TJMgeWQbLkR5yiJ5hOxGbc9+jCADDnydIe15ReLZnJA==",
"license": "MIT",
"dependencies": {
"luxon": "^3.7.1"
},
"engines": {
"node": ">=18"
}
},
"node_modules/cross-spawn": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
@@ -5463,6 +5476,15 @@
"yallist": "^3.0.2"
}
},
"node_modules/luxon": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.7.2.tgz",
"integrity": "sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==",
"license": "MIT",
"engines": {
"node": ">=12"
}
},
"node_modules/magic-string": {
"version": "0.30.17",
"resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz",

View File

@@ -15,6 +15,7 @@
"dependencies": {
"@tailwindcss/vite": "^4.1.7",
"antd": "^5.25.1",
"cron-parser": "^5.4.0",
"dayjs": "^1.11.13",
"react": "^19.1.0",
"react-dom": "^19.1.0",
@@ -24,12 +25,12 @@
"tailwindcss": "^4.1.7"
},
"devDependencies": {
"@vitest/coverage-v8": "^3.2.4",
"@eslint/js": "^9.25.0",
"@trivago/prettier-plugin-sort-imports": "^5.2.2",
"@types/react": "^19.1.2",
"@types/react-dom": "^19.1.2",
"@vitejs/plugin-react": "^4.4.1",
"@vitest/coverage-v8": "^3.2.4",
"eslint": "^9.25.0",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-react-hooks": "^5.2.0",

View File

@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 32 32" xmlns="http://www.w3.org/2000/svg"><title>file_type_mysql</title><path d="M8.785,6.865a3.055,3.055,0,0,0-.785.1V7h.038a6.461,6.461,0,0,0,.612.785c.154.306.288.611.441.917.019-.019.038-.039.038-.039a1.074,1.074,0,0,0,.4-.957,4.314,4.314,0,0,1-.23-.4c-.115-.191-.364-.287-.517-.44" style="fill:#5d87a1;fill-rule:evenodd"/><path d="M27.78,23.553a8.849,8.849,0,0,0-3.712.536c-.287.115-.745.115-.785.478.154.153.172.4.307.613a4.467,4.467,0,0,0,.995,1.167c.4.306.8.611,1.225.879.745.461,1.588.728,2.314,1.187.422.268.842.612,1.264.9.21.153.343.4.611.5v-.058a3.844,3.844,0,0,0-.291-.613c-.191-.19-.383-.363-.575-.554a9.118,9.118,0,0,0-1.99-1.932c-.613-.422-1.953-1-2.2-1.7l-.039-.039a7.69,7.69,0,0,0,1.321-.308c.65-.172,1.243-.133,1.912-.3.307-.077.862-.268.862-.268v-.3c-.342-.34-.587-.795-.947-1.116a25.338,25.338,0,0,0-3.122-2.328c-.587-.379-1.344-.623-1.969-.946-.226-.114-.6-.17-.737-.36a7.594,7.594,0,0,1-.776-1.457c-.548-1.04-1.079-2.193-1.551-3.293a20.236,20.236,0,0,0-.965-2.157A19.078,19.078,0,0,0,11.609,5a9.07,9.07,0,0,0-2.421-.776c-.474-.02-.946-.057-1.419-.075A7.55,7.55,0,0,1,6.9,3.485C5.818,2.8,3.038,1.328,2.242,3.277,1.732,4.508,3,5.718,3.435,6.343A8.866,8.866,0,0,1,4.4,7.762c.133.322.171.663.3,1A22.556,22.556,0,0,0,5.687,11.3a8.946,8.946,0,0,0,.7,1.172c.153.209.417.3.474.645a5.421,5.421,0,0,0-.436,1.419,8.336,8.336,0,0,0,.549,6.358c.3.473,1.022,1.514,1.987,1.116.851-.34.662-1.419.908-2.364.056-.229.019-.379.132-.53V19.3s.483,1.061.723,1.6a10.813,10.813,0,0,0,2.4,2.59A3.514,3.514,0,0,1,14,24.657V25h.427A1.054,1.054,0,0,0,14,24.212a9.4,9.4,0,0,1-.959-1.16,24.992,24.992,0,0,1-2.064-3.519c-.3-.6-.553-1.258-.793-1.857-.11-.231-.11-.58-.295-.7a7.266,7.266,0,0,0-.884,1.313,11.419,11.419,0,0,0-.517,2.921c-.073.02-.037,0-.073.038-.589-.155-.792-.792-1.014-1.332a8.756,8.756,0,0,1-.166-5.164c.128-.405.683-1.681.461-2.068-.111-.369-.48-.58-.682-.871a7.767,7.767,0,0,1-.663-1.237C5.912,9.5,5.69,8.3,5.212,7.216a10.4,10.4,0,0,0-.921-1
.489A9.586,9.586,0,0,1,3.276,4.22c-.092-.213-.221-.561-.074-.793a.3.3,0,0,1,.259-.252c.238-.212.921.058,1.16.174a9.2,9.2,0,0,1,1.824.967c.258.194.866.685.866.685h.18c.612.133,1.3.037,1.876.21a12.247,12.247,0,0,1,2.755,1.32,16.981,16.981,0,0,1,5.969,6.545c.23.439.327.842.537,1.3.4.94.9,1.9,1.3,2.814a12.578,12.578,0,0,0,1.36,2.564c.286.4,1.435.612,1.952.822a13.7,13.7,0,0,1,1.32.535c.651.4,1.3.861,1.913,1.3.305.23,1.262.708,1.32,1.091" style="fill:#00758f;fill-rule:evenodd"/></svg>

After

Width:  |  Height:  |  Size: 2.5 KiB

View File

@@ -0,0 +1,102 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="64"
height="64"
viewBox="0 0 64 64"
version="1.1"
xml:space="preserve"
style="clip-rule:evenodd;fill-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:1.41420996"
id="svg50"
sodipodi:docname="rclone-icon.svg"
inkscape:version="0.92.4 (5da689c313, 2019-01-14)"><metadata
id="metadata56"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
id="defs54">
<clipPath
id="_clip1">
<rect
x="14"
y="579"
width="257"
height="84"
id="rect4" />
</clipPath>
</defs><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
objecttolerance="10"
gridtolerance="10"
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="1531"
inkscape:window-height="807"
id="namedview52"
showgrid="false"
units="px"
inkscape:zoom="1.539823"
inkscape:cx="-84.425288"
inkscape:cy="26.5"
inkscape:window-x="70"
inkscape:window-y="27"
inkscape:window-maximized="0"
inkscape:current-layer="svg50" />
<g
id="g824"
transform="matrix(1.3422256,0,0,1.3422256,-2.2309418e-8,3.8420351)"><path
d="m 45.726917,21.83581 c -1.507672,-2.611426 -3.701518,-4.579735 -6.222732,-5.808561 -0.322585,1.72227 -0.932898,3.419936 -1.857594,5.021921 l -1.459147,2.532147 c 0.971853,0.539918 1.817954,1.334759 2.414598,2.368122 1.753027,3.035842 0.712146,6.919151 -2.324383,8.672176 -3.035847,1.753025 -6.919159,0.712829 -8.672186,-2.323698 l -2.944264,-5.091631 h -4.751283 l -2.375642,4.114312 2.946315,5.090948 c 4.025469,6.971776 12.939592,9.360401 19.911375,5.334937 6.971101,-4.024782 9.359727,-12.938896 5.334943,-19.910673"
style="fill:#70caf2;fill-rule:nonzero;stroke-width:0.68344086"
id="path7"
inkscape:connector-curvature="0" /><path
d="M 31.127807,0.45456543 C 24.156023,-3.5702158 15.2419,-1.1815912 11.217114,5.7895021 9.7087599,8.4009285 9.1018638,11.285048 9.2980112,14.083052 10.950572,13.501445 12.726153,13.180911 14.576228,13.180911 l 2.921711,-0.0027 c -0.01845,-1.111274 0.247406,-2.241684 0.843367,-3.2743635 1.75371,-3.036526 5.636339,-4.0774059 8.672868,-2.3236971 3.03653,1.7530242 4.076727,5.6356506 2.323701,8.6721766 l -2.936747,5.095732 2.374958,4.114995 4.751283,-6.83e-4 2.93538,-5.097099 C 40.488218,13.394145 38.099591,4.4793466 31.127807,0.45456543"
style="fill:#b4e3f9;fill-rule:nonzero;stroke-width:0.68344086"
id="path11"
inkscape:connector-curvature="0" /><path
d="m 19.297646,37.095505 -1.463932,-2.529413 c -0.9534,0.57204 -2.064675,0.906925 -3.25728,0.906925 -3.506736,0 -6.3491688,-2.842428 -6.3491688,-6.349162 0,-3.50605 2.8424328,-6.348479 6.3491688,-6.348479 l 5.881011,-0.0041 2.376326,-4.114312 -2.376326,-4.114312 -5.881695,0.0055 C 6.5254965,14.548074 1.6621211e-8,21.074248 1.6621211e-8,29.12381 1.6621211e-8,37.174056 6.5254965,43.70023 14.57575,43.70023 c 3.014659,0 5.814718,-0.915811 8.139101,-2.48294 -1.329976,-1.140662 -2.49251,-2.520528 -3.417205,-4.12183"
style="fill:#3f79ad;fill-rule:nonzero;stroke-width:0.68344086"
id="path15"
inkscape:connector-curvature="0" /></g>
</svg>

After

Width:  |  Height:  |  Size: 4.3 KiB

View File

@@ -0,0 +1,3 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 1024 1024" class="icon" version="1.1" xmlns="http://www.w3.org/2000/svg"><path d="M853.333333 256H469.333333l-85.333333-85.333333H170.666667c-46.933333 0-85.333333 38.4-85.333334 85.333333v170.666667h853.333334v-85.333334c0-46.933333-38.4-85.333333-85.333334-85.333333z" fill="#FFA000" /><path d="M853.333333 256H170.666667c-46.933333 0-85.333333 38.4-85.333334 85.333333v426.666667c0 46.933333 38.4 85.333333 85.333334 85.333333h682.666666c46.933333 0 85.333333-38.4 85.333334-85.333333V341.333333c0-46.933333-38.4-85.333333-85.333334-85.333333z" fill="#FFCA28" /></svg>

After

Width:  |  Height:  |  Size: 741 B

View File

@@ -1,8 +1,11 @@
export { databaseApi } from './api/databaseApi';
export { type Database } from './model/Database';
export { DatabaseType } from './model/DatabaseType';
export { getDatabaseLogoFromType } from './model/getDatabaseLogoFromType';
export { Period } from './model/Period';
export { type PostgresqlDatabase } from './model/postgresql/PostgresqlDatabase';
export { PostgresqlVersion } from './model/postgresql/PostgresqlVersion';
export { type MysqlDatabase } from './model/mysql/MysqlDatabase';
export { MysqlVersion } from './model/mysql/MysqlVersion';
export { type IsReadOnlyResponse } from './model/IsReadOnlyResponse';
export { type CreateReadOnlyUserResponse } from './model/CreateReadOnlyUserResponse';

View File

@@ -1,6 +1,7 @@
import type { Notifier } from '../../notifiers';
import type { DatabaseType } from './DatabaseType';
import type { HealthStatus } from './HealthStatus';
import type { MysqlDatabase } from './mysql/MysqlDatabase';
import type { PostgresqlDatabase } from './postgresql/PostgresqlDatabase';
export interface Database {
@@ -10,6 +11,7 @@ export interface Database {
type: DatabaseType;
postgresql?: PostgresqlDatabase;
mysql?: MysqlDatabase;
notifiers: Notifier[];

View File

@@ -1,3 +1,4 @@
// Database engine types supported by the app.
// String values are serialized as-is when talking to the backend API.
export enum DatabaseType {
POSTGRES = 'POSTGRES',
MYSQL = 'MYSQL',
}

View File

@@ -0,0 +1,12 @@
import { DatabaseType } from './DatabaseType';
/**
 * Resolves the logo asset path for a database engine type.
 * Returns an empty string for types without a known logo.
 */
export const getDatabaseLogoFromType = (type: DatabaseType) => {
  if (type === DatabaseType.POSTGRES) {
    return '/icons/databases/postgresql.svg';
  }

  if (type === DatabaseType.MYSQL) {
    return '/icons/databases/mysql.svg';
  }

  return '';
};

View File

@@ -0,0 +1,484 @@
import { describe, expect, it } from 'vitest';
import {
MySqlConnectionStringParser,
type ParseError,
type ParseResult,
} from './MySqlConnectionStringParser';
// Unit tests for MySqlConnectionStringParser.
// Covers: the standard mysql:// URI, provider-specific shapes (AWS RDS,
// PlanetScale, DigitalOcean, Azure, Railway), JDBC and key-value formats,
// SSL-flag detection across the various parameter spellings, and the
// error messages returned for malformed or incomplete input.
describe('MySqlConnectionStringParser', () => {
  // Helper to assert successful parse
  const expectSuccess = (result: ParseResult | ParseError): ParseResult => {
    expect('error' in result).toBe(false);
    return result as ParseResult;
  };

  // Helper to assert parse error
  const expectError = (result: ParseResult | ParseError): ParseError => {
    expect('error' in result).toBe(true);
    return result as ParseError;
  };

  describe('Standard MySQL URI (mysql://)', () => {
    it('should parse basic mysql:// connection string', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://myuser:mypassword@localhost:3306/mydb'),
      );
      expect(result.host).toBe('localhost');
      expect(result.port).toBe(3306);
      expect(result.username).toBe('myuser');
      expect(result.password).toBe('mypassword');
      expect(result.database).toBe('mydb');
      expect(result.isHttps).toBe(false);
    });

    it('should default port to 3306 when not specified', () => {
      const result = expectSuccess(MySqlConnectionStringParser.parse('mysql://user:pass@host/db'));
      expect(result.port).toBe(3306);
    });

    it('should handle URL-encoded passwords', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://user:p%40ss%23word@host:3306/db'),
      );
      expect(result.password).toBe('p@ss#word');
    });

    it('should handle URL-encoded usernames', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://user%40domain:password@host:3306/db'),
      );
      expect(result.username).toBe('user@domain');
    });
  });

  describe('AWS RDS Connection String', () => {
    it('should parse AWS RDS connection string', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'mysql://rdsuser:rdspass@mydb.abc123xyz.us-east-1.rds.amazonaws.com:3306/mydb',
        ),
      );
      expect(result.host).toBe('mydb.abc123xyz.us-east-1.rds.amazonaws.com');
      expect(result.port).toBe(3306);
      expect(result.username).toBe('rdsuser');
    });
  });

  describe('PlanetScale Connection String', () => {
    // PlanetScale uses sslaccept=strict rather than a sslMode parameter.
    it('should parse PlanetScale connection string with sslaccept', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'mysql://psuser:pspass@xxx.connect.psdb.cloud/mydb?sslaccept=strict',
        ),
      );
      expect(result.host).toBe('xxx.connect.psdb.cloud');
      expect(result.username).toBe('psuser');
      expect(result.database).toBe('mydb');
      expect(result.isHttps).toBe(true);
    });
  });

  describe('DigitalOcean Connection String', () => {
    it('should parse DigitalOcean connection string with ssl-mode', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'mysql://doadmin:dopassword@db-mysql-nyc1-12345-do-user-123456-0.b.db.ondigitalocean.com:25060/defaultdb?ssl-mode=REQUIRED',
        ),
      );
      expect(result.host).toBe('db-mysql-nyc1-12345-do-user-123456-0.b.db.ondigitalocean.com');
      expect(result.port).toBe(25060);
      expect(result.username).toBe('doadmin');
      expect(result.database).toBe('defaultdb');
      expect(result.isHttps).toBe(true);
    });
  });

  describe('Azure Database for MySQL Connection String', () => {
    // Azure's legacy login format puts a second '@' inside the userinfo
    // (user@servername:password@host), which the parser special-cases.
    it('should parse Azure connection string with user@server format', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'mysql://myuser@myserver:mypassword@myserver.mysql.database.azure.com:3306/mydb?ssl-mode=REQUIRED',
        ),
      );
      expect(result.host).toBe('myserver.mysql.database.azure.com');
      expect(result.port).toBe(3306);
      expect(result.username).toBe('myuser');
      expect(result.password).toBe('mypassword');
      expect(result.database).toBe('mydb');
      expect(result.isHttps).toBe(true);
    });
  });

  describe('Railway Connection String', () => {
    it('should parse Railway connection string', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'mysql://root:railwaypass@containers-us-west-123.railway.app:3306/railway',
        ),
      );
      expect(result.host).toBe('containers-us-west-123.railway.app');
      expect(result.username).toBe('root');
      expect(result.database).toBe('railway');
    });
  });

  describe('JDBC Connection String', () => {
    it('should parse JDBC connection string with user and password params', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'jdbc:mysql://localhost:3306/mydb?user=admin&password=secret',
        ),
      );
      expect(result.host).toBe('localhost');
      expect(result.port).toBe(3306);
      expect(result.username).toBe('admin');
      expect(result.password).toBe('secret');
      expect(result.database).toBe('mydb');
    });

    it('should parse JDBC connection string without port', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'jdbc:mysql://db.example.com/mydb?user=admin&password=secret',
        ),
      );
      expect(result.host).toBe('db.example.com');
      expect(result.port).toBe(3306);
    });

    it('should parse JDBC with useSSL parameter', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'jdbc:mysql://host:3306/db?user=u&password=p&useSSL=true',
        ),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should parse JDBC with sslMode parameter', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'jdbc:mysql://host:3306/db?user=u&password=p&sslMode=REQUIRED',
        ),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should return error for JDBC without user parameter', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('jdbc:mysql://host:3306/db?password=secret'),
      );
      expect(result.error).toContain('user');
      expect(result.format).toBe('JDBC');
    });

    it('should return error for JDBC without password parameter', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('jdbc:mysql://host:3306/db?user=admin'),
      );
      expect(result.error).toContain('Password');
      expect(result.format).toBe('JDBC');
    });
  });

  describe('SSL Mode Handling', () => {
    // All accepted spellings of the SSL switch should map to isHttps=true.
    it('should set isHttps=true for ssl=true', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?ssl=true'),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should set isHttps=true for sslMode=REQUIRED', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?sslMode=REQUIRED'),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should set isHttps=true for ssl-mode=REQUIRED', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?ssl-mode=REQUIRED'),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should set isHttps=true for useSSL=true', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?useSSL=true'),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should set isHttps=true for sslMode=verify_ca', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?sslMode=verify_ca'),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should set isHttps=true for sslMode=verify_identity', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?sslMode=verify_identity'),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should set isHttps=false for ssl=false', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db?ssl=false'),
      );
      expect(result.isHttps).toBe(false);
    });

    it('should set isHttps=false when no ssl specified', () => {
      const result = expectSuccess(MySqlConnectionStringParser.parse('mysql://u:p@host:3306/db'));
      expect(result.isHttps).toBe(false);
    });
  });

  describe('Key-Value Format', () => {
    it('should parse key-value format connection string', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'host=localhost port=3306 database=mydb user=admin password=secret',
        ),
      );
      expect(result.host).toBe('localhost');
      expect(result.port).toBe(3306);
      expect(result.username).toBe('admin');
      expect(result.password).toBe('secret');
      expect(result.database).toBe('mydb');
    });

    it('should parse key-value format with quoted password containing spaces', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          "host=localhost port=3306 database=mydb user=admin password='my secret pass'",
        ),
      );
      expect(result.password).toBe('my secret pass');
    });

    it('should default port to 3306 when not specified in key-value format', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'host=localhost database=mydb user=admin password=secret',
        ),
      );
      expect(result.port).toBe(3306);
    });

    it('should handle hostaddr as alternative to host', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'hostaddr=192.168.1.1 port=3306 database=mydb user=admin password=secret',
        ),
      );
      expect(result.host).toBe('192.168.1.1');
    });

    it('should handle dbname as alternative to database', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'host=localhost port=3306 dbname=mydb user=admin password=secret',
        ),
      );
      expect(result.database).toBe('mydb');
    });

    it('should handle username as alternative to user', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'host=localhost port=3306 database=mydb username=admin password=secret',
        ),
      );
      expect(result.username).toBe('admin');
    });

    it('should parse ssl in key-value format', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'host=localhost database=mydb user=admin password=secret ssl=true',
        ),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should parse sslMode in key-value format', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'host=localhost database=mydb user=admin password=secret sslMode=REQUIRED',
        ),
      );
      expect(result.isHttps).toBe(true);
    });

    it('should return error for key-value format missing host', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('port=3306 database=mydb user=admin password=secret'),
      );
      expect(result.error).toContain('Host');
      expect(result.format).toBe('key-value');
    });

    it('should return error for key-value format missing user', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('host=localhost database=mydb password=secret'),
      );
      expect(result.error).toContain('Username');
      expect(result.format).toBe('key-value');
    });

    it('should return error for key-value format missing password', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('host=localhost database=mydb user=admin'),
      );
      expect(result.error).toContain('Password');
      expect(result.format).toBe('key-value');
    });

    it('should return error for key-value format missing database', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('host=localhost user=admin password=secret'),
      );
      expect(result.error).toContain('Database');
      expect(result.format).toBe('key-value');
    });
  });

  describe('Error Cases', () => {
    it('should return error for empty string', () => {
      const result = expectError(MySqlConnectionStringParser.parse(''));
      expect(result.error).toContain('empty');
    });

    it('should return error for whitespace-only string', () => {
      const result = expectError(MySqlConnectionStringParser.parse(' '));
      expect(result.error).toContain('empty');
    });

    it('should return error for unrecognized format', () => {
      const result = expectError(MySqlConnectionStringParser.parse('some random text'));
      expect(result.error).toContain('Unrecognized');
    });

    it('should return error for missing username in URI', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('mysql://:password@host:3306/db'),
      );
      expect(result.error).toContain('Username');
    });

    it('should return error for missing password in URI', () => {
      const result = expectError(MySqlConnectionStringParser.parse('mysql://user@host:3306/db'));
      expect(result.error).toContain('Password');
    });

    it('should return error for missing database in URI', () => {
      const result = expectError(MySqlConnectionStringParser.parse('mysql://user:pass@host:3306/'));
      expect(result.error).toContain('Database');
    });

    it('should return error for invalid JDBC format', () => {
      const result = expectError(MySqlConnectionStringParser.parse('jdbc:mysql://invalid'));
      expect(result.format).toBe('JDBC');
    });

    it('should return error for postgresql:// format (wrong database type)', () => {
      const result = expectError(
        MySqlConnectionStringParser.parse('postgresql://user:pass@host:5432/db'),
      );
      expect(result.error).toContain('Unrecognized');
    });
  });

  describe('Edge Cases', () => {
    it('should handle special characters in password', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://user:p%40ss%3Aw%2Ford@host:3306/db'),
      );
      expect(result.password).toBe('p@ss:w/ord');
    });

    it('should handle numeric database names', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://user:pass@host:3306/12345'),
      );
      expect(result.database).toBe('12345');
    });

    it('should handle hyphenated host names', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse('mysql://user:pass@my-database-host.example.com:3306/db'),
      );
      expect(result.host).toBe('my-database-host.example.com');
    });

    it('should handle connection string with extra query parameters', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(
          'mysql://user:pass@host:3306/db?ssl=true&connectTimeout=10&charset=utf8mb4',
        ),
      );
      expect(result.isHttps).toBe(true);
      expect(result.database).toBe('db');
    });

    it('should trim whitespace from connection string', () => {
      const result = expectSuccess(
        MySqlConnectionStringParser.parse(' mysql://user:pass@host:3306/db '),
      );
      expect(result.host).toBe('host');
    });
  });
});

View File

@@ -0,0 +1,291 @@
// Successful parse of a MySQL connection string: a fully-normalised
// set of connection fields regardless of the input format.
export type ParseResult = {
  host: string;
  port: number;
  username: string;
  password: string;
  database: string;
  // True when the string requested SSL/TLS via any of the recognised
  // parameters (ssl / sslMode / ssl-mode / useSSL / sslaccept=strict).
  isHttps: boolean;
};

// Failed parse. `format` names the parser branch that rejected the
// input ('URI', 'JDBC', 'key-value') when it is known.
export type ParseError = {
  error: string;
  format?: string;
};
/**
 * Normalises MySQL connection strings from many providers and tools
 * (AWS RDS, PlanetScale, DigitalOcean, Azure, Railway, JDBC, key-value)
 * into a single ParseResult shape, or returns a ParseError describing
 * what was wrong with the input.
 */
export class MySqlConnectionStringParser {
  /**
   * Parses a MySQL connection string in various formats.
   *
   * Supported formats:
   * 1. Standard MySQL URI: mysql://user:pass@host:port/db
   * 2. JDBC format: jdbc:mysql://host:port/db?user=x&password=y
   * 3. Key-value format: host=x port=3306 database=db user=u password=p
   * 4. With SSL params: mysql://user:pass@host:port/db?ssl=true or ?sslMode=REQUIRED
   * 5. AWS RDS: mysql://user:pass@xxx.rds.amazonaws.com:3306/db
   * 6. PlanetScale: mysql://user:pass@xxx.connect.psdb.cloud/db?sslaccept=strict
   * 7. DigitalOcean: mysql://user:pass@xxx.ondigitalocean.com:25060/db?ssl-mode=REQUIRED
   * 8. Azure MySQL: mysql://user@servername:pass@xxx.mysql.database.azure.com:3306/db
   * 9. Railway: mysql://user:pass@xxx.railway.app:port/db
   *
   * @param connectionString raw user-supplied connection string
   * @returns a ParseResult on success, otherwise a ParseError whose
   *          optional `format` names the branch that rejected the input
   */
  static parse(connectionString: string): ParseResult | ParseError {
    const trimmed = connectionString.trim();

    if (!trimmed) {
      return { error: 'Connection string is empty' };
    }

    // Try JDBC format first (starts with jdbc:)
    if (trimmed.startsWith('jdbc:mysql://')) {
      return this.parseJdbc(trimmed);
    }

    // Try key-value format (contains key=value pairs without ://)
    if (this.isKeyValueFormat(trimmed)) {
      return this.parseKeyValue(trimmed);
    }

    // Try URI format (mysql://)
    if (trimmed.startsWith('mysql://')) {
      return this.parseUri(trimmed);
    }

    return {
      error: 'Unrecognized connection string format',
    };
  }

  /** True when the string looks like space-separated key=value pairs rather than a URI. */
  private static isKeyValueFormat(str: string): boolean {
    // Key-value format has key=value pairs separated by spaces.
    // Must contain at least host= or database= to be considered key-value format.
    return (
      !str.includes('://') &&
      (str.includes('host=') || str.includes('database=')) &&
      str.includes('=')
    );
  }

  /** Parses mysql:// URIs, including Azure's legacy user@server userinfo shape. */
  private static parseUri(connectionString: string): ParseResult | ParseError {
    try {
      // Handle Azure format where username contains @: user@server:pass.
      // Azure format: mysql://user@servername:password@host:port/db
      // The double '@' would confuse the generic URL parser, so it gets
      // a dedicated regex first. Every capture group requires at least
      // one character, so matched fields are guaranteed non-empty.
      const azureMatch = connectionString.match(
        /^mysql:\/\/([^@:]+)@([^:]+):([^@]+)@([^:/?]+):?(\d+)?\/([^?]+)(?:\?(.*))?$/,
      );

      if (azureMatch) {
        // Group 2 (the repeated server name after '@') is intentionally discarded.
        const [, user, , password, host, port, database, queryString] = azureMatch;
        const isHttps = this.checkSslMode(queryString);

        return {
          host: host,
          port: port ? parseInt(port, 10) : 3306,
          username: decodeURIComponent(user),
          password: decodeURIComponent(password),
          database: decodeURIComponent(database),
          isHttps,
        };
      }

      // Standard URI parsing using the URL API (validates host/port syntax).
      const url = new URL(connectionString);

      const host = url.hostname;
      const port = url.port ? parseInt(url.port, 10) : 3306;
      const username = decodeURIComponent(url.username);
      const password = decodeURIComponent(url.password);
      const database = decodeURIComponent(url.pathname.slice(1)); // Remove leading /
      const isHttps = this.checkSslMode(url.search);

      // Validate required fields
      if (!host) {
        return { error: 'Host is missing from connection string' };
      }
      if (!username) {
        return { error: 'Username is missing from connection string' };
      }
      if (!password) {
        return { error: 'Password is missing from connection string' };
      }
      if (!database) {
        return { error: 'Database name is missing from connection string' };
      }

      return {
        host,
        port,
        username,
        password,
        database,
        isHttps,
      };
    } catch (e) {
      return {
        error: `Failed to parse connection string: ${(e as Error).message}`,
        format: 'URI',
      };
    }
  }

  /** Parses jdbc:mysql:// strings, which carry credentials in the query string. */
  private static parseJdbc(connectionString: string): ParseResult | ParseError {
    try {
      // JDBC format: jdbc:mysql://host:port/database?user=x&password=y
      const jdbcRegex = /^jdbc:mysql:\/\/([^:/?]+):?(\d+)?\/([^?]+)(?:\?(.*))?$/;
      const match = connectionString.match(jdbcRegex);

      if (!match) {
        return {
          error:
            'Invalid JDBC connection string format. Expected: jdbc:mysql://host:port/database?user=x&password=y',
          format: 'JDBC',
        };
      }

      const [, host, port, database, queryString] = match;

      if (!queryString) {
        return {
          error: 'JDBC connection string is missing query parameters (user and password)',
          format: 'JDBC',
        };
      }

      const params = new URLSearchParams(queryString);
      const username = params.get('user');
      const password = params.get('password');
      const isHttps = this.checkSslMode(queryString);

      if (!username) {
        return {
          error: 'Username (user parameter) is missing from JDBC connection string',
          format: 'JDBC',
        };
      }

      if (!password) {
        return {
          error: 'Password parameter is missing from JDBC connection string',
          format: 'JDBC',
        };
      }

      return {
        host,
        // The regex only admits digits here, so parseInt cannot yield NaN.
        port: port ? parseInt(port, 10) : 3306,
        username: decodeURIComponent(username),
        password: decodeURIComponent(password),
        database: decodeURIComponent(database),
        isHttps,
      };
    } catch (e) {
      return {
        error: `Failed to parse JDBC connection string: ${(e as Error).message}`,
        format: 'JDBC',
      };
    }
  }

  /**
   * Parses space-separated key=value pairs, e.g.
   * `host=localhost port=3306 database=mydb user=admin password=secret`.
   * Values may be single-quoted to contain spaces: password='my pass'.
   */
  private static parseKeyValue(connectionString: string): ParseResult | ParseError {
    try {
      const params: Record<string, string> = {};

      // Match key=value or key='quoted value'. Note: keys are \w+, so a
      // hyphenated key like ssl-mode is not captured in this format.
      const regex = /(\w+)=(?:'([^']*)'|(\S+))/g;
      let match;
      while ((match = regex.exec(connectionString)) !== null) {
        const key = match[1];
        const value = match[2] !== undefined ? match[2] : match[3];
        params[key] = value;
      }

      // Accept the common aliases for each field.
      const host = params['host'] || params['hostaddr'];
      const port = params['port'];
      const database = params['database'] || params['dbname'];
      const username = params['user'] || params['username'];
      const password = params['password'];
      const ssl = params['ssl'] || params['sslMode'] || params['ssl-mode'] || params['useSSL'];

      if (!host) {
        return {
          error: 'Host is missing from connection string. Use host=hostname',
          format: 'key-value',
        };
      }

      if (!username) {
        return {
          error: 'Username is missing from connection string. Use user=username',
          format: 'key-value',
        };
      }

      if (!password) {
        return {
          error: 'Password is missing from connection string. Use password=yourpassword',
          format: 'key-value',
        };
      }

      if (!database) {
        return {
          error: 'Database name is missing from connection string. Use database=database',
          format: 'key-value',
        };
      }

      // Validate the port explicitly: previously `port=abc` was passed
      // straight through parseInt and silently produced `port: NaN`.
      let portNumber = 3306;
      if (port !== undefined) {
        portNumber = parseInt(port, 10);
        if (Number.isNaN(portNumber) || portNumber < 1 || portNumber > 65535) {
          return {
            error: `Invalid port value: ${port}`,
            format: 'key-value',
          };
        }
      }

      const isHttps = this.isSslEnabled(ssl);

      return {
        host,
        port: portNumber,
        username,
        password,
        database,
        isHttps,
      };
    } catch (e) {
      return {
        error: `Failed to parse key-value connection string: ${(e as Error).message}`,
        format: 'key-value',
      };
    }
  }

  /** Inspects a query string for any of the recognised SSL parameter spellings. */
  private static checkSslMode(queryString: string | undefined | null): boolean {
    if (!queryString) return false;

    const params = new URLSearchParams(
      queryString.startsWith('?') ? queryString.slice(1) : queryString,
    );

    // Check various MySQL SSL parameter names
    const ssl = params.get('ssl');
    const sslMode = params.get('sslMode');
    const sslModeHyphen = params.get('ssl-mode');
    const useSSL = params.get('useSSL');
    const sslaccept = params.get('sslaccept');

    if (ssl) return this.isSslEnabled(ssl);
    if (sslMode) return this.isSslEnabled(sslMode);
    if (sslModeHyphen) return this.isSslEnabled(sslModeHyphen);
    if (useSSL) return this.isSslEnabled(useSSL);
    // PlanetScale style: only the literal value 'strict' enables SSL.
    if (sslaccept) return sslaccept.toLowerCase() === 'strict';

    return false;
  }

  /** Maps an SSL parameter value (case-insensitive) to an on/off decision. */
  private static isSslEnabled(sslValue: string | null | undefined): boolean {
    if (!sslValue) return false;
    const lowercased = sslValue.toLowerCase();
    // These values indicate SSL is enabled
    const enabledValues = ['true', 'required', 'verify_ca', 'verify_identity', 'yes', '1'];
    return enabledValues.includes(lowercased);
  }
}

View File

@@ -0,0 +1,13 @@
import type { MysqlVersion } from './MysqlVersion';
// Connection settings for a MySQL database entry.
// NOTE(review): presumably mirrors the backend MysqlDatabase model — confirm.
export interface MysqlDatabase {
  id: string;
  // Target MySQL server version ('5.7' | '8.0' | '8.4').
  version: MysqlVersion;
  host: string;
  port: number;
  username: string;
  password: string;
  // Optional database name — behavior when omitted is not visible here; TODO confirm with backend.
  database?: string;
  // NOTE(review): field is named isHttps but this is a DB connection —
  // likely means "connect over SSL/TLS"; confirm semantics with backend.
  isHttps: boolean;
}

View File

@@ -0,0 +1,5 @@
// Known MySQL server versions; enum values are the dotted version strings.
export enum MysqlVersion {
MysqlVersion57 = '5.7',
MysqlVersion80 = '8.0',
MysqlVersion84 = '8.4',
}

View File

@@ -14,4 +14,7 @@ export interface PostgresqlDatabase {
// backup settings
includeSchemas?: string[];
// restore settings (not saved to DB)
isExcludeExtensions?: boolean;
}

View File

@@ -8,4 +8,6 @@ export interface Interval {
weekday?: number;
// only for MONTHLY
dayOfMonth?: number;
// only for CRON
cronExpression?: string;
}

View File

@@ -3,4 +3,5 @@ export enum IntervalType {
DAILY = 'DAILY',
WEEKLY = 'WEEKLY',
MONTHLY = 'MONTHLY',
CRON = 'CRON',
}

View File

@@ -1,7 +1,7 @@
import { getApplicationServer } from '../../../constants';
import RequestOptions from '../../../shared/api/RequestOptions';
import { apiHelper } from '../../../shared/api/apiHelper';
import type { PostgresqlDatabase } from '../../databases';
import type { MysqlDatabase, PostgresqlDatabase } from '../../databases';
import type { Restore } from '../model/Restore';
export const restoreApi = {
@@ -16,14 +16,17 @@ export const restoreApi = {
async restoreBackup({
backupId,
postgresql,
mysql,
}: {
backupId: string;
postgresql: PostgresqlDatabase;
postgresql?: PostgresqlDatabase;
mysql?: MysqlDatabase;
}) {
const requestOptions: RequestOptions = new RequestOptions();
requestOptions.setBody(
JSON.stringify({
postgresqlDatabase: postgresql,
mysqlDatabase: mysql,
}),
);

View File

@@ -9,3 +9,5 @@ export { getStorageNameFromType } from './models/getStorageNameFromType';
export { type GoogleDriveStorage } from './models/GoogleDriveStorage';
export { type AzureBlobStorage } from './models/AzureBlobStorage';
export { type FTPStorage } from './models/FTPStorage';
export { type SFTPStorage } from './models/SFTPStorage';
export { type RcloneStorage } from './models/RcloneStorage';

View File

@@ -0,0 +1,4 @@
// Configuration for an rclone-backed storage target.
export interface RcloneStorage {
  // Rclone configuration content — assumed to be raw rclone.conf text; TODO confirm expected format with backend.
  configContent: string;
  // Optional destination path on the remote.
  remotePath?: string;
}

View File

@@ -0,0 +1,9 @@
// Configuration for an SFTP storage target.
export interface SFTPStorage {
  host: string;
  port: number;
  username: string;
  // Both auth fields are optional here — presumably at least one of
  // password / privateKey is required; confirm backend validation.
  password?: string;
  // Private-key auth — assumed to be the key material itself, not a file path; TODO confirm.
  privateKey?: string;
  // Optional remote directory for uploads.
  path?: string;
  // When true, server host-key verification is skipped (insecure; exposes the connection to MITM).
  skipHostKeyVerify?: boolean;
}

View File

@@ -3,7 +3,9 @@ import type { FTPStorage } from './FTPStorage';
import type { GoogleDriveStorage } from './GoogleDriveStorage';
import type { LocalStorage } from './LocalStorage';
import type { NASStorage } from './NASStorage';
import type { RcloneStorage } from './RcloneStorage';
import type { S3Storage } from './S3Storage';
import type { SFTPStorage } from './SFTPStorage';
import type { StorageType } from './StorageType';
export interface Storage {
@@ -20,4 +22,6 @@ export interface Storage {
nasStorage?: NASStorage;
azureBlobStorage?: AzureBlobStorage;
ftpStorage?: FTPStorage;
sftpStorage?: SFTPStorage;
rcloneStorage?: RcloneStorage;
}

View File

@@ -5,4 +5,6 @@ export enum StorageType {
NAS = 'NAS',
AZURE_BLOB = 'AZURE_BLOB',
FTP = 'FTP',
SFTP = 'SFTP',
RCLONE = 'RCLONE',
}

View File

@@ -14,6 +14,10 @@ export const getStorageLogoFromType = (type: StorageType) => {
return '/icons/storages/azure.svg';
case StorageType.FTP:
return '/icons/storages/ftp.svg';
case StorageType.SFTP:
return '/icons/storages/sftp.svg';
case StorageType.RCLONE:
return '/icons/storages/rclone.svg';
default:
return '';
}

View File

@@ -14,6 +14,10 @@ export const getStorageNameFromType = (type: StorageType) => {
return 'Azure Blob Storage';
case StorageType.FTP:
return 'FTP';
case StorageType.SFTP:
return 'SFTP';
case StorageType.RCLONE:
return 'Rclone';
default:
return '';
}

View File

@@ -22,7 +22,7 @@ import {
backupConfigApi,
backupsApi,
} from '../../../entity/backups';
import type { Database } from '../../../entity/databases';
import { type Database, DatabaseType } from '../../../entity/databases';
import { getUserTimeFormat } from '../../../shared/time';
import { ConfirmationComponent } from '../../../shared/ui';
import { RestoresComponent } from '../../restores';
@@ -74,7 +74,8 @@ export const BackupsComponent = ({ database, isCanManageDBs, scrollContainerRef
// Find the backup to get a meaningful filename
const backup = backups.find((b) => b.id === backupId);
const createdAt = backup ? dayjs(backup.createdAt).format('YYYY-MM-DD_HH-mm-ss') : 'backup';
link.download = `${database.name}_backup_${createdAt}.dump`;
const extension = database.type === DatabaseType.MYSQL ? '.sql.zst' : '.dump.zst';
link.download = `${database.name}_backup_${createdAt}${extension}`;
// Trigger download
document.body.appendChild(link);

View File

@@ -2,6 +2,7 @@ import { InfoCircleOutlined } from '@ant-design/icons';
import {
Button,
Checkbox,
Input,
InputNumber,
Modal,
Select,
@@ -10,6 +11,7 @@ import {
TimePicker,
Tooltip,
} from 'antd';
import { CronExpressionParser } from 'cron-parser';
import dayjs, { Dayjs } from 'dayjs';
import { useEffect, useMemo, useState } from 'react';
@@ -19,10 +21,11 @@ import type { Database } from '../../../entity/databases';
import { Period } from '../../../entity/databases/model/Period';
import { type Interval, IntervalType } from '../../../entity/intervals';
import { type Storage, getStorageLogoFromType, storageApi } from '../../../entity/storages';
import { getUserTimeFormat } from '../../../shared/time';
import {
getUserTimeFormat as getIs12Hour,
getLocalDayOfMonth,
getLocalWeekday,
getUserTimeFormat,
getUtcDayOfMonth,
getUtcWeekday,
} from '../../../shared/time/utils';
@@ -77,10 +80,12 @@ export const EditBackupConfigComponent = ({
const [isShowWarn, setIsShowWarn] = useState(false);
const timeFormat = useMemo(() => {
const is12 = getUserTimeFormat();
const is12 = getIs12Hour();
return { use12Hours: is12, format: is12 ? 'h:mm A' : 'HH:mm' };
}, []);
const dateTimeFormat = useMemo(() => getUserTimeFormat(), []);
const updateBackupConfig = (patch: Partial<BackupConfig>) => {
setBackupConfig((prev) => (prev ? { ...prev, ...patch } : prev));
setIsUnsaved(true);
@@ -201,7 +206,8 @@ export const EditBackupConfigComponent = ({
Boolean(backupInterval?.interval) &&
(!backupInterval ||
((backupInterval.interval !== IntervalType.WEEKLY || displayedWeekday) &&
(backupInterval.interval !== IntervalType.MONTHLY || displayedDayOfMonth))));
(backupInterval.interval !== IntervalType.MONTHLY || displayedDayOfMonth) &&
(backupInterval.interval !== IntervalType.CRON || backupInterval.cronExpression))));
return (
<div>
@@ -230,6 +236,7 @@ export const EditBackupConfigComponent = ({
{ label: 'Daily', value: IntervalType.DAILY },
{ label: 'Weekly', value: IntervalType.WEEKLY },
{ label: 'Monthly', value: IntervalType.MONTHLY },
{ label: 'Cron', value: IntervalType.CRON },
]}
/>
</div>
@@ -269,33 +276,93 @@ export const EditBackupConfigComponent = ({
</div>
)}
{backupInterval?.interval !== IntervalType.HOURLY && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Backup time of day</div>
<TimePicker
value={localTime}
format={timeFormat.format}
use12Hours={timeFormat.use12Hours}
allowClear={false}
size="small"
className="w-full max-w-[200px] grow"
onChange={(t) => {
if (!t) return;
const patch: Partial<Interval> = { timeOfDay: t.utc().format('HH:mm') };
if (backupInterval?.interval === IntervalType.WEEKLY && displayedWeekday) {
patch.weekday = getUtcWeekday(displayedWeekday, t);
{backupInterval?.interval === IntervalType.CRON && (
<>
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Cron expression (UTC)</div>
<div className="flex items-center">
<Input
value={backupInterval?.cronExpression || ''}
onChange={(e) => saveInterval({ cronExpression: e.target.value })}
placeholder="0 2 * * *"
size="small"
className="w-full max-w-[200px] grow"
/>
<Tooltip
className="cursor-pointer"
title={
<div>
<div className="font-bold">
Cron format: minute hour day month weekday (UTC)
</div>
<div className="mt-1">Examples:</div>
<div> 0 2 * * * - Daily at 2:00 AM UTC</div>
<div> 0 */6 * * * - Every 6 hours</div>
<div> 0 3 * * 1 - Every Monday at 3:00 AM UTC</div>
<div> 30 4 1,15 * * - 1st and 15th at 4:30 AM UTC</div>
</div>
}
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>
</div>
</div>
{backupInterval?.cronExpression &&
(() => {
try {
const interval = CronExpressionParser.parse(backupInterval.cronExpression, {
tz: 'UTC',
});
const nextRun = interval.next().toDate();
return (
<div className="mb-1 flex w-full flex-col items-start text-xs text-gray-600 sm:flex-row sm:items-center dark:text-gray-400">
<div className="mb-1 min-w-[150px] sm:mb-0" />
<div className="text-gray-600 dark:text-gray-400">
Next run {dayjs(nextRun).local().format(dateTimeFormat.format)}
<br />({dayjs(nextRun).fromNow()})
</div>
</div>
);
} catch {
return (
<div className="mb-1 flex w-full flex-col items-start text-red-500 sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0" />
<div className="text-red-500">Invalid cron expression</div>
</div>
);
}
if (backupInterval?.interval === IntervalType.MONTHLY && displayedDayOfMonth) {
patch.dayOfMonth = getUtcDayOfMonth(displayedDayOfMonth, t);
}
saveInterval(patch);
}}
/>
</div>
})()}
</>
)}
{backupInterval?.interval !== IntervalType.HOURLY &&
backupInterval?.interval !== IntervalType.CRON && (
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Backup time of day</div>
<TimePicker
value={localTime}
format={timeFormat.format}
use12Hours={timeFormat.use12Hours}
allowClear={false}
size="small"
className="w-full max-w-[200px] grow"
onChange={(t) => {
if (!t) return;
const patch: Partial<Interval> = { timeOfDay: t.utc().format('HH:mm') };
if (backupInterval?.interval === IntervalType.WEEKLY && displayedWeekday) {
patch.weekday = getUtcWeekday(displayedWeekday, t);
}
if (backupInterval?.interval === IntervalType.MONTHLY && displayedDayOfMonth) {
patch.dayOfMonth = getUtcDayOfMonth(displayedDayOfMonth, t);
}
saveInterval(patch);
}}
/>
</div>
)}
<div className="mt-4 mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
<div className="mb-1 min-w-[150px] sm:mb-0">Retry backup if failed</div>
<div className="flex items-center">
@@ -533,6 +600,7 @@ export const EditBackupConfigComponent = ({
setShowCreateStorage(false);
setStorageSelectKey((prev) => prev + 1);
}}
maskClosable={false}
>
<div className="my-3 max-w-[275px] text-gray-500 dark:text-gray-400">
Storage - is a place where backups will be stored (local disk, S3, Google Drive, etc.)

View File

@@ -1,5 +1,6 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { Tooltip } from 'antd';
import { CronExpressionParser } from 'cron-parser';
import dayjs from 'dayjs';
import { useMemo } from 'react';
import { useEffect, useState } from 'react';
@@ -10,7 +11,12 @@ import type { Database } from '../../../entity/databases';
import { Period } from '../../../entity/databases/model/Period';
import { IntervalType } from '../../../entity/intervals';
import { getStorageLogoFromType } from '../../../entity/storages/models/getStorageLogoFromType';
import { getLocalDayOfMonth, getLocalWeekday, getUserTimeFormat } from '../../../shared/time/utils';
import { getUserTimeFormat } from '../../../shared/time';
import {
getUserTimeFormat as getIs12Hour,
getLocalDayOfMonth,
getLocalWeekday,
} from '../../../shared/time/utils';
interface Props {
database: Database;
@@ -31,6 +37,7 @@ const intervalLabels = {
[IntervalType.DAILY]: 'Daily',
[IntervalType.WEEKLY]: 'Weekly',
[IntervalType.MONTHLY]: 'Monthly',
[IntervalType.CRON]: 'Cron',
};
const periodLabels = {
@@ -57,13 +64,15 @@ export const ShowBackupConfigComponent = ({ database }: Props) => {
// Detect user's preferred time format (12-hour vs 24-hour)
const timeFormat = useMemo(() => {
const is12Hour = getUserTimeFormat();
const is12Hour = getIs12Hour();
return {
use12Hours: is12Hour,
format: is12Hour ? 'h:mm A' : 'HH:mm',
};
}, []);
const dateTimeFormat = useMemo(() => getUserTimeFormat(), []);
useEffect(() => {
if (database.id) {
backupConfigApi.getBackupConfigByDbID(database.id).then((res) => {
@@ -131,13 +140,45 @@ export const ShowBackupConfigComponent = ({ database }: Props) => {
</div>
)}
{backupInterval?.interval !== IntervalType.HOURLY && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Backup time of day</div>
<div>{formattedTime}</div>
</div>
{backupInterval?.interval === IntervalType.CRON && (
<>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Cron expression (UTC)</div>
<code className="rounded bg-gray-100 px-2 py-0.5 text-sm dark:bg-gray-700">
{backupInterval?.cronExpression || ''}
</code>
</div>
{backupInterval?.cronExpression &&
(() => {
try {
const interval = CronExpressionParser.parse(backupInterval.cronExpression, {
tz: 'UTC',
});
const nextRun = interval.next().toDate();
return (
<div className="mb-1 flex w-full items-center text-xs text-gray-600 dark:text-gray-400">
<div className="min-w-[150px]" />
<div>
Next run {dayjs(nextRun).local().format(dateTimeFormat.format)}
<br />({dayjs(nextRun).fromNow()})
</div>
</div>
);
} catch {
return null;
}
})()}
</>
)}
{backupInterval?.interval !== IntervalType.HOURLY &&
backupInterval?.interval !== IntervalType.CRON && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Backup time of day</div>
<div>{formattedTime}</div>
</div>
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Retry if failed</div>
<div>{backupConfig.isRetryIfFailed ? 'Yes' : 'No'}</div>

View File

@@ -4,6 +4,7 @@ import { type BackupConfig, backupConfigApi, backupsApi } from '../../../entity/
import {
type Database,
DatabaseType,
type MysqlDatabase,
Period,
type PostgresqlDatabase,
databaseApi,
@@ -21,18 +22,14 @@ interface Props {
onClose: () => void;
}
export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Props) => {
const [isCreating, setIsCreating] = useState(false);
const [backupConfig, setBackupConfig] = useState<BackupConfig | undefined>();
const [database, setDatabase] = useState<Database>({
const createInitialDatabase = (workspaceId: string): Database =>
({
id: undefined as unknown as string,
name: '',
workspaceId,
storePeriod: Period.MONTH,
postgresql: {
cpuCount: 1,
} as unknown as PostgresqlDatabase,
postgresql: {} as PostgresqlDatabase,
type: DatabaseType.POSTGRES,
@@ -40,7 +37,24 @@ export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Pro
notifiers: [],
sendNotificationsOn: [],
} as Database);
}) as Database;
/**
 * Ensures the database object carries the engine-specific settings struct
 * matching its `type`, seeding an empty one and clearing the other engine's
 * settings when it is missing. Returns the input unchanged otherwise.
 *
 * NOTE(review): when the matching struct is already present, the opposite
 * engine's struct is NOT cleared — confirm that is intentional.
 */
const initializeDatabaseTypeData = (db: Database): Database => {
// Postgres selected but no postgres settings yet: seed empty, drop mysql
if (db.type === DatabaseType.POSTGRES && !db.postgresql) {
return { ...db, postgresql: {} as PostgresqlDatabase, mysql: undefined };
}
// MySQL selected but no mysql settings yet: seed empty, drop postgresql
if (db.type === DatabaseType.MYSQL && !db.mysql) {
return { ...db, mysql: {} as MysqlDatabase, postgresql: undefined };
}
return db;
};
export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Props) => {
const [isCreating, setIsCreating] = useState(false);
const [backupConfig, setBackupConfig] = useState<BackupConfig | undefined>();
const [database, setDatabase] = useState<Database>(createInitialDatabase(workspaceId));
const [step, setStep] = useState<
'base-info' | 'db-settings' | 'create-readonly-user' | 'backup-config' | 'notifiers'
@@ -74,11 +88,13 @@ export const CreateDatabaseComponent = ({ workspaceId, onCreated, onClose }: Pro
<EditDatabaseBaseInfoComponent
database={database}
isShowName
isShowType
isSaveToApi={false}
saveButtonText="Continue"
onCancel={() => onClose()}
onSaved={(database) => {
setDatabase({ ...database });
onSaved={(db) => {
const initializedDb = initializeDatabaseTypeData(db);
setDatabase({ ...initializedDb });
setStep('db-settings');
}}
/>

View File

@@ -179,6 +179,7 @@ export const DatabasesComponent = ({ contentHeight, workspace, isCanManageDBs }:
footer={<div />}
open={isShowAddDatabase}
onCancel={() => setIsShowAddDatabase(false)}
maskClosable={false}
width={420}
>
<div className="mt-5" />

View File

@@ -1,7 +1,7 @@
import { Button, Modal, Spin } from 'antd';
import { useEffect, useState } from 'react';
import { type Database, databaseApi } from '../../../../entity/databases';
import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases';
interface Props {
database: Database;
@@ -21,6 +21,10 @@ export const CreateReadOnlyComponent = ({
const [isCreatingReadOnlyUser, setIsCreatingReadOnlyUser] = useState(false);
const [isShowSkipConfirmation, setShowSkipConfirmation] = useState(false);
const isPostgres = database.type === DatabaseType.POSTGRES;
const isMysql = database.type === DatabaseType.MYSQL;
const databaseTypeName = isPostgres ? 'PostgreSQL' : isMysql ? 'MySQL' : 'database';
const checkReadOnlyUser = async (): Promise<boolean> => {
try {
const response = await databaseApi.isUserReadOnly(database);
@@ -36,8 +40,15 @@ export const CreateReadOnlyComponent = ({
try {
const response = await databaseApi.createReadOnlyUser(database);
database.postgresql!.username = response.username;
database.postgresql!.password = response.password;
if (isPostgres && database.postgresql) {
database.postgresql.username = response.username;
database.postgresql.password = response.password;
} else if (isMysql && database.mysql) {
database.mysql.username = response.username;
database.mysql.password = response.password;
}
onReadOnlyUserUpdated(database);
onContinue();
} catch (e) {
@@ -62,7 +73,6 @@ export const CreateReadOnlyComponent = ({
const isReadOnly = await checkReadOnlyUser();
if (isReadOnly) {
// already has a read-only user
onContinue();
}
@@ -86,8 +96,8 @@ export const CreateReadOnlyComponent = ({
<p className="mb-3 text-lg font-bold">Create a read-only user for Postgresus?</p>
<p className="mb-2">
A read-only user is a PostgreSQL user with limited permissions that can only read data
from your database, not modify it. This is recommended for backup operations because:
A read-only user is a {databaseTypeName} user with limited permissions that can only read
data from your database, not modify it. This is recommended for backup operations because:
</p>
<ul className="mb-2 ml-5 list-disc">

View File

@@ -1,12 +1,20 @@
import { Button, Input } from 'antd';
import { Button, Input, Select } from 'antd';
import { useEffect, useState } from 'react';
import { type Database, databaseApi } from '../../../../entity/databases';
import {
type Database,
DatabaseType,
type MysqlDatabase,
type PostgresqlDatabase,
databaseApi,
getDatabaseLogoFromType,
} from '../../../../entity/databases';
interface Props {
database: Database;
isShowName?: boolean;
isShowType?: boolean;
isShowCancelButton?: boolean;
onCancel: () => void;
@@ -15,9 +23,15 @@ interface Props {
onSaved: (db: Database) => void;
}
// Options for the database-type <Select>; labels are the user-facing names.
const databaseTypeOptions = [
{ value: DatabaseType.POSTGRES, label: 'PostgreSQL' },
{ value: DatabaseType.MYSQL, label: 'MySQL' },
];
export const EditDatabaseBaseInfoComponent = ({
database,
isShowName,
isShowType,
isShowCancelButton,
onCancel,
saveButtonText,
@@ -33,6 +47,26 @@ export const EditDatabaseBaseInfoComponent = ({
setIsUnsaved(true);
};
/**
 * Handles a change of the database engine type in the edit form.
 *
 * Copies the current editing state with the new `type`, seeds an empty
 * engine-specific settings object when switching to an engine whose settings
 * are absent (clearing the other engine's settings), then stores the result
 * and marks the form as having unsaved changes.
 */
const handleTypeChange = (newType: DatabaseType) => {
// Nothing to do until the editing copy has been initialized
if (!editingDatabase) return;
const updatedDatabase: Database = {
...editingDatabase,
type: newType,
};
// Seed empty settings for the newly selected engine and drop the other's
if (newType === DatabaseType.POSTGRES && !editingDatabase.postgresql) {
updatedDatabase.postgresql = {} as PostgresqlDatabase;
updatedDatabase.mysql = undefined;
} else if (newType === DatabaseType.MYSQL && !editingDatabase.mysql) {
updatedDatabase.mysql = {} as MysqlDatabase;
updatedDatabase.postgresql = undefined;
}
setEditingDatabase(updatedDatabase);
setIsUnsaved(true);
};
const saveDatabase = async () => {
if (!editingDatabase) return;
if (isSaveToApi) {
@@ -59,7 +93,6 @@ export const EditDatabaseBaseInfoComponent = ({
if (!editingDatabase) return null;
// mandatory-field check
const isAllFieldsFilled = !!editingDatabase.name?.trim();
return (
@@ -77,6 +110,28 @@ export const EditDatabaseBaseInfoComponent = ({
</div>
)}
{isShowType && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Database type</div>
<div className="flex items-center">
<Select
value={editingDatabase.type}
onChange={handleTypeChange}
options={databaseTypeOptions}
size="small"
className="w-[200px] grow"
/>
<img
src={getDatabaseLogoFromType(editingDatabase.type)}
alt="databaseIcon"
className="ml-2 h-4 w-4"
/>
</div>
</div>
)}
<div className="mt-5 flex">
{isShowCancelButton && (
<Button danger ghost className="mr-1" onClick={onCancel}>

View File

@@ -166,6 +166,7 @@ export const EditDatabaseNotifiersComponent = ({
setShowCreateNotifier(false);
setNotifierSelectKey((prev) => prev + 1);
}}
maskClosable={false}
>
<div className="my-3 max-w-[275px] text-gray-500 dark:text-gray-400">
Notifier - is a place where notifications will be sent (email, Slack, Telegram, etc.)

View File

@@ -1,10 +1,6 @@
import { CopyOutlined, DownOutlined, UpOutlined } from '@ant-design/icons';
import { App, Button, Input, InputNumber, Select, Switch } from 'antd';
import { useEffect, useState } from 'react';
import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases';
import { ConnectionStringParser } from '../../../../entity/databases/model/postgresql/ConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
import { type Database, DatabaseType } from '../../../../entity/databases';
import { EditMySqlSpecificDataComponent } from './EditMySqlSpecificDataComponent';
import { EditPostgreSqlSpecificDataComponent } from './EditPostgreSqlSpecificDataComponent';
interface Props {
database: Database;
@@ -20,6 +16,7 @@ interface Props {
onSaved: (database: Database) => void;
isShowDbName?: boolean;
isRestoreMode?: boolean;
}
export const EditDatabaseSpecificDataComponent = ({
@@ -35,404 +32,40 @@ export const EditDatabaseSpecificDataComponent = ({
isSaveToApi,
onSaved,
isShowDbName = true,
isRestoreMode = false,
}: Props) => {
const { message } = App.useApp();
if (database.type === DatabaseType.POSTGRES) {
return (
<EditPostgreSqlSpecificDataComponent
database={database}
isShowCancelButton={isShowCancelButton}
onCancel={onCancel}
isShowBackButton={isShowBackButton}
onBack={onBack}
saveButtonText={saveButtonText}
isSaveToApi={isSaveToApi}
onSaved={onSaved}
isShowDbName={isShowDbName}
isRestoreMode={isRestoreMode}
/>
);
}
const [editingDatabase, setEditingDatabase] = useState<Database>();
const [isSaving, setIsSaving] = useState(false);
if (database.type === DatabaseType.MYSQL) {
return (
<EditMySqlSpecificDataComponent
database={database}
isShowCancelButton={isShowCancelButton}
onCancel={onCancel}
isShowBackButton={isShowBackButton}
onBack={onBack}
saveButtonText={saveButtonText}
isSaveToApi={isSaveToApi}
onSaved={onSaved}
isShowDbName={isShowDbName}
/>
);
}
const [isConnectionTested, setIsConnectionTested] = useState(false);
const [isTestingConnection, setIsTestingConnection] = useState(false);
const [isConnectionFailed, setIsConnectionFailed] = useState(false);
const hasAdvancedValues = !!database.postgresql?.includeSchemas?.length;
const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
const [hasAutoAddedPublicSchema, setHasAutoAddedPublicSchema] = useState(false);
const parseFromClipboard = async () => {
try {
const text = await navigator.clipboard.readText();
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = ConnectionStringParser.parse(trimmedText);
if ('error' in result) {
message.error(result.error);
return;
}
if (!editingDatabase?.postgresql) return;
const updatedDatabase: Database = {
...editingDatabase,
postgresql: {
...editingDatabase.postgresql,
host: result.host,
port: result.port,
username: result.username,
password: result.password,
database: result.database,
isHttps: result.isHttps,
},
};
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
setIsConnectionTested(false);
message.success('Connection string parsed successfully');
} catch {
message.error('Failed to read clipboard. Please check browser permissions.');
}
};
/**
 * If the connection looks like a Supabase database (host or username contains
 * "supabase"), prepends the 'public' schema to `includeSchemas` — presumably
 * because backing up all schemas fails on Supabase's internal schemas; confirm.
 *
 * Runs at most once per component lifetime (guarded by the
 * `hasAutoAddedPublicSchema` flag) and returns the input unchanged otherwise.
 */
const autoAddPublicSchemaForSupabase = (updatedDatabase: Database): Database => {
// Only auto-add once so a user can still remove 'public' manually afterwards
if (hasAutoAddedPublicSchema) return updatedDatabase;
const host = updatedDatabase.postgresql?.host || '';
const username = updatedDatabase.postgresql?.username || '';
const isSupabase = host.includes('supabase') || username.includes('supabase');
if (isSupabase && updatedDatabase.postgresql) {
setHasAutoAddedPublicSchema(true);
const currentSchemas = updatedDatabase.postgresql.includeSchemas || [];
// Avoid duplicating 'public' if the user already listed it
if (!currentSchemas.includes('public')) {
return {
...updatedDatabase,
postgresql: {
...updatedDatabase.postgresql,
includeSchemas: ['public', ...currentSchemas],
},
};
}
}
return updatedDatabase;
};
const testConnection = async () => {
if (!editingDatabase) return;
setIsTestingConnection(true);
setIsConnectionFailed(false);
try {
await databaseApi.testDatabaseConnectionDirect(editingDatabase);
setIsConnectionTested(true);
ToastHelper.showToast({
title: 'Connection test passed',
description: 'You can continue with the next step',
});
} catch (e) {
setIsConnectionFailed(true);
alert((e as Error).message);
}
setIsTestingConnection(false);
};
const saveDatabase = async () => {
if (!editingDatabase) return;
if (isSaveToApi) {
setIsSaving(true);
try {
await databaseApi.updateDatabase(editingDatabase);
} catch (e) {
alert((e as Error).message);
}
setIsSaving(false);
}
onSaved(editingDatabase);
};
useEffect(() => {
setIsSaving(false);
setIsConnectionTested(false);
setIsTestingConnection(false);
setIsConnectionFailed(false);
setEditingDatabase({ ...database });
}, [database]);
if (!editingDatabase) return null;
let isAllFieldsFilled = true;
if (!editingDatabase.postgresql?.host) isAllFieldsFilled = false;
if (!editingDatabase.postgresql?.port) isAllFieldsFilled = false;
if (!editingDatabase.postgresql?.username) isAllFieldsFilled = false;
if (!editingDatabase.id && !editingDatabase.postgresql?.password) isAllFieldsFilled = false;
if (!editingDatabase.postgresql?.database) isAllFieldsFilled = false;
const isLocalhostDb =
editingDatabase.postgresql?.host?.includes('localhost') ||
editingDatabase.postgresql?.host?.includes('127.0.0.1');
const isSupabaseDb =
editingDatabase.postgresql?.host?.includes('supabase') ||
editingDatabase.postgresql?.username?.includes('supabase');
return (
<div>
{editingDatabase.type === DatabaseType.POSTGRES && (
<>
<div className="mb-3 flex">
<div className="min-w-[150px]" />
<div
className="cursor-pointer text-sm text-gray-600 transition-colors hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-200"
onClick={parseFromClipboard}
>
<CopyOutlined className="mr-1" />
Parse from clipboard
</div>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Host</div>
<Input
value={editingDatabase.postgresql?.host}
onChange={(e) => {
if (!editingDatabase.postgresql) return;
const updatedDatabase = {
...editingDatabase,
postgresql: {
...editingDatabase.postgresql,
host: e.target.value.trim().replace('https://', '').replace('http://', ''),
},
};
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter PG host"
/>
</div>
{isLocalhostDb && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
Please{' '}
<a
href="https://postgresus.com/faq/localhost"
target="_blank"
rel="noreferrer"
className="!text-blue-600 dark:!text-blue-400"
>
read this document
</a>{' '}
to study how to backup local database
</div>
</div>
)}
{isSupabaseDb && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
Please{' '}
<a
href="https://postgresus.com/faq/supabase"
target="_blank"
rel="noreferrer"
className="!text-blue-600 dark:!text-blue-400"
>
read this document
</a>{' '}
to study how to backup Supabase database
</div>
</div>
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Port</div>
<InputNumber
type="number"
value={editingDatabase.postgresql?.port}
onChange={(e) => {
if (!editingDatabase.postgresql || e === null) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, port: e },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter PG port"
/>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Username</div>
<Input
value={editingDatabase.postgresql?.username}
onChange={(e) => {
if (!editingDatabase.postgresql) return;
const updatedDatabase = {
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, username: e.target.value.trim() },
};
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter PG username"
/>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Password</div>
<Input.Password
value={editingDatabase.postgresql?.password}
onChange={(e) => {
if (!editingDatabase.postgresql) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, password: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter PG password"
/>
</div>
{isShowDbName && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">DB name</div>
<Input
value={editingDatabase.postgresql?.database}
onChange={(e) => {
if (!editingDatabase.postgresql) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, database: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter PG database name (optional)"
/>
</div>
)}
<div className="mb-3 flex w-full items-center">
<div className="min-w-[150px]">Use HTTPS</div>
<Switch
checked={editingDatabase.postgresql?.isHttps}
onChange={(checked) => {
if (!editingDatabase.postgresql) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, isHttps: checked },
});
setIsConnectionTested(false);
}}
size="small"
/>
</div>
<div className="mt-4 mb-3 flex items-center">
<div
className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
onClick={() => setShowAdvanced(!isShowAdvanced)}
>
<span className="mr-2">Advanced settings</span>
{isShowAdvanced ? (
<UpOutlined style={{ fontSize: '12px' }} />
) : (
<DownOutlined style={{ fontSize: '12px' }} />
)}
</div>
</div>
{isShowAdvanced && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Include schemas</div>
<Select
mode="tags"
value={editingDatabase.postgresql?.includeSchemas || []}
onChange={(values) => {
if (!editingDatabase.postgresql) return;
setEditingDatabase({
...editingDatabase,
postgresql: { ...editingDatabase.postgresql, includeSchemas: values },
});
}}
size="small"
className="max-w-[200px] grow"
placeholder="All schemas (default)"
tokenSeparators={[',']}
/>
</div>
)}
</>
)}
<div className="mt-5 flex">
{isShowCancelButton && (
<Button className="mr-1" danger ghost onClick={() => onCancel()}>
Cancel
</Button>
)}
{isShowBackButton && (
<Button className="mr-auto" type="primary" ghost onClick={() => onBack()}>
Back
</Button>
)}
{!isConnectionTested && (
<Button
type="primary"
onClick={() => testConnection()}
loading={isTestingConnection}
disabled={!isAllFieldsFilled}
className="mr-5"
>
Test connection
</Button>
)}
{isConnectionTested && (
<Button
type="primary"
onClick={() => saveDatabase()}
loading={isSaving}
disabled={!isAllFieldsFilled}
className="mr-5"
>
{saveButtonText || 'Save'}
</Button>
)}
</div>
{isConnectionFailed && (
<div className="mt-3 text-sm text-gray-500 dark:text-gray-400">
If your database uses IP whitelist, make sure Postgresus server IP is added to the allowed
list.
</div>
)}
</div>
);
return null;
};

View File

@@ -0,0 +1,344 @@
import { CopyOutlined } from '@ant-design/icons';
import { App, Button, Input, InputNumber, Switch } from 'antd';
import { useEffect, useState } from 'react';
import { type Database, databaseApi } from '../../../../entity/databases';
import { MySqlConnectionStringParser } from '../../../../entity/databases/model/mysql/MySqlConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
interface Props {
database: Database;
isShowCancelButton?: boolean;
onCancel: () => void;
isShowBackButton: boolean;
onBack: () => void;
saveButtonText?: string;
isSaveToApi: boolean;
onSaved: (database: Database) => void;
isShowDbName?: boolean;
}
export const EditMySqlSpecificDataComponent = ({
database,
isShowCancelButton,
onCancel,
isShowBackButton,
onBack,
saveButtonText,
isSaveToApi,
onSaved,
isShowDbName = true,
}: Props) => {
const { message } = App.useApp();
const [editingDatabase, setEditingDatabase] = useState<Database>();
const [isSaving, setIsSaving] = useState(false);
const [isConnectionTested, setIsConnectionTested] = useState(false);
const [isTestingConnection, setIsTestingConnection] = useState(false);
const [isConnectionFailed, setIsConnectionFailed] = useState(false);
/**
 * Reads a MySQL connection string from the clipboard, parses it, and fills
 * the editing form's MySQL fields (host, port, credentials, database, HTTPS).
 *
 * Shows an error toast when the clipboard is empty, unreadable (browser
 * permissions), or the string fails to parse; resets the "connection tested"
 * flag on success so the user must re-test before saving.
 */
const parseFromClipboard = async () => {
try {
const text = await navigator.clipboard.readText();
const trimmedText = text.trim();
if (!trimmedText) {
message.error('Clipboard is empty');
return;
}
const result = MySqlConnectionStringParser.parse(trimmedText);
// Parser returns either parsed fields or an { error } object
if ('error' in result) {
message.error(result.error);
return;
}
// No MySQL settings object to fill — editing state not ready yet
if (!editingDatabase?.mysql) return;
const updatedDatabase: Database = {
...editingDatabase,
mysql: {
...editingDatabase.mysql,
host: result.host,
port: result.port,
username: result.username,
password: result.password,
database: result.database,
isHttps: result.isHttps,
},
};
setEditingDatabase(updatedDatabase);
// Connection details changed — force a fresh connection test
setIsConnectionTested(false);
message.success('Connection string parsed successfully');
} catch {
// navigator.clipboard.readText rejects without a clipboard-read permission
message.error('Failed to read clipboard. Please check browser permissions.');
}
};
const testConnection = async () => {
if (!editingDatabase) return;
setIsTestingConnection(true);
setIsConnectionFailed(false);
try {
await databaseApi.testDatabaseConnectionDirect(editingDatabase);
setIsConnectionTested(true);
ToastHelper.showToast({
title: 'Connection test passed',
description: 'You can continue with the next step',
});
} catch (e) {
setIsConnectionFailed(true);
alert((e as Error).message);
}
setIsTestingConnection(false);
};
const saveDatabase = async () => {
if (!editingDatabase) return;
if (isSaveToApi) {
setIsSaving(true);
try {
await databaseApi.updateDatabase(editingDatabase);
} catch (e) {
alert((e as Error).message);
}
setIsSaving(false);
}
onSaved(editingDatabase);
};
useEffect(() => {
setIsSaving(false);
setIsConnectionTested(false);
setIsTestingConnection(false);
setIsConnectionFailed(false);
setEditingDatabase({ ...database });
}, [database]);
if (!editingDatabase) return null;
let isAllFieldsFilled = true;
if (!editingDatabase.mysql?.host) isAllFieldsFilled = false;
if (!editingDatabase.mysql?.port) isAllFieldsFilled = false;
if (!editingDatabase.mysql?.username) isAllFieldsFilled = false;
if (!editingDatabase.id && !editingDatabase.mysql?.password) isAllFieldsFilled = false;
if (!editingDatabase.mysql?.database) isAllFieldsFilled = false;
const isLocalhostDb =
editingDatabase.mysql?.host?.includes('localhost') ||
editingDatabase.mysql?.host?.includes('127.0.0.1');
return (
<div>
<div className="mb-3 flex">
<div className="min-w-[150px]" />
<div
className="cursor-pointer text-sm text-gray-600 transition-colors hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-200"
onClick={parseFromClipboard}
>
<CopyOutlined className="mr-1" />
Parse from clipboard
</div>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Host</div>
<Input
value={editingDatabase.mysql?.host}
onChange={(e) => {
if (!editingDatabase.mysql) return;
setEditingDatabase({
...editingDatabase,
mysql: {
...editingDatabase.mysql,
host: e.target.value.trim().replace('https://', '').replace('http://', ''),
},
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter MySQL host"
/>
</div>
{isLocalhostDb && (
<div className="mb-1 flex">
<div className="min-w-[150px]" />
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
Please{' '}
<a
href="https://postgresus.com/faq/localhost"
target="_blank"
rel="noreferrer"
className="!text-blue-600 dark:!text-blue-400"
>
read this document
</a>{' '}
to study how to backup local database
</div>
</div>
)}
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Port</div>
<InputNumber
type="number"
value={editingDatabase.mysql?.port}
onChange={(e) => {
if (!editingDatabase.mysql || e === null) return;
setEditingDatabase({
...editingDatabase,
mysql: { ...editingDatabase.mysql, port: e },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter MySQL port"
/>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Username</div>
<Input
value={editingDatabase.mysql?.username}
onChange={(e) => {
if (!editingDatabase.mysql) return;
setEditingDatabase({
...editingDatabase,
mysql: { ...editingDatabase.mysql, username: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter MySQL username"
/>
</div>
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Password</div>
<Input.Password
value={editingDatabase.mysql?.password}
onChange={(e) => {
if (!editingDatabase.mysql) return;
setEditingDatabase({
...editingDatabase,
mysql: { ...editingDatabase.mysql, password: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter MySQL password"
autoComplete="new-password"
/>
</div>
{isShowDbName && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">DB name</div>
<Input
value={editingDatabase.mysql?.database}
onChange={(e) => {
if (!editingDatabase.mysql) return;
setEditingDatabase({
...editingDatabase,
mysql: { ...editingDatabase.mysql, database: e.target.value.trim() },
});
setIsConnectionTested(false);
}}
size="small"
className="max-w-[200px] grow"
placeholder="Enter MySQL database name"
/>
</div>
)}
<div className="mb-3 flex w-full items-center">
<div className="min-w-[150px]">Use HTTPS</div>
<Switch
checked={editingDatabase.mysql?.isHttps}
onChange={(checked) => {
if (!editingDatabase.mysql) return;
setEditingDatabase({
...editingDatabase,
mysql: { ...editingDatabase.mysql, isHttps: checked },
});
setIsConnectionTested(false);
}}
size="small"
/>
</div>
<div className="mt-5 flex">
{isShowCancelButton && (
<Button className="mr-1" danger ghost onClick={() => onCancel()}>
Cancel
</Button>
)}
{isShowBackButton && (
<Button className="mr-auto" type="primary" ghost onClick={() => onBack()}>
Back
</Button>
)}
{!isConnectionTested && (
<Button
type="primary"
onClick={() => testConnection()}
loading={isTestingConnection}
disabled={!isAllFieldsFilled}
className="mr-5"
>
Test connection
</Button>
)}
{isConnectionTested && (
<Button
type="primary"
onClick={() => saveDatabase()}
loading={isSaving}
disabled={!isAllFieldsFilled}
className="mr-5"
>
{saveButtonText || 'Save'}
</Button>
)}
</div>
{isConnectionFailed && (
<div className="mt-3 text-sm text-gray-500 dark:text-gray-400">
If your database uses IP whitelist, make sure Postgresus server IP is added to the allowed
list.
</div>
)}
</div>
);
};

View File

@@ -0,0 +1,473 @@
import { CopyOutlined, DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
import { App, Button, Checkbox, Input, InputNumber, Select, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import { type Database, databaseApi } from '../../../../entity/databases';
import { ConnectionStringParser } from '../../../../entity/databases/model/postgresql/ConnectionStringParser';
import { ToastHelper } from '../../../../shared/toast';
// Props for the PostgreSQL connection-settings form.
interface Props {
  // Database to edit; copied into local component state, the prop itself is not mutated.
  database: Database;
  isShowCancelButton?: boolean;
  onCancel: () => void;
  isShowBackButton: boolean;
  onBack: () => void;
  // Label for the save button; the component falls back to 'Save' when omitted.
  saveButtonText?: string;
  // When true, changes are persisted through the API before onSaved fires.
  isSaveToApi: boolean;
  // Receives the edited database after the save button is pressed.
  onSaved: (database: Database) => void;
  // Defaults to true inside the component; set false to hide the DB-name field.
  isShowDbName?: boolean;
  // Defaults to false. When true, the advanced section shows the restore-only
  // "exclude extensions" option instead of the backup-only "include schemas".
  isRestoreMode?: boolean;
}
/**
 * Form for editing the PostgreSQL-specific connection settings of a Database
 * (host, port, credentials, DB name, HTTPS flag, plus advanced options:
 * include-schemas for backups or exclude-extensions for restores).
 *
 * Works on a local copy of the `database` prop; the copy and the transient
 * flags are re-seeded whenever the prop changes. A successful connection
 * test is required before the save button appears, and editing a connection
 * field invalidates a previous test.
 */
export const EditPostgreSqlSpecificDataComponent = ({
  database,
  isShowCancelButton,
  onCancel,
  isShowBackButton,
  onBack,
  saveButtonText,
  isSaveToApi,
  onSaved,
  isShowDbName = true,
  isRestoreMode = false,
}: Props) => {
  const { message } = App.useApp();

  // Local working copy of the prop; undefined until the seeding effect runs.
  const [editingDatabase, setEditingDatabase] = useState<Database>();
  const [isSaving, setIsSaving] = useState(false);
  // True once the currently entered values have passed a connection test.
  const [isConnectionTested, setIsConnectionTested] = useState(false);
  const [isTestingConnection, setIsTestingConnection] = useState(false);
  // Drives the IP-whitelist hint rendered under the buttons after a failed test.
  const [isConnectionFailed, setIsConnectionFailed] = useState(false);

  // Open the advanced section by default when advanced options are already set.
  // NOTE(review): this is derived from the prop only for the initial state;
  // if the `database` prop changes later, isShowAdvanced is not re-derived
  // (the reset effect below does not touch it) — confirm this is acceptable.
  const hasAdvancedValues =
    !!database.postgresql?.includeSchemas?.length || !!database.postgresql?.isExcludeExtensions;
  const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
  // One-shot latch so 'public' is auto-added for Supabase at most once.
  const [hasAutoAddedPublicSchema, setHasAutoAddedPublicSchema] = useState(false);

  // Fills host/port/credentials/DB name from a PostgreSQL connection string
  // found on the clipboard; shows a toast on success or on any failure.
  const parseFromClipboard = async () => {
    try {
      const text = await navigator.clipboard.readText();
      const trimmedText = text.trim();
      if (!trimmedText) {
        message.error('Clipboard is empty');
        return;
      }
      const result = ConnectionStringParser.parse(trimmedText);
      if ('error' in result) {
        message.error(result.error);
        return;
      }
      // NOTE(review): if the postgresql sub-object is missing, a successfully
      // parsed string is silently dropped with no user feedback — confirm
      // this is intended.
      if (!editingDatabase?.postgresql) return;
      const updatedDatabase: Database = {
        ...editingDatabase,
        postgresql: {
          ...editingDatabase.postgresql,
          host: result.host,
          port: result.port,
          username: result.username,
          password: result.password,
          database: result.database,
          isHttps: result.isHttps,
        },
      };
      setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
      // Freshly parsed values have not been tested yet.
      setIsConnectionTested(false);
      message.success('Connection string parsed successfully');
    } catch {
      // navigator.clipboard.readText rejects when permission is denied.
      message.error('Failed to read clipboard. Please check browser permissions.');
    }
  };

  // When the host or username looks like Supabase, prepend the 'public'
  // schema to includeSchemas (once per component lifetime, and only if it is
  // not already present). Returns the (possibly updated) database.
  const autoAddPublicSchemaForSupabase = (updatedDatabase: Database): Database => {
    if (hasAutoAddedPublicSchema) return updatedDatabase;

    const host = updatedDatabase.postgresql?.host || '';
    const username = updatedDatabase.postgresql?.username || '';
    const isSupabase = host.includes('supabase') || username.includes('supabase');

    if (isSupabase && updatedDatabase.postgresql) {
      setHasAutoAddedPublicSchema(true);

      const currentSchemas = updatedDatabase.postgresql.includeSchemas || [];
      if (!currentSchemas.includes('public')) {
        return {
          ...updatedDatabase,
          postgresql: {
            ...updatedDatabase.postgresql,
            includeSchemas: ['public', ...currentSchemas],
          },
        };
      }
    }

    return updatedDatabase;
  };

  // Tests connectivity with the currently entered values (nothing is saved).
  const testConnection = async () => {
    if (!editingDatabase) return;
    setIsTestingConnection(true);
    setIsConnectionFailed(false);
    try {
      await databaseApi.testDatabaseConnectionDirect(editingDatabase);
      setIsConnectionTested(true);
      ToastHelper.showToast({
        title: 'Connection test passed',
        description: 'You can continue with the next step',
      });
    } catch (e) {
      setIsConnectionFailed(true);
      alert((e as Error).message);
    }
    setIsTestingConnection(false);
  };

  // Persists via the API when isSaveToApi is set, then notifies the parent.
  // NOTE(review): onSaved fires even when the API update failed (after the
  // alert) — confirm this is the desired flow.
  const saveDatabase = async () => {
    if (!editingDatabase) return;
    if (isSaveToApi) {
      setIsSaving(true);
      try {
        await databaseApi.updateDatabase(editingDatabase);
      } catch (e) {
        alert((e as Error).message);
      }
      setIsSaving(false);
    }
    onSaved(editingDatabase);
  };

  // Re-seed the local copy and reset the transient flags whenever the
  // incoming database changes.
  useEffect(() => {
    setIsSaving(false);
    setIsConnectionTested(false);
    setIsTestingConnection(false);
    setIsConnectionFailed(false);
    setEditingDatabase({ ...database });
  }, [database]);

  if (!editingDatabase) return null;

  // Password is only mandatory for new databases (no id yet); existing ones
  // may leave it blank to keep the stored password.
  let isAllFieldsFilled = true;
  if (!editingDatabase.postgresql?.host) isAllFieldsFilled = false;
  if (!editingDatabase.postgresql?.port) isAllFieldsFilled = false;
  if (!editingDatabase.postgresql?.username) isAllFieldsFilled = false;
  if (!editingDatabase.id && !editingDatabase.postgresql?.password) isAllFieldsFilled = false;
  if (!editingDatabase.postgresql?.database) isAllFieldsFilled = false;

  // Triggers the "how to backup a local database" hint under the host field.
  const isLocalhostDb =
    editingDatabase.postgresql?.host?.includes('localhost') ||
    editingDatabase.postgresql?.host?.includes('127.0.0.1');
  // Triggers the Supabase-specific FAQ hint under the host field.
  const isSupabaseDb =
    editingDatabase.postgresql?.host?.includes('supabase') ||
    editingDatabase.postgresql?.username?.includes('supabase');

  return (
    <div>
      {/* Clipboard shortcut for pasting a full connection string */}
      <div className="mb-3 flex">
        <div className="min-w-[150px]" />
        <div
          className="cursor-pointer text-sm text-gray-600 transition-colors hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-200"
          onClick={parseFromClipboard}
        >
          <CopyOutlined className="mr-1" />
          Parse from clipboard
        </div>
      </div>
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[150px]">Host</div>
        <Input
          value={editingDatabase.postgresql?.host}
          onChange={(e) => {
            if (!editingDatabase.postgresql) return;
            const updatedDatabase = {
              ...editingDatabase,
              postgresql: {
                ...editingDatabase.postgresql,
                // Hosts are stored without an URL scheme; HTTPS is a separate flag.
                host: e.target.value.trim().replace('https://', '').replace('http://', ''),
              },
            };
            setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
            setIsConnectionTested(false);
          }}
          size="small"
          className="max-w-[200px] grow"
          placeholder="Enter PG host"
        />
      </div>
      {isLocalhostDb && (
        <div className="mb-1 flex">
          <div className="min-w-[150px]" />
          <div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
            Please{' '}
            <a
              href="https://postgresus.com/faq/localhost"
              target="_blank"
              rel="noreferrer"
              className="!text-blue-600 dark:!text-blue-400"
            >
              read this document
            </a>{' '}
            to study how to backup local database
          </div>
        </div>
      )}
      {isSupabaseDb && (
        <div className="mb-1 flex">
          <div className="min-w-[150px]" />
          <div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
            Please{' '}
            <a
              href="https://postgresus.com/faq/supabase"
              target="_blank"
              rel="noreferrer"
              className="!text-blue-600 dark:!text-blue-400"
            >
              read this document
            </a>{' '}
            to study how to backup Supabase database
          </div>
        </div>
      )}
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[150px]">Port</div>
        <InputNumber
          type="number"
          value={editingDatabase.postgresql?.port}
          onChange={(e) => {
            // InputNumber passes null when the field is cleared.
            if (!editingDatabase.postgresql || e === null) return;
            setEditingDatabase({
              ...editingDatabase,
              postgresql: { ...editingDatabase.postgresql, port: e },
            });
            setIsConnectionTested(false);
          }}
          size="small"
          className="max-w-[200px] grow"
          placeholder="Enter PG port"
        />
      </div>
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[150px]">Username</div>
        <Input
          value={editingDatabase.postgresql?.username}
          onChange={(e) => {
            if (!editingDatabase.postgresql) return;
            const updatedDatabase = {
              ...editingDatabase,
              postgresql: { ...editingDatabase.postgresql, username: e.target.value.trim() },
            };
            setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
            setIsConnectionTested(false);
          }}
          size="small"
          className="max-w-[200px] grow"
          placeholder="Enter PG username"
        />
      </div>
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[150px]">Password</div>
        <Input.Password
          value={editingDatabase.postgresql?.password}
          onChange={(e) => {
            if (!editingDatabase.postgresql) return;
            setEditingDatabase({
              ...editingDatabase,
              postgresql: { ...editingDatabase.postgresql, password: e.target.value.trim() },
            });
            setIsConnectionTested(false);
          }}
          size="small"
          className="max-w-[200px] grow"
          placeholder="Enter PG password"
          autoComplete="new-password"
        />
      </div>
      {isShowDbName && (
        <div className="mb-1 flex w-full items-center">
          <div className="min-w-[150px]">DB name</div>
          <Input
            value={editingDatabase.postgresql?.database}
            onChange={(e) => {
              if (!editingDatabase.postgresql) return;
              setEditingDatabase({
                ...editingDatabase,
                postgresql: { ...editingDatabase.postgresql, database: e.target.value.trim() },
              });
              setIsConnectionTested(false);
            }}
            size="small"
            className="max-w-[200px] grow"
            placeholder="Enter PG database name"
          />
        </div>
      )}
      <div className="mb-3 flex w-full items-center">
        <div className="min-w-[150px]">Use HTTPS</div>
        <Switch
          checked={editingDatabase.postgresql?.isHttps}
          onChange={(checked) => {
            if (!editingDatabase.postgresql) return;
            setEditingDatabase({
              ...editingDatabase,
              postgresql: { ...editingDatabase.postgresql, isHttps: checked },
            });
            setIsConnectionTested(false);
          }}
          size="small"
        />
      </div>
      {/* Collapsible advanced section */}
      <div className="mt-4 mb-3 flex items-center">
        <div
          className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
          onClick={() => setShowAdvanced(!isShowAdvanced)}
        >
          <span className="mr-2">Advanced settings</span>
          {isShowAdvanced ? (
            <UpOutlined style={{ fontSize: '12px' }} />
          ) : (
            <DownOutlined style={{ fontSize: '12px' }} />
          )}
        </div>
      </div>
      {isShowAdvanced && (
        <>
          {!isRestoreMode && (
            <div className="mb-1 flex w-full items-center">
              <div className="min-w-[150px]">Include schemas</div>
              <Select
                mode="tags"
                value={editingDatabase.postgresql?.includeSchemas || []}
                onChange={(values) => {
                  if (!editingDatabase.postgresql) return;
                  // NOTE(review): unlike the connection fields, this does not
                  // reset isConnectionTested — presumably because schemas do
                  // not affect connectivity; confirm intended.
                  setEditingDatabase({
                    ...editingDatabase,
                    postgresql: { ...editingDatabase.postgresql, includeSchemas: values },
                  });
                }}
                size="small"
                className="max-w-[200px] grow"
                placeholder="All schemas (default)"
                tokenSeparators={[',']}
              />
            </div>
          )}
          {isRestoreMode && (
            <div className="mb-1 flex w-full items-center">
              <div className="min-w-[150px]">Exclude extensions</div>
              <div className="flex items-center">
                <Checkbox
                  checked={editingDatabase.postgresql?.isExcludeExtensions || false}
                  onChange={(e) => {
                    if (!editingDatabase.postgresql) return;
                    setEditingDatabase({
                      ...editingDatabase,
                      postgresql: {
                        ...editingDatabase.postgresql,
                        isExcludeExtensions: e.target.checked,
                      },
                    });
                  }}
                >
                  Skip extensions
                </Checkbox>
                <Tooltip
                  className="cursor-pointer"
                  title="Skip restoring extension definitions (CREATE EXTENSION statements). Enable this if you're restoring to a managed PostgreSQL service where extensions are managed by the provider."
                >
                  <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
                </Tooltip>
              </div>
            </div>
          )}
        </>
      )}
      {/* Action row: Test connection is shown until a test passes, then Save */}
      <div className="mt-5 flex">
        {isShowCancelButton && (
          <Button className="mr-1" danger ghost onClick={() => onCancel()}>
            Cancel
          </Button>
        )}
        {isShowBackButton && (
          <Button className="mr-auto" type="primary" ghost onClick={() => onBack()}>
            Back
          </Button>
        )}
        {!isConnectionTested && (
          <Button
            type="primary"
            onClick={() => testConnection()}
            loading={isTestingConnection}
            disabled={!isAllFieldsFilled}
            className="mr-5"
          >
            Test connection
          </Button>
        )}
        {isConnectionTested && (
          <Button
            type="primary"
            onClick={() => saveDatabase()}
            loading={isSaving}
            disabled={!isAllFieldsFilled}
            className="mr-5"
          >
            {saveButtonText || 'Save'}
          </Button>
        )}
      </div>
      {isConnectionFailed && (
        <div className="mt-3 text-sm text-gray-500 dark:text-gray-400">
          If your database uses IP whitelist, make sure Postgresus server IP is added to the allowed
          list.
        </div>
      )}
    </div>
  );
};

View File

@@ -1,11 +1,12 @@
import { type Database } from '../../../../entity/databases';
import { type Database, getDatabaseLogoFromType } from '../../../../entity/databases';
interface Props {
database: Database;
isShowName?: boolean;
isShowType?: boolean;
}
export const ShowDatabaseBaseInfoComponent = ({ database, isShowName }: Props) => {
export const ShowDatabaseBaseInfoComponent = ({ database, isShowName, isShowType }: Props) => {
return (
<div>
{isShowName && (
@@ -14,6 +15,20 @@ export const ShowDatabaseBaseInfoComponent = ({ database, isShowName }: Props) =
<div>{database.name || ''}</div>
</div>
)}
{isShowType && (
<div className="mb-1 flex w-full items-center">
<div className="min-w-[150px]">Database type</div>
<div className="flex items-center">
<span>{database.type === 'POSTGRES' ? 'PostgreSQL' : 'MySQL'}</span>
<img
src={getDatabaseLogoFromType(database.type)}
alt="databaseIcon"
className="ml-2 h-4 w-4"
/>
</div>
</div>
)}
</div>
);
};

View File

@@ -1,71 +1,19 @@
import { type Database, DatabaseType, PostgresqlVersion } from '../../../../entity/databases';
import { type Database, DatabaseType } from '../../../../entity/databases';
import { ShowMySqlSpecificDataComponent } from './ShowMySqlSpecificDataComponent';
import { ShowPostgreSqlSpecificDataComponent } from './ShowPostgreSqlSpecificDataComponent';
interface Props {
  // Database whose type decides which engine-specific details view is rendered.
  database: Database;
}
const postgresqlVersionLabels = {
[PostgresqlVersion.PostgresqlVersion12]: '12',
[PostgresqlVersion.PostgresqlVersion13]: '13',
[PostgresqlVersion.PostgresqlVersion14]: '14',
[PostgresqlVersion.PostgresqlVersion15]: '15',
[PostgresqlVersion.PostgresqlVersion16]: '16',
[PostgresqlVersion.PostgresqlVersion17]: '17',
[PostgresqlVersion.PostgresqlVersion18]: '18',
};
/**
 * Dispatches to the engine-specific read-only details view based on the
 * database type. Returns null for any type without a dedicated view.
 *
 * This span was a unified-diff interleave of the old inline PostgreSQL
 * rendering and the new dispatcher; it is reconstructed here as the
 * post-change implementation (the hunk header `@@ -1,71 +1,19 @@` covers the
 * whole file, so the new version is fully determined).
 */
export const ShowDatabaseSpecificDataComponent = ({ database }: Props) => {
  if (database.type === DatabaseType.POSTGRES) {
    return <ShowPostgreSqlSpecificDataComponent database={database} />;
  }

  if (database.type === DatabaseType.MYSQL) {
    return <ShowMySqlSpecificDataComponent database={database} />;
  }

  // Unknown/unsupported type: render nothing.
  return null;
};

View File

@@ -0,0 +1,52 @@
import { type Database, MysqlVersion } from '../../../../entity/databases';
interface Props {
  // Database whose MySQL connection settings are displayed (read-only).
  database: Database;
}
// Human-readable labels for each supported MySQL server version.
const mysqlVersionLabels = {
  [MysqlVersion.MysqlVersion57]: '5.7',
  [MysqlVersion.MysqlVersion80]: '8.0',
  [MysqlVersion.MysqlVersion84]: '8.4',
};

/**
 * Read-only summary of a database's MySQL connection settings.
 * The password is always rendered masked.
 */
export const ShowMySqlSpecificDataComponent = ({ database }: Props) => {
  const mysql = database.mysql;

  // Label/value pairs rendered as uniform rows below. `wideLabel` adds
  // break-all to the label cell (used for potentially long hostnames).
  const rows: { label: string; value: string | number; wideLabel?: boolean }[] = [
    { label: 'MySQL version', value: mysql?.version ? mysqlVersionLabels[mysql.version] : '' },
    { label: 'Host', value: mysql?.host || '', wideLabel: true },
    { label: 'Port', value: mysql?.port || '' },
    { label: 'Username', value: mysql?.username || '' },
    { label: 'Password', value: '*************' },
    { label: 'DB name', value: mysql?.database || '' },
    { label: 'Use HTTPS', value: mysql?.isHttps ? 'Yes' : 'No' },
  ];

  return (
    <div>
      {rows.map(({ label, value, wideLabel }) => (
        <div key={label} className="mb-1 flex w-full items-center">
          <div className={wideLabel ? 'min-w-[150px] break-all' : 'min-w-[150px]'}>{label}</div>
          <div>{value}</div>
        </div>
      ))}
    </div>
  );
};

Some files were not shown because too many files have changed in this diff Show More