Compare commits

..

66 Commits

Author SHA1 Message Date
Rostislav Dugin
81aadd19e1 FEATURE (docs): Update contribution priorities 2025-09-21 11:30:25 +03:00
Rostislav Dugin
432bdced3e FEATURE (readme): Update readme description 2025-09-21 11:26:53 +03:00
Rostislav Dugin
fcfe382a81 FIX (monitoring): Fix settings creation tests 2025-09-14 14:34:54 +03:00
Rostislav Dugin
7055b85c34 FEATURE (metrics): Add metrics for RAM & IO (first implementation) 2025-09-14 14:23:07 +03:00
Rostislav Dugin
0abc2225de FEATURE (priorities): Update priorities 2025-09-13 21:17:35 +03:00
Rostislav Dugin
31685f7bb0 FEATURE (metrics): Add metrics 2025-09-12 14:28:14 +03:00
Rostislav Dugin
9dbcf91442 REFACTOR (docs): Add clarifications to contribute [skip-release] 2025-09-11 21:02:07 +03:00
Rostislav Dugin
6ef59e888b FIX (tests): Skip Google Drive tests if env not provided 2025-09-11 20:56:02 +03:00
Rostislav Dugin
2009eabb14 FIX (dockerfile): Fix database creation SQL script 2025-09-11 16:57:33 +03:00
Rostislav Dugin
fa073ab76c FIX (dockerfile): Fix database creation SQL script 2025-09-11 16:42:18 +03:00
Rostislav Dugin
f24b3219bc FIX (dockerfile): Split goose installations to different arches 2025-09-11 13:13:04 +03:00
Rostislav Dugin
332971a014 FIX (image): Do not specify arch for image 2025-09-11 12:48:20 +03:00
Rostislav Dugin
7bb057ed2d Merge pull request #34 from RostislavDugin/fix/build_for_arm
Fix/build for arm
2025-09-11 12:35:34 +03:00
Rostislav Dugin
d814c1362b FIX (dockerfile): Verify DB does not exist before creation in the image 2025-09-11 12:34:35 +03:00
Rostislav Dugin
41fe554272 Merge pull request #33 from iAmBipinPaul/main
fix(docker): compile goose for target architecture to prevent ARM exec-format errors
2025-09-11 12:05:22 +03:00
Bipin Paul
00c93340db FEATURE (docker): Refactor Dockerfile for platform compatibility and improved PostgreSQL setup 2025-09-11 06:51:27 +00:00
Bipin Paul
21770b259b FEATURE (docker): Update Dockerfile for ARM64 compatibility and improve PostgreSQL setup 2025-09-11 06:29:07 +00:00
Rostislav Dugin
5f36f269f0 FIX (notifiers): Update teams docs 2025-09-08 18:53:18 +03:00
Rostislav Dugin
76d67d6be8 FEATURE (docs): Update docs how to run frontend and backend 2025-09-08 18:05:30 +03:00
Rostislav Dugin
7adb921812 FEATURE (deploy): Make linting on each commit & PR 2025-09-08 17:52:41 +03:00
dedys
0107dab026 FEATURE (notifiers): Add MS Teams notifier 2025-09-08 17:23:47 +03:00
Rostislav Dugin
dee330ed59 FIX (databases): Validate PostgreSQL config is always present during DB save 2025-09-05 20:12:34 +03:00
Rostislav Dugin
299f152704 FIX (notifiers): Fix notifier name margin 2025-08-15 15:14:08 +03:00
Rostislav Dugin
f3edf1a102 FEATURE (contribute): Update manuals on how to contribute [skip-release] 2025-08-11 18:44:22 +03:00
Rostislav Dugin
f425160765 FEATURE (contribute): Update manuals on how to contribute 2025-08-11 18:41:08 +03:00
Rostislav Dugin
13f2d3938f FIX (storages): Do not prefill port 445 as the default NAS value in the UI 2025-08-11 10:26:17 +03:00
Rostislav Dugin
59692cd41b FIX (directories): Do not remove temp directory when cleaning temp files 2025-08-11 09:33:44 +03:00
Rostislav Dugin
ac78fe306c FEATURE (backups): Add warning that backups will be removed when backups are disabled 2025-08-09 10:27:30 +03:00
Rostislav Dugin
f1620de822 FIX (deploy): Create data and temp folders in CI/CD to avoid failing tests 2025-08-09 10:16:07 +03:00
Rostislav Dugin
e6ce32bb60 FIX (tests): Bring back ensuring directories for LocalStorage so tests do not fail 2025-08-09 10:12:52 +03:00
Rostislav Dugin
d4ec46e18e FIX (tests): Ensure directories for temp data are created before tests 2025-08-09 10:04:51 +03:00
Rostislav Dugin
caf7e205e7 FEATURE (versions): Add version display to Postgresus 2025-08-09 09:56:29 +03:00
Rostislav Dugin
6a71dd4c3f FEATURE (notifiers): Add thread to Telegram notifications 2025-08-09 09:45:15 +03:00
Rostislav Dugin
65c7178f91 FIX (backups): Validate data and temp directories exist on app start (not only for LocalStorage) 2025-08-09 09:20:44 +03:00
Rostislav Dugin
d1aebd1ea3 FIX (database): Fix UI getting stuck when going back to the DB name entry field 2025-08-09 09:11:09 +03:00
Rostislav Dugin
93f6952094 FIX (backup settings): Do not remove backups on backup settings change 2025-08-09 09:04:25 +03:00
Rostislav Dugin
22091c4c87 FIX (notifications): Fix notifications not being sent on completed backup 2025-07-31 12:54:03 +03:00
Rostislav Dugin
ae280cba54 FEATURE (backups): Add zstd 5 compression level for PostgreSQL >= 16 2025-07-30 11:20:34 +03:00
Rostislav Dugin
af499396bd FIX (storages): Do not allow entering a NAS path that starts with a slash 2025-07-24 21:38:28 +03:00
Rostislav Dugin
72a02ad739 FIX (backups): Increase timeout from 1 hour to 23 hours 2025-07-24 21:38:04 +03:00
Rostislav Dugin
5017f38c5f FEATURE (readme): Update readme [skip-release] 2025-07-23 18:58:47 +03:00
Rostislav Dugin
2e7cc1549a FIX (deploy): Add NAS testing to CI/CD workflow 2025-07-23 17:44:32 +03:00
Rostislav Dugin
62ff3962a1 FEATURE (storages): Add NAS storage 2025-07-23 17:35:10 +03:00
Rostislav Dugin
34afe9a347 FIX (spelling): Fix healthcheck spelling and add website to readme 2025-07-22 11:15:34 +03:00
Rostislav Dugin
4eb7c7a902 FEATURE (contribute): Update contribute readme [skip-release] 2025-07-22 11:04:33 +03:00
Rostislav Dugin
5f3c4f23d7 FIX (dependencies): Run extra dependencies via go mod tidy 2025-07-21 21:21:44 +03:00
Rostislav Dugin
ecb8212eab FEATURE (gin): Add gzip compression for static files and API responses 2025-07-21 21:19:27 +03:00
Rostislav Dugin
0e178343a8 FIX (monitoring): Fix down/up message text so heading and body are not identical 2025-07-21 20:59:20 +03:00
Rostislav Dugin
0acd205f43 FIX (restores): Fix order of temp files closing that causes flaky tests 2025-07-21 20:01:19 +03:00
Rostislav Dugin
d678f9b3a2 FEATURE (container): Move PostgreSQL into container 2025-07-21 19:36:42 +03:00
Rostislav Dugin
7859951653 Merge branch 'main' of https://github.com/RostislavDugin/postgresus 2025-07-21 14:59:12 +03:00
Rostislav Dugin
7472aa1e1f FIX (backups): Do not double close backup file 2025-07-21 14:58:34 +03:00
Rostislav Dugin
9283713eab Merge pull request #6 from RostislavDugin/feature/update_readme
FEATURE (readme): Move badges under the description [skip-release]
2025-07-21 14:47:39 +03:00
Rostislav Dugin
9a9c170ffc FEATURE (readme): Move badges under the description [skip-release] 2025-07-21 14:43:48 +03:00
Rostislav Dugin
d05efc3151 FIX (deployments): Remove Docker Hub description update 2025-07-21 14:13:50 +03:00
Rostislav Dugin
1ee41fb673 FEATURE (auth): Add rate limiting for sign-in endpoint to prevent brute force 2025-07-21 14:00:31 +03:00
Rostislav Dugin
529f080ca5 FEATURE (readme): Add pretty labels to GitHub 2025-07-21 13:47:47 +03:00
Rostislav Dugin
df0f7e0e7a FIX (deployment): Fix caching modules 2025-07-21 13:24:53 +03:00
Rostislav Dugin
6418de87db FIX (deployments): Use binaries instead of symlinks on PostgreSQL download 2025-07-21 13:19:40 +03:00
Rostislav Dugin
230f66bb10 FIX (deployments): Use binaries instead of symlinks on PostgreSQL download 2025-07-21 13:16:26 +03:00
Rostislav Dugin
1cd10772ae FIX (deployment): Download PostgreSQL client tools 2025-07-21 13:10:18 +03:00
Rostislav Dugin
d56518b847 FIX (deploy): Fix migrations run on deploy 2025-07-21 13:00:59 +03:00
Rostislav Dugin
64195024c6 FIX (deploy): Update text of docker compose executable 2025-07-21 12:55:04 +03:00
Rostislav Dugin
200429dbab FEATURE (deploy): Run tests on each deployment 2025-07-21 12:48:06 +03:00
Rostislav Dugin
07ad7d9a2a BREAKING CHANGE: Bump version to 1.0 2025-07-21 10:28:05 +03:00
Rostislav Dugin
ffefe68ca4 FIX (docker hub): Fix updating description 2025-07-21 10:02:22 +03:00
140 changed files with 5726 additions and 1105 deletions

.github/workflows/ci-release.yml (new file, +456 lines)

@@ -0,0 +1,456 @@
name: CI and Release
on:
push:
branches: ["**"]
pull_request:
branches: ["**"]
workflow_dispatch:
jobs:
lint-backend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
key: ${{ runner.os }}-go-${{ hashFiles('backend/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.60.3
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Install swag for swagger generation
run: go install github.com/swaggo/swag/cmd/swag@v1.16.4
- name: Generate swagger docs
run: |
cd backend
swag init -d . -g cmd/main.go -o swagger
- name: Run golangci-lint
run: |
cd backend
golangci-lint run
- name: Verify go mod tidy
run: |
cd backend
go mod tidy
git diff --exit-code go.mod go.sum || (echo "go mod tidy made changes, please run 'go mod tidy' and commit the changes" && exit 1)
lint-frontend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
run: |
cd frontend
npm ci
- name: Check if prettier was run
run: |
cd frontend
npm run format
git diff --exit-code || (echo "Prettier made changes, please run 'npm run format' and commit the changes" && exit 1)
- name: Check if linter was run
run: |
cd frontend
npm run lint
test-backend:
runs-on: ubuntu-latest
needs: [lint-backend]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
key: ${{ runner.os }}-go-${{ hashFiles('backend/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Create .env file for testing
run: |
cd backend
cat > .env << EOF
# docker-compose.yml
DEV_DB_NAME=postgresus
DEV_DB_USERNAME=postgres
DEV_DB_PASSWORD=Q1234567
#app
ENV_MODE=development
# db
DATABASE_DSN=host=localhost user=postgres password=Q1234567 dbname=postgresus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
# migrations
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
GOOSE_MIGRATION_DIR=./migrations
# testing
# to get Google Drive env variables: add storage in UI and copy data from added storage here
TEST_GOOGLE_DRIVE_CLIENT_ID=${{ secrets.TEST_GOOGLE_DRIVE_CLIENT_ID }}
TEST_GOOGLE_DRIVE_CLIENT_SECRET=${{ secrets.TEST_GOOGLE_DRIVE_CLIENT_SECRET }}
TEST_GOOGLE_DRIVE_TOKEN_JSON=${{ secrets.TEST_GOOGLE_DRIVE_TOKEN_JSON }}
# testing DBs
TEST_POSTGRES_13_PORT=5001
TEST_POSTGRES_14_PORT=5002
TEST_POSTGRES_15_PORT=5003
TEST_POSTGRES_16_PORT=5004
TEST_POSTGRES_17_PORT=5005
# testing S3
TEST_MINIO_PORT=9000
TEST_MINIO_CONSOLE_PORT=9001
# testing NAS
TEST_NAS_PORT=5006
EOF
- name: Start test containers
run: |
cd backend
docker compose -f docker-compose.yml.example up -d
- name: Wait for containers to be ready
run: |
# Wait for main dev database
timeout 60 bash -c 'until docker exec dev-db pg_isready -h localhost -p 5437 -U postgres; do sleep 2; done'
# Wait for test databases
timeout 60 bash -c 'until nc -z localhost 5001; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5002; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5003; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5004; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5005; do sleep 2; done'
# Wait for MinIO
timeout 60 bash -c 'until nc -z localhost 9000; do sleep 2; done'
- name: Create data and temp directories
run: |
# Create directories that are used for backups and restore
# These paths match what's configured in config.go
mkdir -p postgresus-data/backups
mkdir -p postgresus-data/temp
- name: Install PostgreSQL client tools
run: |
chmod +x backend/tools/download_linux.sh
cd backend/tools
./download_linux.sh
- name: Run database migrations
run: |
cd backend
go install github.com/pressly/goose/v3/cmd/goose@latest
goose up
- name: Run Go tests
run: |
cd backend
go test ./internal/...
- name: Stop test containers
if: always()
run: |
cd backend
docker compose -f docker-compose.yml.example down -v
determine-version:
runs-on: ubuntu-latest
needs: [test-backend, lint-frontend]
if: ${{ github.ref == 'refs/heads/main' && !contains(github.event.head_commit.message, '[skip-release]') }}
outputs:
should_release: ${{ steps.version_bump.outputs.should_release }}
new_version: ${{ steps.version_bump.outputs.new_version }}
bump_type: ${{ steps.version_bump.outputs.bump_type }}
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install semver
run: npm install -g semver
- name: Get current version
id: current_version
run: |
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
echo "current_version=${LATEST_TAG#v}" >> $GITHUB_OUTPUT
echo "Current version: ${LATEST_TAG#v}"
- name: Analyze commits and determine version bump
id: version_bump
run: |
CURRENT_VERSION="${{ steps.current_version.outputs.current_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s" --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s" --no-merges)
fi
echo "Analyzing commits:"
echo "$COMMITS"
# Initialize flags
HAS_FEATURE=false
HAS_FIX=false
HAS_BREAKING=false
# Analyze each commit
while IFS= read -r commit; do
if [[ "$commit" =~ ^FEATURE ]]; then
HAS_FEATURE=true
echo "Found FEATURE commit: $commit"
elif [[ "$commit" =~ ^FIX ]]; then
HAS_FIX=true
echo "Found FIX commit: $commit"
elif [[ "$commit" =~ ^REFACTOR ]]; then
HAS_FIX=true # Treat refactor as patch
echo "Found REFACTOR commit: $commit"
fi
# Check for breaking changes
if [[ "$commit" =~ BREAKING[[:space:]]CHANGE ]] || [[ "$commit" =~ "!" ]]; then
HAS_BREAKING=true
echo "Found BREAKING CHANGE: $commit"
fi
done <<< "$COMMITS"
# Determine version bump
if [ "$HAS_BREAKING" = true ]; then
BUMP_TYPE="major"
elif [ "$HAS_FEATURE" = true ]; then
BUMP_TYPE="minor"
elif [ "$HAS_FIX" = true ]; then
BUMP_TYPE="patch"
else
BUMP_TYPE="none"
fi
echo "bump_type=$BUMP_TYPE" >> $GITHUB_OUTPUT
if [ "$BUMP_TYPE" != "none" ]; then
NEW_VERSION=$(npx semver -i $BUMP_TYPE $CURRENT_VERSION)
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "should_release=true" >> $GITHUB_OUTPUT
echo "New version will be: $NEW_VERSION"
else
echo "should_release=false" >> $GITHUB_OUTPUT
echo "No version bump needed"
fi
build-only:
runs-on: ubuntu-latest
needs: [test-backend, lint-frontend]
if: ${{ github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[skip-release]') }}
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up QEMU (enables multi-arch emulation)
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push SHA-only tags
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
build-args: |
APP_VERSION=dev-${{ github.sha }}
tags: |
rostislavdugin/postgresus:latest
rostislavdugin/postgresus:${{ github.sha }}
build-and-push:
runs-on: ubuntu-latest
needs: [determine-version]
if: ${{ needs.determine-version.outputs.should_release == 'true' }}
permissions:
contents: write
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up QEMU (enables multi-arch emulation)
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push with version tags
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
build-args: |
APP_VERSION=${{ needs.determine-version.outputs.new_version }}
tags: |
rostislavdugin/postgresus:latest
rostislavdugin/postgresus:v${{ needs.determine-version.outputs.new_version }}
rostislavdugin/postgresus:${{ github.sha }}
release:
runs-on: ubuntu-latest
needs: [determine-version, build-and-push]
if: ${{ needs.determine-version.outputs.should_release == 'true' }}
permissions:
contents: write
pull-requests: write
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Generate changelog
id: changelog
run: |
NEW_VERSION="${{ needs.determine-version.outputs.new_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
fi
# Create changelog
CHANGELOG="# Changelog\n\n## [${NEW_VERSION}] - $(date +%Y-%m-%d)\n\n"
# Group commits by type and area
FEATURES=""
FIXES=""
REFACTORS=""
while IFS= read -r line; do
if [ -n "$line" ]; then
COMMIT_MSG=$(echo "$line" | cut -d'|' -f1)
COMMIT_HASH=$(echo "$line" | cut -d'|' -f2)
SHORT_HASH=${COMMIT_HASH:0:7}
# Parse commit message format: TYPE (area): description
if [[ "$COMMIT_MSG" == FEATURE* ]]; then
TEMP="${COMMIT_MSG#FEATURE}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}"
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FEATURES="${FEATURES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == FIX* ]]; then
TEMP="${COMMIT_MSG#FIX}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}"
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FIXES="${FIXES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == REFACTOR* ]]; then
TEMP="${COMMIT_MSG#REFACTOR}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}"
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
REFACTORS="${REFACTORS}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
fi
fi
done <<< "$COMMITS"
# Build changelog sections
if [ -n "$FEATURES" ]; then
CHANGELOG="${CHANGELOG}### ✨ Features\n${FEATURES}\n"
fi
if [ -n "$FIXES" ]; then
CHANGELOG="${CHANGELOG}### 🐛 Bug Fixes\n${FIXES}\n"
fi
if [ -n "$REFACTORS" ]; then
CHANGELOG="${CHANGELOG}### 🔨 Refactoring\n${REFACTORS}\n"
fi
# Add Docker image info
CHANGELOG="${CHANGELOG}### 🐳 Docker\n"
CHANGELOG="${CHANGELOG}- **Image**: \`rostislavdugin/postgresus:v${NEW_VERSION}\`\n"
CHANGELOG="${CHANGELOG}- **Platforms**: linux/amd64, linux/arm64\n\n"
# Set output for GitHub release
{
echo 'changelog<<EOF'
echo -e "$CHANGELOG"
echo EOF
} >> $GITHUB_OUTPUT
- name: Create GitHub Release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ needs.determine-version.outputs.new_version }}
release_name: Release v${{ needs.determine-version.outputs.new_version }}
body: ${{ steps.changelog.outputs.changelog }}
draft: false
prerelease: false
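For reference, the determine-version job above keys entirely off commit subjects of the form TYPE (area): description. A minimal standalone sketch of the bump decision, using sample subjects (the flag collection loosely mirrors the workflow's own bash above):

# Sample subjects; in the workflow these come from `git log` since the last tag.
subjects=$'FEATURE (metrics): Add metrics\nFIX (tests): Skip flaky test\nREFACTOR (docs): Tidy wording'
has_feature=false; has_fix=false; has_breaking=false
while IFS= read -r s; do
  case "$s" in
    FEATURE*)       has_feature=true ;;  # minor bump candidate
    FIX*|REFACTOR*) has_fix=true ;;      # patch bump candidate
  esac
  # Breaking changes outrank everything, whatever the prefix.
  case "$s" in *'BREAKING CHANGE'*) has_breaking=true ;; esac
done <<< "$subjects"
if   $has_breaking; then echo major
elif $has_feature;  then echo minor  # prints "minor" for the sample above
elif $has_fix;      then echo patch
else                     echo none
fi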


@@ -1,120 +0,0 @@
name: Build & push Docker image
on:
push:
branches: [main]
workflow_dispatch: {}
jobs:
lint-backend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.60.3
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Install swag for swagger generation
run: go install github.com/swaggo/swag/cmd/swag@v1.16.4
- name: Generate swagger docs
run: |
cd backend
swag init -d . -g cmd/main.go -o swagger
- name: Run golangci-lint
run: |
cd backend
golangci-lint run
- name: Verify go mod tidy
run: |
cd backend
go mod tidy
git diff --exit-code go.mod go.sum || (echo "go mod tidy made changes, please run 'go mod tidy' and commit the changes" && exit 1)
lint-frontend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
run: |
cd frontend
npm ci
- name: Check if prettier was run
run: |
cd frontend
npm run format
git diff --exit-code || (echo "Prettier made changes, please run 'npm run format' and commit the changes" && exit 1)
- name: Check if linter was run
run: |
cd frontend
npm run lint
build-and-push:
runs-on: ubuntu-latest
needs: [lint-backend, lint-frontend]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up QEMU (enables multi-arch emulation)
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64 # both chip families
tags: |
rostislavdugin/postgresus:latest
rostislavdugin/postgresus:${{ github.sha }}
- name: Update Docker Hub description
uses: peter-evans/dockerhub-description@v4
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: rostislavdugin/postgresus
short-description: "PostgreSQL monitoring and backups solution"
readme-filepath: ./README.md


@@ -1,235 +0,0 @@
name: Automated Release
on:
push:
branches: [main]
workflow_dispatch:
jobs:
release:
runs-on: ubuntu-latest
if: ${{ !contains(github.event.head_commit.message, '[skip-release]') }}
permissions:
contents: write
pull-requests: write
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install dependencies
run: |
npm install -g conventional-changelog-cli
npm install -g semver
- name: Get current version
id: current_version
run: |
# Get the latest tag, default to 0.0.0 if no tags exist
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
echo "current_version=${LATEST_TAG#v}" >> $GITHUB_OUTPUT
echo "Current version: ${LATEST_TAG#v}"
- name: Analyze commits and determine version bump
id: version_bump
run: |
CURRENT_VERSION="${{ steps.current_version.outputs.current_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s" --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s" --no-merges)
fi
echo "Analyzing commits:"
echo "$COMMITS"
# Initialize flags
HAS_FEATURE=false
HAS_FIX=false
HAS_BREAKING=false
# Analyze each commit
while IFS= read -r commit; do
if [[ "$commit" =~ ^FEATURE ]]; then
HAS_FEATURE=true
echo "Found FEATURE commit: $commit"
elif [[ "$commit" =~ ^FIX ]]; then
HAS_FIX=true
echo "Found FIX commit: $commit"
elif [[ "$commit" =~ ^REFACTOR ]]; then
HAS_FIX=true # Treat refactor as patch
echo "Found REFACTOR commit: $commit"
fi
# Check for breaking changes
if [[ "$commit" =~ BREAKING[[:space:]]CHANGE ]] || [[ "$commit" =~ "!" ]]; then
HAS_BREAKING=true
echo "Found BREAKING CHANGE: $commit"
fi
done <<< "$COMMITS"
# Determine version bump
if [ "$HAS_BREAKING" = true ]; then
BUMP_TYPE="major"
elif [ "$HAS_FEATURE" = true ]; then
BUMP_TYPE="minor"
elif [ "$HAS_FIX" = true ]; then
BUMP_TYPE="patch"
else
BUMP_TYPE="none"
fi
echo "bump_type=$BUMP_TYPE" >> $GITHUB_OUTPUT
if [ "$BUMP_TYPE" != "none" ]; then
NEW_VERSION=$(npx semver -i $BUMP_TYPE $CURRENT_VERSION)
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "should_release=true" >> $GITHUB_OUTPUT
echo "New version will be: $NEW_VERSION"
else
echo "should_release=false" >> $GITHUB_OUTPUT
echo "No version bump needed"
fi
- name: Generate changelog
id: changelog
if: steps.version_bump.outputs.should_release == 'true'
run: |
CURRENT_VERSION="${{ steps.current_version.outputs.current_version }}"
NEW_VERSION="${{ steps.version_bump.outputs.new_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
fi
# Create changelog
CHANGELOG="# Changelog\n\n## [${NEW_VERSION}] - $(date +%Y-%m-%d)\n\n"
# Group commits by type and area
FEATURES=""
FIXES=""
REFACTORS=""
while IFS= read -r line; do
if [ -n "$line" ]; then
COMMIT_MSG=$(echo "$line" | cut -d'|' -f1)
COMMIT_HASH=$(echo "$line" | cut -d'|' -f2)
COMMIT_AUTHOR=$(echo "$line" | cut -d'|' -f3)
COMMIT_DATE=$(echo "$line" | cut -d'|' -f4)
SHORT_HASH=${COMMIT_HASH:0:7}
# Parse commit message format: TYPE (area): description
if [[ "$COMMIT_MSG" == FEATURE* ]]; then
# Extract area and description
TEMP="${COMMIT_MSG#FEATURE}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}" # trim leading spaces
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FEATURES="${FEATURES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == FIX* ]]; then
# Extract area and description
TEMP="${COMMIT_MSG#FIX}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}" # trim leading spaces
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FIXES="${FIXES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == REFACTOR* ]]; then
# Extract area and description
TEMP="${COMMIT_MSG#REFACTOR}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}" # trim leading spaces
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
REFACTORS="${REFACTORS}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
fi
fi
done <<< "$COMMITS"
# Build changelog sections
if [ -n "$FEATURES" ]; then
CHANGELOG="${CHANGELOG}### ✨ Features\n${FEATURES}\n"
fi
if [ -n "$FIXES" ]; then
CHANGELOG="${CHANGELOG}### 🐛 Bug Fixes\n${FIXES}\n"
fi
if [ -n "$REFACTORS" ]; then
CHANGELOG="${CHANGELOG}### 🔨 Refactoring\n${REFACTORS}\n"
fi
# Save changelog to file
echo -e "$CHANGELOG" > RELEASE_CHANGELOG.md
# Update main CHANGELOG.md - preserve all version history
if [ -f "CHANGELOG.md" ]; then
# Get the header until [Unreleased] section
sed -n '1,/## \[Unreleased\]/p' CHANGELOG.md > NEW_CHANGELOG.md
echo "" >> NEW_CHANGELOG.md
# Add the new release (without the "# Changelog" header)
echo "## [${NEW_VERSION}] - $(date +%Y-%m-%d)" >> NEW_CHANGELOG.md
echo "" >> NEW_CHANGELOG.md
# Add the new release sections
if [ -n "$FEATURES" ]; then
echo "### ✨ Features" >> NEW_CHANGELOG.md
echo -e "$FEATURES" >> NEW_CHANGELOG.md
fi
if [ -n "$FIXES" ]; then
echo "### 🐛 Bug Fixes" >> NEW_CHANGELOG.md
echo -e "$FIXES" >> NEW_CHANGELOG.md
fi
if [ -n "$REFACTORS" ]; then
echo "### 🔨 Refactoring" >> NEW_CHANGELOG.md
echo -e "$REFACTORS" >> NEW_CHANGELOG.md
fi
# Get existing releases (everything after first ## [version] pattern)
sed -n '/## \[[0-9]/,$p' CHANGELOG.md >> NEW_CHANGELOG.md
# Replace the original file
mv NEW_CHANGELOG.md CHANGELOG.md
else
echo -e "$CHANGELOG" > CHANGELOG.md
fi
# Set output for GitHub release (escape newlines)
{
echo 'changelog<<EOF'
echo -e "$CHANGELOG"
echo EOF
} >> $GITHUB_OUTPUT
- name: Create GitHub Release
if: steps.version_bump.outputs.should_release == 'true'
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ steps.version_bump.outputs.new_version }}
release_name: Release v${{ steps.version_bump.outputs.new_version }}
body: ${{ steps.changelog.outputs.changelog }}
draft: false
prerelease: false

.gitignore (3 lines changed)

@@ -3,4 +3,5 @@ postgresus-data/
.env
pgdata/
docker-compose.yml
node_modules/
node_modules/
.idea


@@ -1,10 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
<!-- This file is automatically updated by the release workflow -->


@@ -3,24 +3,40 @@ FROM --platform=$BUILDPLATFORM node:24-alpine AS frontend-build
WORKDIR /frontend
# Add version for the frontend build
ARG APP_VERSION=dev
ENV VITE_APP_VERSION=$APP_VERSION
COPY frontend/package.json frontend/package-lock.json ./
RUN npm ci
COPY frontend/ ./
# Copy .env file (with fallback to .env.production.example)
RUN if [ ! -f .env ]; then \
if [ -f .env.production.example ]; then \
cp .env.production.example .env; \
fi; \
fi
if [ -f .env.production.example ]; then \
cp .env.production.example .env; \
fi; \
fi
RUN npm run build
# ========= BUILD BACKEND =========
# Backend build stage
FROM --platform=$BUILDPLATFORM golang:1.23.3 AS backend-build
# Install Go public tools needed in runtime
RUN curl -fsSL https://raw.githubusercontent.com/pressly/goose/master/install.sh | sh
# Make TARGET args available early so tools built here match the final image arch
ARG TARGETOS
ARG TARGETARCH
# Install Go public tools needed in runtime. Use `go build` for goose so the
# binary is compiled for the target architecture instead of downloading a
# prebuilt binary which may have the wrong architecture (causes exec format
# errors on ARM).
RUN git clone --depth 1 --branch v3.24.3 https://github.com/pressly/goose.git /tmp/goose && \
cd /tmp/goose/cmd/goose && \
GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH:-amd64} \
go build -o /usr/local/bin/goose . && \
rm -rf /tmp/goose
RUN go install github.com/swaggo/swag/cmd/swag@v1.16.4
# Set working directory
@@ -45,25 +61,38 @@ ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
RUN CGO_ENABLED=0 \
GOOS=$TARGETOS \
GOARCH=$TARGETARCH \
go build -o /app/main ./cmd/main.go
GOOS=$TARGETOS \
GOARCH=$TARGETARCH \
go build -o /app/main ./cmd/main.go
# ========= RUNTIME =========
FROM --platform=$TARGETPLATFORM debian:bookworm-slim
FROM debian:bookworm-slim
# Install PostgreSQL client tools (versions 13-17)
# Add version metadata to runtime image
ARG APP_VERSION=dev
LABEL org.opencontainers.image.version=$APP_VERSION
ENV APP_VERSION=$APP_VERSION
# Set production mode for Docker containers
ENV ENV_MODE=production
# Install PostgreSQL server and client tools (versions 13-17)
RUN apt-get update && apt-get install -y --no-install-recommends \
wget ca-certificates gnupg lsb-release && \
wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" \
> /etc/apt/sources.list.d/pgdg.list && \
apt-get update && \
apt-get install -y --no-install-recommends \
postgresql-client-13 postgresql-client-14 postgresql-client-15 \
postgresql-client-16 postgresql-client-17 && \
rm -rf /var/lib/apt/lists/*
wget ca-certificates gnupg lsb-release sudo gosu && \
wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" \
> /etc/apt/sources.list.d/pgdg.list && \
apt-get update && \
apt-get install -y --no-install-recommends \
postgresql-17 postgresql-client-13 postgresql-client-14 postgresql-client-15 \
postgresql-client-16 postgresql-client-17 && \
rm -rf /var/lib/apt/lists/*
# Create postgres user and set up directories
RUN useradd -m -s /bin/bash postgres || true && \
mkdir -p /postgresus-data/pgdata && \
chown -R postgres:postgres /postgresus-data/pgdata
WORKDIR /app
@@ -82,12 +111,78 @@ COPY --from=backend-build /app/ui/build ./ui/build
# Copy .env file (with fallback to .env.production.example)
COPY backend/.env* /app/
RUN if [ ! -f /app/.env ]; then \
if [ -f /app/.env.production.example ]; then \
cp /app/.env.production.example /app/.env; \
fi; \
if [ -f /app/.env.production.example ]; then \
cp /app/.env.production.example /app/.env; \
fi; \
fi
# Create startup script
COPY <<EOF /app/start.sh
#!/bin/bash
set -e
# PostgreSQL 17 binary paths
PG_BIN="/usr/lib/postgresql/17/bin"
# Ensure proper ownership of data directory
echo "Setting up data directory permissions..."
mkdir -p /postgresus-data/pgdata
chown -R postgres:postgres /postgresus-data
# Initialize PostgreSQL if not already initialized
if [ ! -s "/postgresus-data/pgdata/PG_VERSION" ]; then
echo "Initializing PostgreSQL database..."
gosu postgres \$PG_BIN/initdb -D /postgresus-data/pgdata --encoding=UTF8 --locale=C.UTF-8
# Configure PostgreSQL
echo "host all all 127.0.0.1/32 md5" >> /postgresus-data/pgdata/pg_hba.conf
echo "local all all trust" >> /postgresus-data/pgdata/pg_hba.conf
echo "port = 5437" >> /postgresus-data/pgdata/postgresql.conf
echo "listen_addresses = 'localhost'" >> /postgresus-data/pgdata/postgresql.conf
echo "shared_buffers = 256MB" >> /postgresus-data/pgdata/postgresql.conf
echo "max_connections = 100" >> /postgresus-data/pgdata/postgresql.conf
fi
# Start PostgreSQL in background
echo "Starting PostgreSQL..."
gosu postgres \$PG_BIN/postgres -D /postgresus-data/pgdata -p 5437 &
POSTGRES_PID=\$!
# Wait for PostgreSQL to be ready
echo "Waiting for PostgreSQL to be ready..."
for i in {1..30}; do
if gosu postgres \$PG_BIN/pg_isready -p 5437 -h localhost >/dev/null 2>&1; then
echo "PostgreSQL is ready!"
break
fi
if [ \$i -eq 30 ]; then
echo "PostgreSQL failed to start"
exit 1
fi
sleep 1
done
# Create database and set password for postgres user
echo "Setting up database and user..."
gosu postgres \$PG_BIN/psql -p 5437 -h localhost -d postgres << 'SQL'
ALTER USER postgres WITH PASSWORD 'Q1234567';
SELECT 'CREATE DATABASE postgresus OWNER postgres'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'postgresus')
\\gexec
\\q
SQL
# Start the main application
echo "Starting Postgresus application..."
exec ./main
EOF
RUN chmod +x /app/start.sh
EXPOSE 4005
ENTRYPOINT ["./main"]
CMD []
# Volume for PostgreSQL data
VOLUME ["/postgresus-data"]
ENTRYPOINT ["/app/start.sh"]
CMD []
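For context on the goose change above: TARGETOS/TARGETARCH are filled in by Docker Buildx per target platform, so compiling goose from source keeps the binary's architecture in sync with the final image, whereas a prebuilt amd64 goose inside an arm64 image fails with an exec format error. A rough local equivalent of what the CI build does (builder and tag names are illustrative):

# One-time: create and select a buildx builder with multi-arch support
docker buildx create --use 2>/dev/null || true
# Build both platforms in one pass; Buildx sets TARGETOS/TARGETARCH per platform
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg APP_VERSION=dev-local \
  -t example/postgresus:dev .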


@@ -1,8 +1,17 @@
<div align="center">
<img src="assets/logo.svg" alt="Postgresus Logo" width="250"/>
<img src="assets/logo.svg" style="margin-bottom: 20px;" alt="Postgresus Logo" width="250"/>
<h3>PostgreSQL backup</h3>
<p>Free, open source and self-hosted solution for automated PostgreSQL backups. With multiple storage options and notifications</p>
<h3>PostgreSQL monitoring and backup</h3>
<p>Free, open source and self-hosted solution for automated PostgreSQL monitoring and backups. With multiple storage options and notifications</p>
<!-- Badges -->
[![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
[![Docker Pulls](https://img.shields.io/docker/pulls/rostislavdugin/postgresus?color=brightgreen)](https://hub.docker.com/r/rostislavdugin/postgresus)
[![Platform](https://img.shields.io/badge/platform-linux%20%7C%20macos%20%7C%20windows-lightgrey)](https://github.com/RostislavDugin/postgresus)
[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-13%20%7C%2014%20%7C%2015%20%7C%2016%20%7C%2017-336791?logo=postgresql&logoColor=white)](https://www.postgresql.org/)
[![Self Hosted](https://img.shields.io/badge/self--hosted-yes-brightgreen)](https://github.com/RostislavDugin/postgresus)
[![Open Source](https://img.shields.io/badge/open%20source-❤️-red)](https://github.com/RostislavDugin/postgresus)
<p>
<a href="#-features">Features</a> •
@@ -11,8 +20,14 @@
<a href="#-license">License</a> •
<a href="#-contributing">Contributing</a>
</p>
<p style="margin-top: 20px; margin-bottom: 20px; font-size: 1.2em;">
<a href="https://postgresus.com" target="_blank"><strong>🌐 Postgresus website</strong></a>
</p>
<img src="assets/dashboard.svg" alt="Postgresus Dashboard" width="800"/>
</div>
---
@@ -28,12 +43,12 @@
### 🗄️ **Multiple Storage Destinations**
- **Local storage**: Keep backups on your VPS/server
- **Cloud storage**: S3, Cloudflare R2, Google Drive, Dropbox, and more (coming soon)
- **Cloud storage**: S3, Cloudflare R2, Google Drive, NAS, Dropbox and more
- **Secure**: All data stays under your control
### 📱 **Smart Notifications**
- **Multiple channels**: Email, Telegram, Slack, webhooks (coming soon)
- **Multiple channels**: Email, Telegram, Slack, Discord, webhooks
- **Real-time updates**: Success and failure notifications
- **Team integration**: Perfect for DevOps workflows
@@ -55,21 +70,29 @@
- **Historical data**: View trends and patterns over time
- **Alert system**: Get notified when issues are detected
### 📦 Installation
You have three ways to install Postgresus:
- Script (recommended)
- Simple Docker run
- Docker Compose setup
<img src="assets/healthchecks.svg" alt="Postgresus Dashboard" width="800"/>
---
## 📦 Installation
You have two ways to install Postgresus: via automated script (recommended) or manual Docker Compose setup.
You have three ways to install Postgresus: automated script (recommended), simple Docker run, or Docker Compose setup.
### Option 1: Automated Installation Script (Recommended, Linux only)
The installation script will:
- ✅ Install Docker with Docker Compose (if not already installed)
- ✅ Create optimized `docker-compose.yml` configuration
- ✅ Set up automatic startup on system reboot via cron
- ✅ Install Docker with Docker Compose (if not already installed)
- ✅ Set up Postgresus
- ✅ Configure automatic startup on system reboot
```bash
sudo apt-get install -y curl && \
@@ -77,7 +100,26 @@ sudo curl -sSL https://raw.githubusercontent.com/RostislavDugin/postgresus/refs/
| sudo bash
```
### Option 2: Manual Docker Compose Setup
### Option 2: Simple Docker Run
The easiest way to run Postgresus with embedded PostgreSQL:
```bash
docker run -d \
--name postgresus \
-p 4005:4005 \
-v ./postgresus-data:/postgresus-data \
--restart unless-stopped \
rostislavdugin/postgresus:latest
```
This single command will:
- ✅ Start Postgresus
- ✅ Store all data in `./postgresus-data` directory
- ✅ Automatically restart on system reboot
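A quick way to check the container came up after the run command above (the app listens on the mapped port 4005):

docker logs -f postgresus        # startup log, including the embedded PostgreSQL init
curl -I http://localhost:4005    # the UI should respond once the app is listening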
### Option 3: Docker Compose Setup
Create a `docker-compose.yml` file with the following configuration:
@@ -92,29 +134,6 @@ services:
- "4005:4005"
volumes:
- ./postgresus-data:/postgresus-data
depends_on:
postgresus-db:
condition: service_healthy
restart: unless-stopped
postgresus-db:
container_name: postgresus-db
image: postgres:17
# we use default values, but do not expose
# PostgreSQL ports so it is safe
environment:
- POSTGRES_DB=postgresus
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=Q1234567
volumes:
- ./pgdata:/var/lib/postgresql/data
command: -p 5437
shm_size: 10gb
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d postgresus -p 5437"]
interval: 5s
timeout: 5s
retries: 5
restart: unless-stopped
```


@@ -24,4 +24,6 @@ TEST_POSTGRES_16_PORT=5004
TEST_POSTGRES_17_PORT=5005
# testing S3
TEST_MINIO_PORT=9000
TEST_MINIO_CONSOLE_PORT=9001
TEST_MINIO_CONSOLE_PORT=9001
# testing NAS
TEST_NAS_PORT=5006


@@ -5,9 +5,9 @@ DEV_DB_PASSWORD=Q1234567
#app
ENV_MODE=production
# db
DATABASE_DSN=host=postgresus-db user=postgres password=Q1234567 dbname=postgresus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@postgresus-db:5437/postgresus?sslmode=disable
DATABASE_DSN=host=localhost user=postgres password=Q1234567 dbname=postgresus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
# migrations
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgres://postgres:Q1234567@postgresus-db:5437/postgresus?sslmode=disable
GOOSE_DBSTRING=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
GOOSE_MIGRATION_DIR=./migrations

backend/.gitignore (3 lines changed)

@@ -11,4 +11,5 @@ swagger/swagger.json
swagger/swagger.yaml
postgresus-backend.exe
ui/build/*
pgdata-for-restore/
pgdata-for-restore/
temp/


@@ -1,15 +0,0 @@
repos:
- repo: local
hooks:
- id: golangci-lint-fmt
name: Format Go Code using golangci-lint fmt
entry: golangci-lint fmt ./...
language: system
types: [go]
- id: golangci-lint-run
name: Run golangci-lint for static analysis
entry: golangci-lint run
language: system
types: [go]
pass_filenames: false

backend/Makefile (new file, +20 lines)

@@ -0,0 +1,20 @@
run:
go run cmd/main.go
test:
go test -count=1 ./internal/...
lint:
golangci-lint fmt && golangci-lint run
migration-create:
goose create $(name) sql
migration-up:
goose up
migration-down:
goose down
swagger:
swag init -g ./cmd/main.go -o swagger


@@ -9,44 +9,39 @@ instead of postgresus-db from docker-compose.yml in the root folder.
# Run
To build:
> go build /cmd/main.go
To run:
> go run /cmd/main.go
> make run
To run tests:
> go test ./internal/...
> make test
Before commit (make sure `golangci-lint` is installed):
> golangci-lint fmt
> golangci-lint run
> make lint
# Migrations
To create migration:
> goose create MIGRATION_NAME sql
> make migration-create name=MIGRATION_NAME
To run migrations:
> goose up
> make migration-up
If latest migration failed:
To rollback on migration:
> goose down
> make migration-down
# Swagger
To generate swagger docs:
> swag init -g .\cmd\main.go -o swagger
> make swagger
Swagger URL is:


@@ -20,6 +20,9 @@ import (
"postgresus-backend/internal/features/disk"
healthcheck_attempt "postgresus-backend/internal/features/healthcheck/attempt"
healthcheck_config "postgresus-backend/internal/features/healthcheck/config"
postgres_monitoring_collectors "postgresus-backend/internal/features/monitoring/postgres/collectors"
postgres_monitoring_metrics "postgresus-backend/internal/features/monitoring/postgres/metrics"
postgres_monitoring_settings "postgresus-backend/internal/features/monitoring/postgres/settings"
"postgresus-backend/internal/features/notifiers"
"postgresus-backend/internal/features/restores"
"postgresus-backend/internal/features/storages"
@@ -31,6 +34,7 @@ import (
_ "postgresus-backend/swagger" // swagger docs
"github.com/gin-contrib/cors"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
@@ -49,6 +53,17 @@ func main() {
runMigrations(log)
// create directories that used for backups and restore
err := files_utils.EnsureDirectories([]string{
config.GetEnv().TempFolder,
config.GetEnv().DataFolder,
})
if err != nil {
log.Error("Failed to ensure directories", "error", err)
os.Exit(1)
}
// Handle password reset if flag is provided
newPassword := flag.String("new-password", "", "Set a new password for the user")
flag.Parse()
@@ -61,6 +76,15 @@ func main() {
gin.SetMode(gin.ReleaseMode)
ginApp := gin.Default()
// Add GZIP compression middleware
ginApp.Use(gzip.Gzip(
gzip.DefaultCompression,
// Don't compress already compressed files
gzip.WithExcludedExtensions(
[]string{".png", ".gif", ".jpeg", ".jpg", ".ico", ".svg", ".pdf", ".mp4"},
),
))
enableCors(ginApp)
setUpRoutes(ginApp)
setUpDependencies()
@@ -137,6 +161,8 @@ func setUpRoutes(r *gin.Engine) {
healthcheckAttemptController := healthcheck_attempt.GetHealthcheckAttemptController()
diskController := disk.GetDiskController()
backupConfigController := backups_config.GetBackupConfigController()
postgresMonitoringSettingsController := postgres_monitoring_settings.GetPostgresMonitoringSettingsController()
postgresMonitoringMetricsController := postgres_monitoring_metrics.GetPostgresMonitoringMetricsController()
downdetectContoller.RegisterRoutes(v1)
userController.RegisterRoutes(v1)
@@ -150,13 +176,15 @@ func setUpRoutes(r *gin.Engine) {
healthcheckConfigController.RegisterRoutes(v1)
healthcheckAttemptController.RegisterRoutes(v1)
backupConfigController.RegisterRoutes(v1)
postgresMonitoringSettingsController.RegisterRoutes(v1)
postgresMonitoringMetricsController.RegisterRoutes(v1)
}
func setUpDependencies() {
backups.SetupDependencies()
backups.SetupDependencies()
restores.SetupDependencies()
healthcheck_config.SetupDependencies()
postgres_monitoring_settings.SetupDependencies()
}
func runBackgroundTasks(log *slog.Logger) {
@@ -176,7 +204,15 @@ func runBackgroundTasks(log *slog.Logger) {
})
go runWithPanicLogging(log, "healthcheck attempt background service", func() {
healthcheck_attempt.GetHealthcheckAttemptBackgroundService().RunBackgroundTasks()
healthcheck_attempt.GetHealthcheckAttemptBackgroundService().Run()
})
go runWithPanicLogging(log, "postgres monitoring metrics background service", func() {
postgres_monitoring_metrics.GetPostgresMonitoringMetricsBackgroundService().Run()
})
go runWithPanicLogging(log, "postgres monitoring collectors background service", func() {
postgres_monitoring_collectors.GetDbMonitoringBackgroundService().Run()
})
}


@@ -86,3 +86,19 @@ services:
- POSTGRES_PASSWORD=testpassword
container_name: test-postgres-17
shm_size: 1gb
# Test NAS server (Samba)
test-nas:
image: dperson/samba:latest
ports:
- "${TEST_NAS_PORT:-445}:445"
environment:
- USERID=1000
- GROUPID=1000
volumes:
- ./temp/nas:/shared
command: >
-u "testuser;testpassword"
-s "backups;/shared;yes;no;no;testuser"
-p
container_name: test-nas
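Once the test-nas container is running, the share can be sanity-checked from the host (requires smbclient locally; share name and credentials match the compose service above):

# TEST_NAS_PORT defaults to 445 here and is set to 5006 in CI
smbclient //localhost/backups -U 'testuser%testpassword' -p 5006 -c 'ls'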


@@ -4,6 +4,7 @@ go 1.23.3
require (
github.com/gin-contrib/cors v1.7.5
github.com/gin-contrib/gzip v1.2.3
github.com/gin-gonic/gin v1.10.0
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/uuid v1.6.0
@@ -19,6 +20,7 @@ require (
github.com/swaggo/gin-swagger v1.6.0
github.com/swaggo/swag v1.16.4
golang.org/x/crypto v0.39.0
golang.org/x/time v0.12.0
gorm.io/driver/postgres v1.5.11
gorm.io/gorm v1.26.1
)
@@ -27,9 +29,11 @@ require (
cloud.google.com/go/auth v0.16.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/hirochachacha/go-smb2 v1.1.0
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0 // indirect


@@ -35,10 +35,12 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/gin-contrib/cors v1.7.5 h1:cXC9SmofOrRg0w9PigwGlHG3ztswH6bqq4vJVXnvYMk=
github.com/gin-contrib/cors v1.7.5/go.mod h1:4q3yi7xBEDDWKapjT2o1V7mScKDDr8k+jZ0fSquGoy0=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/gzip v1.2.3 h1:dAhT722RuEG330ce2agAs75z7yB+NKvX/ZM1r8w0u2U=
github.com/gin-contrib/gzip v1.2.3/go.mod h1:ad72i4Bzmaypk8M762gNXa2wkxxjbz0icRNnuLJ9a/c=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
@@ -91,6 +93,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ilyakaznacheev/cleanenv v1.5.0 h1:0VNZXggJE2OYdXE87bfSSwGxeiGt9moSR2lOrsHHvr4=
github.com/ilyakaznacheev/cleanenv v1.5.0/go.mod h1:a5aDzaJrLCQZsazHol1w8InnDcOX0OColm64SlIi6gk=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
@@ -210,12 +214,14 @@ go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2
golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU=
golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
@@ -230,6 +236,7 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -252,6 +259,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=


@@ -41,6 +41,8 @@ type EnvVariables struct {
TestMinioPort string `env:"TEST_MINIO_PORT"`
TestMinioConsolePort string `env:"TEST_MINIO_CONSOLE_PORT"`
TestNASPort string `env:"TEST_NAS_PORT"`
}
var (
@@ -161,6 +163,11 @@ func loadEnvVariables() {
log.Error("TEST_MINIO_CONSOLE_PORT is empty")
os.Exit(1)
}
if env.TestNASPort == "" {
log.Error("TEST_NAS_PORT is empty")
os.Exit(1)
}
}
log.Info("Environment variables loaded successfully!")


@@ -242,7 +242,7 @@ func (s *BackupService) MakeBackup(databaseID uuid.UUID, isLastTry bool) {
)
}
if !isLastTry {
if backup.Status != BackupStatusCompleted && !isLastTry {
return
}


@@ -60,8 +60,7 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
}
args := []string{
"-Fc", // custom format with built-in compression
"-Z", "6", // balanced compression level (0-9, 6 is balanced)
"-Fc", // custom format with built-in compression
"--no-password", // Use environment variable for password, prevent prompts
"-h", pg.Host,
"-p", strconv.Itoa(pg.Port),
@@ -70,6 +69,17 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
"--verbose", // Add verbose output to help with debugging
}
// Use zstd compression level 5 for PostgreSQL 16+ (better compression and speed)
// Fall back to gzip compression level 5 for versions 13-15
if pg.Version == tools.PostgresqlVersion13 || pg.Version == tools.PostgresqlVersion14 ||
pg.Version == tools.PostgresqlVersion15 {
args = append(args, "-Z", "5")
uc.logger.Info("Using gzip compression level 5 (zstd not available)", "version", pg.Version)
} else {
args = append(args, "--compress=zstd:5")
uc.logger.Info("Using zstd compression level 5", "version", pg.Version)
}
return uc.streamToStorage(
backupID,
backupConfig,
@@ -100,7 +110,9 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
) error {
uc.logger.Info("Streaming PostgreSQL backup to storage", "pgBin", pgBin, "args", args)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute)
// if a backup does not fit into 23 hours, Postgresus
// is unlikely to work for a database of that size anyway
ctx, cancel := context.WithTimeout(context.Background(), 23*time.Hour)
defer cancel()
// Monitor for shutdown and cancel context if needed
@@ -255,10 +267,10 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
copyResultCh <- err
}()
// Wait for the dump and copy to finish
waitErr := cmd.Wait()
// Wait for the copy to finish first, then the dump process
copyErr := <-copyResultCh
bytesWritten := <-bytesWrittenCh
waitErr := cmd.Wait()
// Check for shutdown before finalizing
if config.IsShouldShutdown() {
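
The version branch above maps to plain pg_dump invocations. A hedged sketch of both paths (host, port, and database name are placeholders):

# PostgreSQL 13-15 client tools: custom format with gzip level 5
pg_dump -Fc -Z 5 --no-password -h localhost -p 5432 -U postgres -d mydb -f mydb.dump
# PostgreSQL 16+ client tools: custom format with zstd level 5
pg_dump -Fc --compress=zstd:5 --no-password -h localhost -p 5432 -U postgres -d mydb -f mydb.dump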


@@ -56,7 +56,8 @@ func (s *BackupConfigService) SaveBackupConfig(
if existingConfig != nil {
// If storage is changing, notify the listener
if s.dbStorageChangeListener != nil &&
!storageIDsEqual(existingConfig.StorageID, backupConfig.StorageID) {
backupConfig.Storage != nil &&
!storageIDsEqual(existingConfig.StorageID, &backupConfig.Storage.ID) {
if err := s.dbStorageChangeListener.OnBeforeBackupsStorageChange(
backupConfig.DatabaseID,
); err != nil {


@@ -7,6 +7,7 @@ import (
"log/slog"
"postgresus-backend/internal/util/tools"
"regexp"
"slices"
"time"
"github.com/google/uuid"
@@ -175,3 +176,101 @@ func buildConnectionStringForDB(p *PostgresqlDatabase, dbName string) string {
sslMode,
)
}
func (p *PostgresqlDatabase) InstallExtensions(extensions []tools.PostgresqlExtension) error {
if len(extensions) == 0 {
return nil
}
if p.Database == nil || *p.Database == "" {
return errors.New("database name is required for installing extensions")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Build connection string for the specific database
connStr := buildConnectionStringForDB(p, *p.Database)
// Connect to database
conn, err := pgx.Connect(ctx, connStr)
if err != nil {
return fmt.Errorf("failed to connect to database '%s': %w", *p.Database, err)
}
defer func() {
if closeErr := conn.Close(ctx); closeErr != nil {
fmt.Println("failed to close connection: %w", closeErr)
}
}()
// Check which extensions are already installed
installedExtensions, err := p.getInstalledExtensions(ctx, conn)
if err != nil {
return fmt.Errorf("failed to check installed extensions: %w", err)
}
// Install missing extensions
for _, extension := range extensions {
if contains(installedExtensions, string(extension)) {
continue // Extension already installed
}
if err := p.installExtension(ctx, conn, string(extension)); err != nil {
return fmt.Errorf("failed to install extension '%s': %w", extension, err)
}
}
return nil
}
// getInstalledExtensions queries the database for currently installed extensions
func (p *PostgresqlDatabase) getInstalledExtensions(
ctx context.Context,
conn *pgx.Conn,
) ([]string, error) {
query := "SELECT extname FROM pg_extension"
rows, err := conn.Query(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to query installed extensions: %w", err)
}
defer rows.Close()
var extensions []string
for rows.Next() {
var extname string
if err := rows.Scan(&extname); err != nil {
return nil, fmt.Errorf("failed to scan extension name: %w", err)
}
extensions = append(extensions, extname)
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("error iterating over extension rows: %w", err)
}
return extensions, nil
}
// installExtension installs a single PostgreSQL extension
func (p *PostgresqlDatabase) installExtension(
ctx context.Context,
conn *pgx.Conn,
extensionName string,
) error {
query := fmt.Sprintf("CREATE EXTENSION IF NOT EXISTS %s", extensionName)
_, err := conn.Exec(ctx, query)
if err != nil {
return fmt.Errorf("failed to execute CREATE EXTENSION: %w", err)
}
return nil
}
// contains checks if a string slice contains a specific string
func contains(slice []string, item string) bool {
return slices.Contains(slice, item)
}
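A usage sketch of this API (connection values are placeholders):

dbName := "app_db"
pg := &PostgresqlDatabase{
	Host:     "localhost",
	Port:     5432,
	Username: "postgres",
	Password: "secret",
	Database: &dbName,
}
if err := pg.InstallExtensions([]tools.PostgresqlExtension{"pg_stat_statements"}); err != nil {
	log.Fatal(err)
}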

View File

@@ -1,6 +1,7 @@
package databases
import (
"errors"
"postgresus-backend/internal/features/databases/databases/postgresql"
"postgresus-backend/internal/storage"
@@ -21,9 +22,12 @@ func (r *DatabaseRepository) Save(database *Database) (*Database, error) {
err := db.Transaction(func(tx *gorm.DB) error {
switch database.Type {
case DatabaseTypePostgres:
if database.Postgresql != nil {
database.Postgresql.DatabaseID = &database.ID
if database.Postgresql == nil {
return errors.New("postgresql configuration is required for PostgreSQL database")
}
// Ensure DatabaseID is always set and never nil
database.Postgresql.DatabaseID = &database.ID
}
if isNew {
@@ -43,17 +47,15 @@ func (r *DatabaseRepository) Save(database *Database) (*Database, error) {
// Save the specific database type
switch database.Type {
case DatabaseTypePostgres:
if database.Postgresql != nil {
database.Postgresql.DatabaseID = &database.ID
if database.Postgresql.ID == uuid.Nil {
database.Postgresql.ID = uuid.New()
if err := tx.Create(database.Postgresql).Error; err != nil {
return err
}
} else {
if err := tx.Save(database.Postgresql).Error; err != nil {
return err
}
database.Postgresql.DatabaseID = &database.ID
if database.Postgresql.ID == uuid.Nil {
database.Postgresql.ID = uuid.New()
if err := tx.Create(database.Postgresql).Error; err != nil {
return err
}
} else {
if err := tx.Save(database.Postgresql).Error; err != nil {
return err
}
}
}

View File

@@ -13,7 +13,7 @@ type HealthcheckAttemptBackgroundService struct {
logger *slog.Logger
}
func (s *HealthcheckAttemptBackgroundService) RunBackgroundTasks() {
func (s *HealthcheckAttemptBackgroundService) Run() {
// first healthcheck immediately
s.checkDatabases()

View File

@@ -224,7 +224,7 @@ func (uc *CheckPgHealthUseCase) sendDbStatusNotification(
messageBody := ""
if newHealthStatus == databases.HealthStatusAvailable {
messageTitle = fmt.Sprintf("✅ [%s] DB is back online", database.Name)
messageTitle = fmt.Sprintf("✅ [%s] DB is online", database.Name)
messageBody = fmt.Sprintf("✅ [%s] DB is back online", database.Name)
} else {
messageTitle = fmt.Sprintf("❌ [%s] DB is unavailable", database.Name)

View File

@@ -303,7 +303,7 @@ func Test_CheckPgHealthUseCase(t *testing.T) {
t,
"SendNotification",
mock.Anything,
fmt.Sprintf("✅ [%s] DB is back online", database.Name),
fmt.Sprintf("✅ [%s] DB is online", database.Name),
fmt.Sprintf("✅ [%s] DB is back online", database.Name),
)
})

View File

@@ -0,0 +1,292 @@
package postgres_monitoring_collectors
import (
"context"
"fmt"
"log/slog"
"postgresus-backend/internal/config"
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/databases/databases/postgresql"
postgres_monitoring_metrics "postgresus-backend/internal/features/monitoring/postgres/metrics"
postgres_monitoring_settings "postgresus-backend/internal/features/monitoring/postgres/settings"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
)
type DbMonitoringBackgroundService struct {
databaseService *databases.DatabaseService
monitoringSettingsService *postgres_monitoring_settings.PostgresMonitoringSettingsService
metricsService *postgres_monitoring_metrics.PostgresMonitoringMetricService
logger *slog.Logger
isRunning int32
lastRunTimes map[uuid.UUID]time.Time
lastRunTimesMutex sync.RWMutex
}
func (s *DbMonitoringBackgroundService) Run() {
for {
if config.IsShouldShutdown() {
s.logger.Info("stopping background monitoring tasks")
return
}
s.processMonitoringTasks()
time.Sleep(1 * time.Second)
}
}
func (s *DbMonitoringBackgroundService) processMonitoringTasks() {
if !atomic.CompareAndSwapInt32(&s.isRunning, 0, 1) {
s.logger.Warn("skipping background task execution, previous task still running")
return
}
defer atomic.StoreInt32(&s.isRunning, 0)
dbsWithEnabledDbMonitoring, err := s.monitoringSettingsService.GetAllDbsWithEnabledDbMonitoring()
if err != nil {
s.logger.Error("failed to get all databases with enabled db monitoring", "error", err)
return
}
for _, dbSettings := range dbsWithEnabledDbMonitoring {
s.processDatabase(&dbSettings)
}
}
func (s *DbMonitoringBackgroundService) processDatabase(
settings *postgres_monitoring_settings.PostgresMonitoringSettings,
) {
db, err := s.databaseService.GetDatabaseByID(settings.DatabaseID)
if err != nil {
s.logger.Error("failed to get database by id", "error", err)
return
}
if db.Type != databases.DatabaseTypePostgres {
return
}
if !s.isReadyForNextRun(settings) {
return
}
err = s.collectAndSaveMetrics(db, settings)
if err != nil {
s.logger.Error("failed to collect and save metrics", "error", err)
return
}
s.updateLastRunTime(db)
}
func (s *DbMonitoringBackgroundService) collectAndSaveMetrics(
db *databases.Database,
settings *postgres_monitoring_settings.PostgresMonitoringSettings,
) error {
if db.Postgresql == nil {
return nil
}
s.logger.Debug("collecting metrics for database", "database_id", db.ID)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
conn, err := s.connectToDatabase(ctx, db)
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}
if conn == nil {
return nil
}
defer func() {
if closeErr := conn.Close(ctx); closeErr != nil {
s.logger.Error("Failed to close connection", "error", closeErr)
}
}()
var metrics []postgres_monitoring_metrics.PostgresMonitoringMetric
now := time.Now().UTC()
if settings.IsDbResourcesMonitoringEnabled {
dbMetrics, err := s.collectDatabaseResourceMetrics(ctx, conn, db.ID, now)
if err != nil {
s.logger.Error("failed to collect database resource metrics", "error", err)
} else {
metrics = append(metrics, dbMetrics...)
}
}
if len(metrics) > 0 {
if err := s.metricsService.Insert(metrics); err != nil {
return fmt.Errorf("failed to insert metrics: %w", err)
}
s.logger.Debug(
"successfully collected and saved metrics",
"count",
len(metrics),
"database_id",
db.ID,
)
}
return nil
}
func (s *DbMonitoringBackgroundService) isReadyForNextRun(
settings *postgres_monitoring_settings.PostgresMonitoringSettings,
) bool {
s.lastRunTimesMutex.RLock()
defer s.lastRunTimesMutex.RUnlock()
if s.lastRunTimes == nil {
return true
}
lastRun, exists := s.lastRunTimes[settings.DatabaseID]
if !exists {
return true
}
return time.Since(lastRun) >= time.Duration(settings.MonitoringIntervalSeconds)*time.Second
}
func (s *DbMonitoringBackgroundService) updateLastRunTime(db *databases.Database) {
s.lastRunTimesMutex.Lock()
defer s.lastRunTimesMutex.Unlock()
if s.lastRunTimes == nil {
s.lastRunTimes = make(map[uuid.UUID]time.Time)
}
s.lastRunTimes[db.ID] = time.Now().UTC()
}
func (s *DbMonitoringBackgroundService) connectToDatabase(
ctx context.Context,
db *databases.Database,
) (*pgx.Conn, error) {
if db.Postgresql == nil {
return nil, nil
}
if db.Postgresql.Database == nil || *db.Postgresql.Database == "" {
return nil, nil
}
connStr := s.buildConnectionString(db.Postgresql)
return pgx.Connect(ctx, connStr)
}
func (s *DbMonitoringBackgroundService) buildConnectionString(
pg *postgresql.PostgresqlDatabase,
) string {
sslMode := "disable"
if pg.IsHttps {
sslMode = "require"
}
return fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
pg.Host,
pg.Port,
pg.Username,
pg.Password,
*pg.Database,
sslMode,
)
}
func (s *DbMonitoringBackgroundService) collectDatabaseResourceMetrics(
ctx context.Context,
conn *pgx.Conn,
databaseID uuid.UUID,
timestamp time.Time,
) ([]postgres_monitoring_metrics.PostgresMonitoringMetric, error) {
var metrics []postgres_monitoring_metrics.PostgresMonitoringMetric
// Collect I/O statistics
ioMetrics, err := s.collectIOMetrics(ctx, conn, databaseID, timestamp)
if err != nil {
s.logger.Warn("failed to collect I/O metrics", "error", err)
} else {
metrics = append(metrics, ioMetrics...)
}
// Collect memory usage (approximation based on buffer usage)
ramMetric, err := s.collectRAMUsageMetric(ctx, conn, databaseID, timestamp)
if err != nil {
s.logger.Warn("failed to collect RAM usage metric", "error", err)
} else {
metrics = append(metrics, ramMetric)
}
return metrics, nil
}
func (s *DbMonitoringBackgroundService) collectIOMetrics(
ctx context.Context,
conn *pgx.Conn,
databaseID uuid.UUID,
timestamp time.Time,
) ([]postgres_monitoring_metrics.PostgresMonitoringMetric, error) {
var blocksRead, blocksHit int64
query := `
SELECT
COALESCE(SUM(blks_read), 0) as total_reads,
COALESCE(SUM(blks_hit), 0) as total_hits
FROM pg_stat_database
WHERE datname = current_database()
`
err := conn.QueryRow(ctx, query).Scan(&blocksRead, &blocksHit)
if err != nil {
return nil, err
}
// Calculate I/O activity as total blocks accessed (PostgreSQL block size is typically 8KB)
const pgBlockSize = 8192 // 8KB
totalIOBytes := float64((blocksRead + blocksHit) * pgBlockSize)
return []postgres_monitoring_metrics.PostgresMonitoringMetric{
{
DatabaseID: databaseID,
Metric: postgres_monitoring_metrics.MetricsTypeDbIO,
ValueType: postgres_monitoring_metrics.MetricsValueTypeByte,
Value: totalIOBytes,
CreatedAt: timestamp,
},
}, nil
}
func (s *DbMonitoringBackgroundService) collectRAMUsageMetric(
ctx context.Context,
conn *pgx.Conn,
databaseID uuid.UUID,
timestamp time.Time,
) (postgres_monitoring_metrics.PostgresMonitoringMetric, error) {
// Note: blks_hit is a cumulative counter since the last stats reset, so this
// value approximates buffer activity rather than point-in-time RAM usage
var bufferUsageBytes int64
query := `
SELECT
COALESCE(SUM(blks_hit), 0) * 8192 as buffer_usage
FROM pg_stat_database
WHERE datname = current_database()
`
err := conn.QueryRow(ctx, query).Scan(&bufferUsageBytes)
if err != nil {
return postgres_monitoring_metrics.PostgresMonitoringMetric{}, err
}
return postgres_monitoring_metrics.PostgresMonitoringMetric{
DatabaseID: databaseID,
Metric: postgres_monitoring_metrics.MetricsTypeDbRAM,
ValueType: postgres_monitoring_metrics.MetricsValueTypeByte,
Value: float64(bufferUsageBytes),
CreatedAt: timestamp,
}, nil
}
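The same pg_stat_database counters also yield a cache-hit ratio, a natural companion metric; a sketch under the same connection assumptions (not part of the service above):

// cacheHitRatio returns blks_hit / (blks_hit + blks_read) for the current database.
func cacheHitRatio(ctx context.Context, conn *pgx.Conn) (float64, error) {
	var ratio float64
	query := `
		SELECT COALESCE(
			SUM(blks_hit)::float8 / NULLIF(SUM(blks_hit) + SUM(blks_read), 0),
			0
		)
		FROM pg_stat_database
		WHERE datname = current_database()
	`
	err := conn.QueryRow(ctx, query).Scan(&ratio)
	return ratio, err
}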

View File

@@ -0,0 +1,23 @@
package postgres_monitoring_collectors
import (
"postgresus-backend/internal/features/databases"
postgres_monitoring_metrics "postgresus-backend/internal/features/monitoring/postgres/metrics"
postgres_monitoring_settings "postgresus-backend/internal/features/monitoring/postgres/settings"
"postgresus-backend/internal/util/logger"
"sync"
)
var dbMonitoringBackgroundService = &DbMonitoringBackgroundService{
databases.GetDatabaseService(),
postgres_monitoring_settings.GetPostgresMonitoringSettingsService(),
postgres_monitoring_metrics.GetPostgresMonitoringMetricsService(),
logger.GetLogger(),
0,
nil,
sync.RWMutex{},
}
func GetDbMonitoringBackgroundService() *DbMonitoringBackgroundService {
return dbMonitoringBackgroundService
}

View File

@@ -0,0 +1,33 @@
package postgres_monitoring_metrics
import (
"postgresus-backend/internal/config"
"postgresus-backend/internal/util/logger"
"time"
)
var log = logger.GetLogger()
type PostgresMonitoringMetricsBackgroundService struct {
metricsRepository *PostgresMonitoringMetricRepository
}
func (s *PostgresMonitoringMetricsBackgroundService) Run() {
for {
if config.IsShouldShutdown() {
return
}
s.RemoveOldMetrics()
time.Sleep(5 * time.Minute)
}
}
func (s *PostgresMonitoringMetricsBackgroundService) RemoveOldMetrics() {
// keep roughly three months of metrics
threeMonthsAgo := time.Now().UTC().Add(-3 * 30 * 24 * time.Hour)
if err := s.metricsRepository.RemoveOlderThan(threeMonthsAgo); err != nil {
log.Error("Failed to remove old metrics", "error", err)
}
}

View File

@@ -0,0 +1,62 @@
package postgres_monitoring_metrics
import (
"net/http"
"postgresus-backend/internal/features/users"
"github.com/gin-gonic/gin"
)
type PostgresMonitoringMetricsController struct {
metricsService *PostgresMonitoringMetricService
userService *users.UserService
}
func (c *PostgresMonitoringMetricsController) RegisterRoutes(router *gin.RouterGroup) {
router.POST("/postgres-monitoring-metrics/get", c.GetMetrics)
}
// GetMetrics
// @Summary Get postgres monitoring metrics
// @Description Get postgres monitoring metrics for a database within a time range
// @Tags postgres-monitoring-metrics
// @Accept json
// @Produce json
// @Param request body GetMetricsRequest true "Metrics request data"
// @Success 200 {object} []PostgresMonitoringMetric
// @Failure 400
// @Failure 401
// @Router /postgres-monitoring-metrics/get [post]
func (c *PostgresMonitoringMetricsController) GetMetrics(ctx *gin.Context) {
var requestDTO GetMetricsRequest
if err := ctx.ShouldBindJSON(&requestDTO); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
authorizationHeader := ctx.GetHeader("Authorization")
if authorizationHeader == "" {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "authorization header is required"})
return
}
user, err := c.userService.GetUserFromToken(authorizationHeader)
if err != nil {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
return
}
metrics, err := c.metricsService.GetMetrics(
user,
requestDTO.DatabaseID,
requestDTO.MetricType,
requestDTO.From,
requestDTO.To,
)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusOK, metrics)
}

View File

@@ -0,0 +1,35 @@
package postgres_monitoring_metrics
import (
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/users"
)
var metricsRepository = &PostgresMonitoringMetricRepository{}
var metricsService = &PostgresMonitoringMetricService{
metricsRepository,
databases.GetDatabaseService(),
}
var metricsController = &PostgresMonitoringMetricsController{
metricsService,
users.GetUserService(),
}
var metricsBackgroundService = &PostgresMonitoringMetricsBackgroundService{
metricsRepository,
}
func GetPostgresMonitoringMetricsController() *PostgresMonitoringMetricsController {
return metricsController
}
func GetPostgresMonitoringMetricsService() *PostgresMonitoringMetricService {
return metricsService
}
func GetPostgresMonitoringMetricsRepository() *PostgresMonitoringMetricRepository {
return metricsRepository
}
func GetPostgresMonitoringMetricsBackgroundService() *PostgresMonitoringMetricsBackgroundService {
return metricsBackgroundService
}

View File

@@ -0,0 +1,14 @@
package postgres_monitoring_metrics
import (
"time"
"github.com/google/uuid"
)
type GetMetricsRequest struct {
DatabaseID uuid.UUID `json:"databaseId" binding:"required"`
MetricType PostgresMonitoringMetricType `json:"metricType"`
From time.Time `json:"from" binding:"required"`
To time.Time `json:"to" binding:"required"`
}

View File

@@ -0,0 +1,16 @@
package postgres_monitoring_metrics
type PostgresMonitoringMetricType string
const (
// db resources (don't need extensions)
MetricsTypeDbRAM PostgresMonitoringMetricType = "DB_RAM_USAGE"
MetricsTypeDbIO PostgresMonitoringMetricType = "DB_IO_USAGE"
)
type PostgresMonitoringMetricValueType string
const (
MetricsValueTypeByte PostgresMonitoringMetricValueType = "BYTE"
MetricsValueTypePercent PostgresMonitoringMetricValueType = "PERCENT"
)

View File

@@ -0,0 +1,20 @@
package postgres_monitoring_metrics
import (
"time"
"github.com/google/uuid"
)
type PostgresMonitoringMetric struct {
ID uuid.UUID `json:"id" gorm:"column:id;primaryKey;type:uuid;default:gen_random_uuid()"`
DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;not null;type:uuid"`
Metric PostgresMonitoringMetricType `json:"metric" gorm:"column:metric;not null"`
ValueType PostgresMonitoringMetricValueType `json:"valueType" gorm:"column:value_type;not null"`
Value float64 `json:"value" gorm:"column:value;not null"`
CreatedAt time.Time `json:"createdAt" gorm:"column:created_at;not null"`
}
func (p *PostgresMonitoringMetric) TableName() string {
return "postgres_monitoring_metrics"
}

View File

@@ -0,0 +1,45 @@
package postgres_monitoring_metrics
import (
"postgresus-backend/internal/storage"
"time"
"github.com/google/uuid"
)
type PostgresMonitoringMetricRepository struct{}
func (r *PostgresMonitoringMetricRepository) Insert(metrics []PostgresMonitoringMetric) error {
return storage.GetDb().Create(&metrics).Error
}
func (r *PostgresMonitoringMetricRepository) GetByMetrics(
databaseID uuid.UUID,
metricType PostgresMonitoringMetricType,
from time.Time,
to time.Time,
) ([]PostgresMonitoringMetric, error) {
var metrics []PostgresMonitoringMetric
query := storage.GetDb().
Where("database_id = ?", databaseID).
Where("created_at >= ?", from).
Where("created_at <= ?", to).
Where("metric = ?", metricType)
if err := query.
Order("created_at DESC").
Find(&metrics).Error; err != nil {
return nil, err
}
return metrics, nil
}
func (r *PostgresMonitoringMetricRepository) RemoveOlderThan(
olderThan time.Time,
) error {
return storage.GetDb().
Where("created_at < ?", olderThan).
Delete(&PostgresMonitoringMetric{}).Error
}

View File

@@ -0,0 +1,42 @@
package postgres_monitoring_metrics
import (
"errors"
"postgresus-backend/internal/features/databases"
users_models "postgresus-backend/internal/features/users/models"
"time"
"github.com/google/uuid"
)
type PostgresMonitoringMetricService struct {
metricsRepository *PostgresMonitoringMetricRepository
databaseService *databases.DatabaseService
}
func (s *PostgresMonitoringMetricService) Insert(metrics []PostgresMonitoringMetric) error {
if len(metrics) == 0 {
return nil
}
return s.metricsRepository.Insert(metrics)
}
func (s *PostgresMonitoringMetricService) GetMetrics(
user *users_models.User,
databaseID uuid.UUID,
metricType PostgresMonitoringMetricType,
from time.Time,
to time.Time,
) ([]PostgresMonitoringMetric, error) {
database, err := s.databaseService.GetDatabaseByID(databaseID)
if err != nil {
return nil, err
}
if database.UserID != user.ID {
return nil, errors.New("database not found")
}
return s.metricsRepository.GetByMetrics(databaseID, metricType, from, to)
}

View File

@@ -0,0 +1,227 @@
package postgres_monitoring_metrics
import (
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/notifiers"
"postgresus-backend/internal/features/storages"
"postgresus-backend/internal/features/users"
users_models "postgresus-backend/internal/features/users/models"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
// Helper function to get a proper users_models.User for testing
func getTestUserModel() *users_models.User {
signInResponse := users.GetTestUser()
// Get the user service to retrieve the full user model
userService := users.GetUserService()
user, err := userService.GetFirstUser()
if err != nil {
panic(err)
}
// Verify we got the right user
if user.ID != signInResponse.UserID {
panic("user ID mismatch")
}
return user
}
func Test_GetMetrics_MetricsReturned(t *testing.T) {
// Setup test data
testUser := getTestUserModel()
testUserResponse := users.GetTestUser()
storage := storages.CreateTestStorage(testUserResponse.UserID)
notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
defer storages.RemoveTestStorage(storage.ID)
defer notifiers.RemoveTestNotifier(notifier)
defer databases.RemoveTestDatabase(database)
// Get service and repository
service := GetPostgresMonitoringMetricsService()
repository := GetPostgresMonitoringMetricsRepository()
// Create test metrics
now := time.Now().UTC()
testMetrics := []PostgresMonitoringMetric{
{
DatabaseID: database.ID,
Metric: MetricsTypeDbRAM,
ValueType: MetricsValueTypeByte,
Value: 1024000,
CreatedAt: now.Add(-2 * time.Hour),
},
{
DatabaseID: database.ID,
Metric: MetricsTypeDbRAM,
ValueType: MetricsValueTypeByte,
Value: 2048000,
CreatedAt: now.Add(-1 * time.Hour),
},
}
// Insert test metrics
err := repository.Insert(testMetrics)
assert.NoError(t, err)
// Test getting DB RAM metrics
from := now.Add(-3 * time.Hour)
to := now
metrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbRAM, from, to)
assert.NoError(t, err)
assert.Len(t, metrics, 2)
// Verify metrics are ordered by created_at DESC
assert.True(t, metrics[0].CreatedAt.After(metrics[1].CreatedAt))
assert.Equal(t, float64(2048000), metrics[0].Value)
assert.Equal(t, float64(1024000), metrics[1].Value)
assert.Equal(t, MetricsTypeDbRAM, metrics[0].Metric)
assert.Equal(t, MetricsValueTypeByte, metrics[0].ValueType)
// Test access control - create another user and test they can't access this database
anotherUser := &users_models.User{
ID: uuid.New(),
}
_, err = service.GetMetrics(anotherUser, database.ID, MetricsTypeDbRAM, from, to)
assert.Error(t, err)
assert.Contains(t, err.Error(), "database not found")
// Test with non-existent database
nonExistentDbID := uuid.New()
_, err = service.GetMetrics(testUser, nonExistentDbID, MetricsTypeDbRAM, from, to)
assert.Error(t, err)
}
func Test_GetMetricsWithPagination_PaginationWorks(t *testing.T) {
// Setup test data
testUser := getTestUserModel()
testUserResponse := users.GetTestUser()
storage := storages.CreateTestStorage(testUserResponse.UserID)
notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
defer storages.RemoveTestStorage(storage.ID)
defer notifiers.RemoveTestNotifier(notifier)
defer databases.RemoveTestDatabase(database)
// Get repository and service
repository := GetPostgresMonitoringMetricsRepository()
service := GetPostgresMonitoringMetricsService()
// Create many test metrics for pagination testing
now := time.Now().UTC()
testMetrics := []PostgresMonitoringMetric{}
for i := 0; i < 25; i++ {
testMetrics = append(testMetrics, PostgresMonitoringMetric{
DatabaseID: database.ID,
Metric: MetricsTypeDbRAM,
ValueType: MetricsValueTypeByte,
Value: float64(1000000 + i*100000),
CreatedAt: now.Add(-time.Duration(i) * time.Minute),
})
}
// Insert test metrics
err := repository.Insert(testMetrics)
assert.NoError(t, err)
// Test getting all metrics via service (should return all 25)
from := now.Add(-30 * time.Minute)
to := now
allMetrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbRAM, from, to)
assert.NoError(t, err)
assert.Len(t, allMetrics, 25)
// Verify they are ordered by created_at DESC (most recent first)
for i := 0; i < len(allMetrics)-1; i++ {
assert.True(t, allMetrics[i].CreatedAt.After(allMetrics[i+1].CreatedAt) ||
allMetrics[i].CreatedAt.Equal(allMetrics[i+1].CreatedAt))
}
// Note: Since the current repository doesn't have pagination methods,
// this test demonstrates the need for pagination but tests current behavior.
// TODO: Add GetByMetricsWithLimit method to repository and update service
t.Logf("All metrics count: %d (pagination methods should be added)", len(allMetrics))
}
func Test_GetMetricsWithFilterByType_FilterWorks(t *testing.T) {
// Setup test data
testUser := getTestUserModel()
testUserResponse := users.GetTestUser()
storage := storages.CreateTestStorage(testUserResponse.UserID)
notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
defer storages.RemoveTestStorage(storage.ID)
defer notifiers.RemoveTestNotifier(notifier)
defer databases.RemoveTestDatabase(database)
// Get service and repository
service := GetPostgresMonitoringMetricsService()
repository := GetPostgresMonitoringMetricsRepository()
// Create test metrics of different types
now := time.Now().UTC()
testMetrics := []PostgresMonitoringMetric{
// DB RAM metrics
{
DatabaseID: database.ID,
Metric: MetricsTypeDbRAM,
ValueType: MetricsValueTypeByte,
Value: 1024000,
CreatedAt: now.Add(-2 * time.Hour),
},
{
DatabaseID: database.ID,
Metric: MetricsTypeDbRAM,
ValueType: MetricsValueTypeByte,
Value: 2048000,
CreatedAt: now.Add(-1 * time.Hour),
},
}
// Insert test metrics
err := repository.Insert(testMetrics)
assert.NoError(t, err)
from := now.Add(-3 * time.Hour)
to := now
// Test filtering by DB RAM type
ramMetrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbRAM, from, to)
assert.NoError(t, err)
assert.Len(t, ramMetrics, 2)
for _, metric := range ramMetrics {
assert.Equal(t, MetricsTypeDbRAM, metric.Metric)
assert.Equal(t, MetricsValueTypeByte, metric.ValueType)
}
// Test filtering by non-existent metric type (should return empty)
ioMetrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbIO, from, to)
assert.NoError(t, err)
assert.Len(t, ioMetrics, 0)
// Test time filtering - get only recent metrics (last hour)
recentFrom := now.Add(-1 * time.Hour)
recentRamMetrics, err := service.GetMetrics(
testUser,
database.ID,
MetricsTypeDbRAM,
recentFrom,
to,
)
assert.NoError(t, err)
assert.Len(t, recentRamMetrics, 1) // Only the metric from 1 hour ago
assert.Equal(t, float64(2048000), recentRamMetrics[0].Value)
}
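The pagination TODO in Test_GetMetricsWithPagination could be satisfied by a limited repository query; one possible sketch (hypothetical method, not yet in the codebase):

func (r *PostgresMonitoringMetricRepository) GetByMetricsWithLimit(
	databaseID uuid.UUID,
	metricType PostgresMonitoringMetricType,
	from time.Time,
	to time.Time,
	limit int,
) ([]PostgresMonitoringMetric, error) {
	var metrics []PostgresMonitoringMetric
	// same filters as GetByMetrics, plus a LIMIT for paging through large ranges
	err := storage.GetDb().
		Where("database_id = ?", databaseID).
		Where("metric = ?", metricType).
		Where("created_at BETWEEN ? AND ?", from, to).
		Order("created_at DESC").
		Limit(limit).
		Find(&metrics).Error
	return metrics, err
}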

View File

@@ -0,0 +1,97 @@
package postgres_monitoring_settings
import (
"net/http"
"postgresus-backend/internal/features/users"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
type PostgresMonitoringSettingsController struct {
postgresMonitoringSettingsService *PostgresMonitoringSettingsService
userService *users.UserService
}
func (c *PostgresMonitoringSettingsController) RegisterRoutes(router *gin.RouterGroup) {
router.POST("/postgres-monitoring-settings/save", c.SaveSettings)
router.GET("/postgres-monitoring-settings/database/:id", c.GetSettingsByDbID)
}
// SaveSettings
// @Summary Save postgres monitoring settings
// @Description Save or update postgres monitoring settings for a database
// @Tags postgres-monitoring-settings
// @Accept json
// @Produce json
// @Param request body PostgresMonitoringSettings true "Postgres monitoring settings data"
// @Success 200 {object} PostgresMonitoringSettings
// @Failure 400
// @Failure 401
// @Router /postgres-monitoring-settings/save [post]
func (c *PostgresMonitoringSettingsController) SaveSettings(ctx *gin.Context) {
var requestDTO PostgresMonitoringSettings
if err := ctx.ShouldBindJSON(&requestDTO); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
authorizationHeader := ctx.GetHeader("Authorization")
if authorizationHeader == "" {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "authorization header is required"})
return
}
user, err := c.userService.GetUserFromToken(authorizationHeader)
if err != nil {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
return
}
err = c.postgresMonitoringSettingsService.Save(user, &requestDTO)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusOK, requestDTO)
}
// GetSettingsByDbID
// @Summary Get postgres monitoring settings by database ID
// @Description Get postgres monitoring settings for a specific database
// @Tags postgres-monitoring-settings
// @Produce json
// @Param id path string true "Database ID"
// @Success 200 {object} PostgresMonitoringSettings
// @Failure 400
// @Failure 401
// @Failure 404
// @Router /postgres-monitoring-settings/database/{id} [get]
func (c *PostgresMonitoringSettingsController) GetSettingsByDbID(ctx *gin.Context) {
dbID := ctx.Param("id")
if dbID == "" {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "database ID is required"})
return
}
authorizationHeader := ctx.GetHeader("Authorization")
if authorizationHeader == "" {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "authorization header is required"})
return
}
user, err := c.userService.GetUserFromToken(authorizationHeader)
if err != nil {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
return
}
dbUUID, err := uuid.Parse(dbID)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid database ID"})
return
}
settings, err := c.postgresMonitoringSettingsService.GetByDbID(user, dbUUID)
if err != nil {
ctx.JSON(http.StatusNotFound, gin.H{"error": "postgres monitoring settings not found"})
return
}
ctx.JSON(http.StatusOK, settings)
}

View File

@@ -0,0 +1,32 @@
package postgres_monitoring_settings
import (
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/users"
)
var postgresMonitoringSettingsRepository = &PostgresMonitoringSettingsRepository{}
var postgresMonitoringSettingsService = &PostgresMonitoringSettingsService{
databases.GetDatabaseService(),
postgresMonitoringSettingsRepository,
}
var postgresMonitoringSettingsController = &PostgresMonitoringSettingsController{
postgresMonitoringSettingsService,
users.GetUserService(),
}
func GetPostgresMonitoringSettingsController() *PostgresMonitoringSettingsController {
return postgresMonitoringSettingsController
}
func GetPostgresMonitoringSettingsService() *PostgresMonitoringSettingsService {
return postgresMonitoringSettingsService
}
func GetPostgresMonitoringSettingsRepository() *PostgresMonitoringSettingsRepository {
return postgresMonitoringSettingsRepository
}
func SetupDependencies() {
databases.GetDatabaseService().AddDbCreationListener(postgresMonitoringSettingsService)
}

View File

@@ -0,0 +1,72 @@
package postgres_monitoring_settings
import (
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/util/tools"
"strings"
"github.com/google/uuid"
"gorm.io/gorm"
)
type PostgresMonitoringSettings struct {
DatabaseID uuid.UUID `json:"databaseId" gorm:"primaryKey;column:database_id;not null"`
Database *databases.Database `json:"database" gorm:"foreignKey:DatabaseID"`
IsDbResourcesMonitoringEnabled bool `json:"isDbResourcesMonitoringEnabled" gorm:"column:is_db_resources_monitoring_enabled;not null"`
MonitoringIntervalSeconds int64 `json:"monitoringIntervalSeconds" gorm:"column:monitoring_interval_seconds;not null"`
InstalledExtensions []tools.PostgresqlExtension `json:"installedExtensions" gorm:"-"`
InstalledExtensionsRaw string `json:"-" gorm:"column:installed_extensions_raw"`
}
func (p *PostgresMonitoringSettings) TableName() string {
return "postgres_monitoring_settings"
}
func (p *PostgresMonitoringSettings) AfterFind(tx *gorm.DB) error {
if p.InstalledExtensionsRaw != "" {
rawExtensions := strings.Split(p.InstalledExtensionsRaw, ",")
p.InstalledExtensions = make([]tools.PostgresqlExtension, len(rawExtensions))
for i, ext := range rawExtensions {
p.InstalledExtensions[i] = tools.PostgresqlExtension(ext)
}
} else {
p.InstalledExtensions = []tools.PostgresqlExtension{}
}
return nil
}
func (p *PostgresMonitoringSettings) BeforeSave(tx *gorm.DB) error {
extensions := make([]string, len(p.InstalledExtensions))
for i, ext := range p.InstalledExtensions {
extensions[i] = string(ext)
}
p.InstalledExtensionsRaw = strings.Join(extensions, ",")
return nil
}
func (p *PostgresMonitoringSettings) AddInstalledExtensions(
extensions []tools.PostgresqlExtension,
) {
for _, ext := range extensions {
exists := false
for _, existing := range p.InstalledExtensions {
if existing == ext {
exists = true
break
}
}
if !exists {
p.InstalledExtensions = append(p.InstalledExtensions, ext)
}
}
}
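A quick round-trip sketch of the serialization hooks above (extension names are illustrative):

s := &PostgresMonitoringSettings{
	InstalledExtensions: []tools.PostgresqlExtension{"pg_stat_statements", "pg_buffercache"},
}
_ = s.BeforeSave(nil) // the hook ignores tx, so nil is fine here
fmt.Println(s.InstalledExtensionsRaw) // "pg_stat_statements,pg_buffercache"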

View File

@@ -0,0 +1,65 @@
package postgres_monitoring_settings
import (
"errors"
"postgresus-backend/internal/storage"
"github.com/google/uuid"
"gorm.io/gorm"
)
type PostgresMonitoringSettingsRepository struct{}
func (r *PostgresMonitoringSettingsRepository) Save(settings *PostgresMonitoringSettings) error {
return storage.GetDb().Save(settings).Error
}
func (r *PostgresMonitoringSettingsRepository) GetByDbID(
dbID uuid.UUID,
) (*PostgresMonitoringSettings, error) {
var settings PostgresMonitoringSettings
if err := storage.
GetDb().
Where("database_id = ?", dbID).
First(&settings).Error; err != nil {
return nil, err
}
return &settings, nil
}
func (r *PostgresMonitoringSettingsRepository) GetByDbIDWithRelations(
dbID uuid.UUID,
) (*PostgresMonitoringSettings, error) {
var settings PostgresMonitoringSettings
if err := storage.
GetDb().
Preload("Database").
Where("database_id = ?", dbID).
First(&settings).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, err
}
return &settings, nil
}
func (r *PostgresMonitoringSettingsRepository) GetAllDbsWithEnabledDbMonitoring() (
[]PostgresMonitoringSettings, error,
) {
var settings []PostgresMonitoringSettings
if err := storage.
GetDb().
Where("is_db_resources_monitoring_enabled = ?", true).
Find(&settings).Error; err != nil {
return nil, err
}
return settings, nil
}

View File

@@ -0,0 +1,92 @@
package postgres_monitoring_settings
import (
"errors"
"postgresus-backend/internal/features/databases"
users_models "postgresus-backend/internal/features/users/models"
"postgresus-backend/internal/util/logger"
"github.com/google/uuid"
)
var log = logger.GetLogger()
type PostgresMonitoringSettingsService struct {
databaseService *databases.DatabaseService
postgresMonitoringSettingsRepository *PostgresMonitoringSettingsRepository
}
func (s *PostgresMonitoringSettingsService) OnDatabaseCreated(dbID uuid.UUID) {
db, err := s.databaseService.GetDatabaseByID(dbID)
if err != nil {
return
}
if db.Type != databases.DatabaseTypePostgres {
return
}
settings := &PostgresMonitoringSettings{
DatabaseID: dbID,
IsDbResourcesMonitoringEnabled: true,
MonitoringIntervalSeconds: 60,
}
err = s.postgresMonitoringSettingsRepository.Save(settings)
if err != nil {
log.Error("failed to save postgres monitoring settings", "error", err)
}
}
func (s *PostgresMonitoringSettingsService) Save(
user *users_models.User,
settings *PostgresMonitoringSettings,
) error {
db, err := s.databaseService.GetDatabaseByID(settings.DatabaseID)
if err != nil {
return err
}
if db.UserID != user.ID {
return errors.New("user does not have access to this database")
}
return s.postgresMonitoringSettingsRepository.Save(settings)
}
func (s *PostgresMonitoringSettingsService) GetByDbID(
user *users_models.User,
dbID uuid.UUID,
) (*PostgresMonitoringSettings, error) {
dbSettings, err := s.postgresMonitoringSettingsRepository.GetByDbIDWithRelations(dbID)
if err != nil {
return nil, err
}
if dbSettings == nil {
// settings are created lazily on first access
s.OnDatabaseCreated(dbID)
dbSettings, err = s.postgresMonitoringSettingsRepository.GetByDbIDWithRelations(dbID)
if err != nil {
return nil, err
}
if dbSettings == nil {
return nil, errors.New("postgres monitoring settings not found")
}
}
if dbSettings.Database.UserID != user.ID {
return nil, errors.New("user does not have access to this database")
}
return dbSettings, nil
}
func (s *PostgresMonitoringSettingsService) GetAllDbsWithEnabledDbMonitoring() (
[]PostgresMonitoringSettings, error,
) {
return s.postgresMonitoringSettingsRepository.GetAllDbsWithEnabledDbMonitoring()
}

View File

@@ -0,0 +1,108 @@
package postgres_monitoring_settings
import (
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/notifiers"
"postgresus-backend/internal/features/storages"
"postgresus-backend/internal/features/users"
users_models "postgresus-backend/internal/features/users/models"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
// Helper function to get a proper users_models.User for testing
func getTestUserModel() *users_models.User {
signInResponse := users.GetTestUser()
// Get the user service to retrieve the full user model
userService := users.GetUserService()
user, err := userService.GetFirstUser()
if err != nil {
panic(err)
}
// Verify we got the right user
if user.ID != signInResponse.UserID {
panic("user ID mismatch")
}
return user
}
func Test_DatabaseCreated_SettingsCreated(t *testing.T) {
// Get or create a test user
testUserResponse := users.GetTestUser()
storage := storages.CreateTestStorage(testUserResponse.UserID)
notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
defer storages.RemoveTestStorage(storage.ID)
defer notifiers.RemoveTestNotifier(notifier)
defer databases.RemoveTestDatabase(database)
// Get the monitoring settings service
service := GetPostgresMonitoringSettingsService()
// Execute - trigger the database creation event
service.OnDatabaseCreated(database.ID)
// Verify settings were created by attempting to retrieve them
// Note: Since we can't easily mock the extension installation without major changes,
// we focus on testing the settings creation and default values logic
settingsRepo := GetPostgresMonitoringSettingsRepository()
settings, err := settingsRepo.GetByDbID(database.ID)
assert.NoError(t, err)
assert.NotNil(t, settings)
// Verify default settings values
assert.Equal(t, database.ID, settings.DatabaseID)
assert.Equal(t, int64(60), settings.MonitoringIntervalSeconds)
assert.True(t, settings.IsDbResourcesMonitoringEnabled) // Always enabled
}
func Test_GetSettingsByDbID_SettingsReturned(t *testing.T) {
// Get or create a test user
testUser := getTestUserModel()
testUserResponse := users.GetTestUser()
storage := storages.CreateTestStorage(testUserResponse.UserID)
notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
defer storages.RemoveTestStorage(storage.ID)
defer notifiers.RemoveTestNotifier(notifier)
defer databases.RemoveTestDatabase(database)
service := GetPostgresMonitoringSettingsService()
// Test 1: Get settings that don't exist yet - should auto-create them
settings, err := service.GetByDbID(testUser, database.ID)
assert.NoError(t, err)
assert.NotNil(t, settings)
assert.Equal(t, database.ID, settings.DatabaseID)
assert.Equal(t, int64(60), settings.MonitoringIntervalSeconds)
assert.True(t, settings.IsDbResourcesMonitoringEnabled) // Always enabled
// Test 2: Get settings that already exist
existingSettings, err := service.GetByDbID(testUser, database.ID)
assert.NoError(t, err)
assert.NotNil(t, existingSettings)
assert.Equal(t, settings.DatabaseID, existingSettings.DatabaseID)
assert.Equal(t, settings.MonitoringIntervalSeconds, existingSettings.MonitoringIntervalSeconds)
// Test 3: Access control - create another user and test they can't access this database
anotherUser := &users_models.User{
ID: uuid.New(),
// Other fields can be empty for this test
}
_, err = service.GetByDbID(anotherUser, database.ID)
assert.Error(t, err)
assert.Contains(t, err.Error(), "user does not have access to this database")
// Test 4: Try to get settings for non-existent database
nonExistentDbID := uuid.New()
_, err = service.GetByDbID(testUser, nonExistentDbID)
assert.Error(t, err) // Should fail because database doesn't exist
}

View File

@@ -8,4 +8,5 @@ const (
NotifierTypeWebhook NotifierType = "WEBHOOK"
NotifierTypeSlack NotifierType = "SLACK"
NotifierTypeDiscord NotifierType = "DISCORD"
NotifierTypeTeams NotifierType = "TEAMS"
)

View File

@@ -6,6 +6,7 @@ import (
discord_notifier "postgresus-backend/internal/features/notifiers/models/discord"
"postgresus-backend/internal/features/notifiers/models/email_notifier"
slack_notifier "postgresus-backend/internal/features/notifiers/models/slack"
teams_notifier "postgresus-backend/internal/features/notifiers/models/teams"
telegram_notifier "postgresus-backend/internal/features/notifiers/models/telegram"
webhook_notifier "postgresus-backend/internal/features/notifiers/models/webhook"
@@ -20,11 +21,12 @@ type Notifier struct {
LastSendError *string `json:"lastSendError" gorm:"column:last_send_error;type:text"`
// specific notifier
TelegramNotifier *telegram_notifier.TelegramNotifier `json:"telegramNotifier" gorm:"foreignKey:NotifierID"`
EmailNotifier *email_notifier.EmailNotifier `json:"emailNotifier" gorm:"foreignKey:NotifierID"`
WebhookNotifier *webhook_notifier.WebhookNotifier `json:"webhookNotifier" gorm:"foreignKey:NotifierID"`
SlackNotifier *slack_notifier.SlackNotifier `json:"slackNotifier" gorm:"foreignKey:NotifierID"`
DiscordNotifier *discord_notifier.DiscordNotifier `json:"discordNotifier" gorm:"foreignKey:NotifierID"`
TelegramNotifier *telegram_notifier.TelegramNotifier `json:"telegramNotifier" gorm:"foreignKey:NotifierID"`
EmailNotifier *email_notifier.EmailNotifier `json:"emailNotifier" gorm:"foreignKey:NotifierID"`
WebhookNotifier *webhook_notifier.WebhookNotifier `json:"webhookNotifier" gorm:"foreignKey:NotifierID"`
SlackNotifier *slack_notifier.SlackNotifier `json:"slackNotifier" gorm:"foreignKey:NotifierID"`
DiscordNotifier *discord_notifier.DiscordNotifier `json:"discordNotifier" gorm:"foreignKey:NotifierID"`
TeamsNotifier *teams_notifier.TeamsNotifier `json:"teamsNotifier,omitempty" gorm:"foreignKey:NotifierID;constraint:OnDelete:CASCADE"`
}
func (n *Notifier) TableName() string {
@@ -64,6 +66,8 @@ func (n *Notifier) getSpecificNotifier() NotificationSender {
return n.SlackNotifier
case NotifierTypeDiscord:
return n.DiscordNotifier
case NotifierTypeTeams:
return n.TeamsNotifier
default:
panic("unknown notifier type: " + string(n.NotifierType))
}

View File

@@ -0,0 +1,96 @@
package teams_notifier
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"net/url"
"github.com/google/uuid"
)
type TeamsNotifier struct {
NotifierID uuid.UUID `gorm:"type:uuid;primaryKey;column:notifier_id" json:"notifierId"`
WebhookURL string `gorm:"type:text;not null;column:power_automate_url" json:"powerAutomateUrl"`
}
func (TeamsNotifier) TableName() string {
return "teams_notifiers"
}
func (n *TeamsNotifier) Validate() error {
if n.WebhookURL == "" {
return errors.New("webhook_url is required")
}
u, err := url.Parse(n.WebhookURL)
if err != nil || (u.Scheme != "http" && u.Scheme != "https") {
return errors.New("invalid webhook_url")
}
return nil
}
type cardAttachment struct {
ContentType string `json:"contentType"`
Content interface{} `json:"content"`
}
type payload struct {
Title string `json:"title"`
Text string `json:"text"`
Attachments []cardAttachment `json:"attachments,omitempty"`
}
func (n *TeamsNotifier) Send(logger *slog.Logger, heading, message string) error {
if err := n.Validate(); err != nil {
return err
}
card := map[string]any{
"type": "AdaptiveCard",
"version": "1.4",
"body": []any{
map[string]any{
"type": "TextBlock",
"size": "Medium",
"weight": "Bolder",
"text": heading,
},
map[string]any{"type": "TextBlock", "wrap": true, "text": message},
},
}
p := payload{
Title: heading,
Text: message,
Attachments: []cardAttachment{
{ContentType: "application/vnd.microsoft.card.adaptive", Content: card},
},
}
body, err := json.Marshal(p)
if err != nil {
return fmt.Errorf("failed to marshal teams payload: %w", err)
}
req, err := http.NewRequest(http.MethodPost, n.WebhookURL, bytes.NewReader(body))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer func() {
if closeErr := resp.Body.Close(); closeErr != nil {
logger.Error("failed to close response body", "error", closeErr)
}
}()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("teams webhook returned status %d", resp.StatusCode)
}
return nil
}
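A usage sketch (the URL is a placeholder, not a real Power Automate trigger URL):

notifier := &TeamsNotifier{
	WebhookURL: "https://example.logic.azure.com/workflows/abc/triggers/manual/paths/invoke",
}
if err := notifier.Send(slog.Default(), "✅ [prod-db] DB is online", "Backup completed successfully"); err != nil {
	slog.Default().Error("failed to send Teams notification", "error", err)
}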

View File

@@ -7,6 +7,7 @@ import (
"log/slog"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/google/uuid"
@@ -16,6 +17,7 @@ type TelegramNotifier struct {
NotifierID uuid.UUID `json:"notifierId" gorm:"primaryKey;column:notifier_id"`
BotToken string `json:"botToken" gorm:"not null;column:bot_token"`
TargetChatID string `json:"targetChatId" gorm:"not null;column:target_chat_id"`
ThreadID *int64 `json:"threadId" gorm:"column:thread_id"`
}
func (t *TelegramNotifier) TableName() string {
@@ -47,6 +49,10 @@ func (t *TelegramNotifier) Send(logger *slog.Logger, heading string, message str
data.Set("text", fullMessage)
data.Set("parse_mode", "HTML")
if t.ThreadID != nil && *t.ThreadID != 0 {
data.Set("message_thread_id", strconv.FormatInt(*t.ThreadID, 10))
}
req, err := http.NewRequest("POST", apiURL, strings.NewReader(data.Encode()))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)

View File

@@ -13,6 +13,7 @@ func (r *NotifierRepository) Save(notifier *Notifier) (*Notifier, error) {
db := storage.GetDb()
err := db.Transaction(func(tx *gorm.DB) error {
switch notifier.NotifierType {
case NotifierTypeTelegram:
if notifier.TelegramNotifier != nil {
@@ -34,30 +35,36 @@ func (r *NotifierRepository) Save(notifier *Notifier) (*Notifier, error) {
if notifier.DiscordNotifier != nil {
notifier.DiscordNotifier.NotifierID = notifier.ID
}
case NotifierTypeTeams:
if notifier.TeamsNotifier != nil {
notifier.TeamsNotifier.NotifierID = notifier.ID
}
}
if notifier.ID == uuid.Nil {
if err := tx.Create(notifier).
if err := tx.
Omit(
"TelegramNotifier",
"EmailNotifier",
"WebhookNotifier",
"SlackNotifier",
"DiscordNotifier",
"TeamsNotifier",
).
Error; err != nil {
Create(notifier).Error; err != nil {
return err
}
} else {
if err := tx.Save(notifier).
if err := tx.
Omit(
"TelegramNotifier",
"EmailNotifier",
"WebhookNotifier",
"SlackNotifier",
"DiscordNotifier",
"TeamsNotifier",
).
Error; err != nil {
Save(notifier).Error; err != nil {
return err
}
}
@@ -65,39 +72,46 @@ func (r *NotifierRepository) Save(notifier *Notifier) (*Notifier, error) {
switch notifier.NotifierType {
case NotifierTypeTelegram:
if notifier.TelegramNotifier != nil {
notifier.TelegramNotifier.NotifierID = notifier.ID // Ensure ID is set
notifier.TelegramNotifier.NotifierID = notifier.ID
if err := tx.Save(notifier.TelegramNotifier).Error; err != nil {
return err
}
}
case NotifierTypeEmail:
if notifier.EmailNotifier != nil {
notifier.EmailNotifier.NotifierID = notifier.ID // Ensure ID is set
notifier.EmailNotifier.NotifierID = notifier.ID
if err := tx.Save(notifier.EmailNotifier).Error; err != nil {
return err
}
}
case NotifierTypeWebhook:
if notifier.WebhookNotifier != nil {
notifier.WebhookNotifier.NotifierID = notifier.ID // Ensure ID is set
notifier.WebhookNotifier.NotifierID = notifier.ID
if err := tx.Save(notifier.WebhookNotifier).Error; err != nil {
return err
}
}
case NotifierTypeSlack:
if notifier.SlackNotifier != nil {
notifier.SlackNotifier.NotifierID = notifier.ID // Ensure ID is set
notifier.SlackNotifier.NotifierID = notifier.ID
if err := tx.Save(notifier.SlackNotifier).Error; err != nil {
return err
}
}
case NotifierTypeDiscord:
if notifier.DiscordNotifier != nil {
notifier.DiscordNotifier.NotifierID = notifier.ID // Ensure ID is set
notifier.DiscordNotifier.NotifierID = notifier.ID
if err := tx.Save(notifier.DiscordNotifier).Error; err != nil {
return err
}
}
case NotifierTypeTeams:
if notifier.TeamsNotifier != nil {
notifier.TeamsNotifier.NotifierID = notifier.ID
if err := tx.Save(notifier.TeamsNotifier).Error; err != nil {
return err
}
}
}
return nil
@@ -120,6 +134,7 @@ func (r *NotifierRepository) FindByID(id uuid.UUID) (*Notifier, error) {
Preload("WebhookNotifier").
Preload("SlackNotifier").
Preload("DiscordNotifier").
Preload("TeamsNotifier").
Where("id = ?", id).
First(&notifier).Error; err != nil {
return nil, err
@@ -138,6 +153,7 @@ func (r *NotifierRepository) FindByUserID(userID uuid.UUID) ([]*Notifier, error)
Preload("WebhookNotifier").
Preload("SlackNotifier").
Preload("DiscordNotifier").
Preload("TeamsNotifier").
Where("user_id = ?", userID).
Order("name ASC").
Find(&notifiers).Error; err != nil {
@@ -149,7 +165,7 @@ func (r *NotifierRepository) FindByUserID(userID uuid.UUID) ([]*Notifier, error)
func (r *NotifierRepository) Delete(notifier *Notifier) error {
return storage.GetDb().Transaction(func(tx *gorm.DB) error {
// Delete specific notifier based on type
switch notifier.NotifierType {
case NotifierTypeTelegram:
if notifier.TelegramNotifier != nil {
@@ -181,9 +197,14 @@ func (r *NotifierRepository) Delete(notifier *Notifier) error {
return err
}
}
case NotifierTypeTeams:
if notifier.TeamsNotifier != nil {
if err := tx.Delete(notifier.TeamsNotifier).Error; err != nil {
return err
}
}
}
// Delete the main notifier
return tx.Delete(notifier).Error
})
}

View File

@@ -20,6 +20,7 @@ import (
pgtypes "postgresus-backend/internal/features/databases/databases/postgresql"
"postgresus-backend/internal/features/restores/models"
"postgresus-backend/internal/features/storages"
files_utils "postgresus-backend/internal/util/files"
"postgresus-backend/internal/util/tools"
"github.com/google/uuid"
@@ -163,7 +164,7 @@ func (uc *RestorePostgresqlBackupUsecase) restoreFromStorage(
// Add the temporary backup file as the last argument to pg_restore
args = append(args, tempBackupFile)
return uc.executePgRestore(ctx, pgBin, args, pgpassFile, pgConfig)
return uc.executePgRestore(ctx, pgBin, args, pgpassFile, pgConfig, backup)
}
// downloadBackupToTempFile downloads backup data from storage to a temporary file
@@ -172,6 +173,13 @@ func (uc *RestorePostgresqlBackupUsecase) downloadBackupToTempFile(
backup *backups.Backup,
storage *storages.Storage,
) (string, func(), error) {
err := files_utils.EnsureDirectories([]string{
config.GetEnv().TempFolder,
})
if err != nil {
return "", nil, fmt.Errorf("failed to ensure directories: %w", err)
}
// Create temporary directory for backup data
tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "restore_"+uuid.New().String())
if err != nil {
@@ -222,11 +230,8 @@ func (uc *RestorePostgresqlBackupUsecase) downloadBackupToTempFile(
return "", nil, fmt.Errorf("failed to write backup to temporary file: %w", err)
}
// Close the temp file to ensure all data is written
if err := tempFile.Close(); err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to close temporary backup file: %w", err)
}
// The deferred cleanup closes the temp file; closing it explicitly here
// as well would cause a double-close error
uc.logger.Info("Backup file written to temporary location", "tempFile", tempBackupFile)
return tempBackupFile, cleanupFunc, nil
@@ -239,6 +244,7 @@ func (uc *RestorePostgresqlBackupUsecase) executePgRestore(
args []string,
pgpassFile string,
pgConfig *pgtypes.PostgresqlDatabase,
backup *backups.Backup,
) error {
cmd := exec.CommandContext(ctx, pgBin, args...)
uc.logger.Info("Executing PostgreSQL restore command", "command", cmd.String())
@@ -287,7 +293,7 @@ func (uc *RestorePostgresqlBackupUsecase) executePgRestore(
return fmt.Errorf("restore cancelled due to shutdown")
}
return uc.handlePgRestoreError(waitErr, stderrOutput, pgBin, args)
return uc.handlePgRestoreError(waitErr, stderrOutput, pgBin, args, backup, pgConfig)
}
return nil
@@ -339,6 +345,8 @@ func (uc *RestorePostgresqlBackupUsecase) handlePgRestoreError(
stderrOutput []byte,
pgBin string,
args []string,
backup *backups.Backup,
pgConfig *pgtypes.PostgresqlDatabase,
) error {
// Enhanced error handling for PostgreSQL connection and restore issues
stderrStr := string(stderrOutput)
@@ -407,8 +415,20 @@ func (uc *RestorePostgresqlBackupUsecase) handlePgRestoreError(
stderrStr,
)
} else if containsIgnoreCase(stderrStr, "database") && containsIgnoreCase(stderrStr, "does not exist") {
backupDbName := "unknown"
if backup.Database != nil && backup.Database.Postgresql != nil && backup.Database.Postgresql.Database != nil {
backupDbName = *backup.Database.Postgresql.Database
}
targetDbName := "unknown"
if pgConfig.Database != nil {
targetDbName = *pgConfig.Database
}
errorMsg = fmt.Sprintf(
"Target database does not exist. Create the database before restoring. stderr: %s",
"Target database does not exist (backup db %s, not found %s). Create the database before restoring. stderr: %s",
backupDbName,
targetDbName,
stderrStr,
)
}

View File

@@ -6,4 +6,5 @@ const (
StorageTypeLocal StorageType = "LOCAL"
StorageTypeS3 StorageType = "S3"
StorageTypeGoogleDrive StorageType = "GOOGLE_DRIVE"
StorageTypeNAS StorageType = "NAS"
)

View File

@@ -6,6 +6,7 @@ import (
"log/slog"
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
"github.com/google/uuid"
@@ -22,6 +23,7 @@ type Storage struct {
LocalStorage *local_storage.LocalStorage `json:"localStorage" gorm:"foreignKey:StorageID"`
S3Storage *s3_storage.S3Storage `json:"s3Storage" gorm:"foreignKey:StorageID"`
GoogleDriveStorage *google_drive_storage.GoogleDriveStorage `json:"googleDriveStorage" gorm:"foreignKey:StorageID"`
NASStorage *nas_storage.NASStorage `json:"nasStorage" gorm:"foreignKey:StorageID"`
}
func (s *Storage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error {
@@ -69,6 +71,8 @@ func (s *Storage) getSpecificStorage() StorageFileSaver {
return s.S3Storage
case StorageTypeGoogleDrive:
return s.GoogleDriveStorage
case StorageTypeNAS:
return s.NASStorage
default:
panic("invalid storage type: " + string(s.Type))
}

View File

@@ -10,8 +10,10 @@ import (
"postgresus-backend/internal/config"
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
"postgresus-backend/internal/util/logger"
"strconv"
"testing"
"time"
@@ -44,6 +46,14 @@ func Test_Storage_BasicOperations(t *testing.T) {
require.NoError(t, err, "Failed to setup test file")
defer os.Remove(testFilePath)
// Setup NAS port
nasPort := 445
if portStr := config.GetEnv().TestNASPort; portStr != "" {
if port, err := strconv.Atoi(portStr); err == nil {
nasPort = port
}
}
// Run tests
testCases := []struct {
name string
@@ -65,14 +75,39 @@ func Test_Storage_BasicOperations(t *testing.T) {
},
},
{
name: "NASStorage",
storage: &nas_storage.NASStorage{
StorageID: uuid.New(),
Host: "localhost",
Port: nasPort,
Share: "backups",
Username: "testuser",
Password: "testpassword",
UseSSL: false,
Domain: "",
Path: "test-files",
},
},
}
// Add Google Drive storage test only if environment variables are available
env := config.GetEnv()
if env.TestGoogleDriveClientID != "" && env.TestGoogleDriveClientSecret != "" &&
env.TestGoogleDriveTokenJSON != "" {
testCases = append(testCases, struct {
name string
storage StorageFileSaver
}{
name: "GoogleDriveStorage",
storage: &google_drive_storage.GoogleDriveStorage{
StorageID: uuid.New(),
ClientID: config.GetEnv().TestGoogleDriveClientID,
ClientSecret: config.GetEnv().TestGoogleDriveClientSecret,
TokenJSON: config.GetEnv().TestGoogleDriveTokenJSON,
ClientID: env.TestGoogleDriveClientID,
ClientSecret: env.TestGoogleDriveClientSecret,
TokenJSON: env.TestGoogleDriveTokenJSON,
},
},
})
} else {
t.Log("Skipping Google Drive storage test: missing environment variables")
}
for _, tc := range testCases {
@@ -197,8 +232,6 @@ func setupS3Container(ctx context.Context) (*S3Container, error) {
func validateEnvVariables(t *testing.T) {
env := config.GetEnv()
assert.NotEmpty(t, env.TestGoogleDriveClientID, "TEST_GOOGLE_DRIVE_CLIENT_ID is empty")
assert.NotEmpty(t, env.TestGoogleDriveClientSecret, "TEST_GOOGLE_DRIVE_CLIENT_SECRET is empty")
assert.NotEmpty(t, env.TestGoogleDriveTokenJSON, "TEST_GOOGLE_DRIVE_TOKEN_JSON is empty")
assert.NotEmpty(t, env.TestMinioPort, "TEST_MINIO_PORT is empty")
assert.NotEmpty(t, env.TestNASPort, "TEST_NAS_PORT is empty")
}

View File

@@ -7,6 +7,7 @@ import (
"os"
"path/filepath"
"postgresus-backend/internal/config"
files_utils "postgresus-backend/internal/util/files"
"github.com/google/uuid"
)
@@ -25,9 +26,11 @@ func (l *LocalStorage) TableName() string {
func (l *LocalStorage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error {
logger.Info("Starting to save file to local storage", "fileId", fileID.String())
if err := l.ensureDirectories(); err != nil {
logger.Error("Failed to ensure directories", "fileId", fileID.String(), "error", err)
return err
err := files_utils.EnsureDirectories([]string{
config.GetEnv().TempFolder,
})
if err != nil {
return fmt.Errorf("failed to ensure directories: %w", err)
}
tempFilePath := filepath.Join(config.GetEnv().TempFolder, fileID.String())
@@ -62,8 +65,8 @@ func (l *LocalStorage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.R
return fmt.Errorf("failed to sync temp file: %w", err)
}
err = tempFile.Close()
if err != nil {
// Close the temp file explicitly before moving it (required on Windows)
if err = tempFile.Close(); err != nil {
logger.Error("Failed to close temp file", "fileId", fileID.String(), "error", err)
return fmt.Errorf("failed to close temp file: %w", err)
}
@@ -134,14 +137,10 @@ func (l *LocalStorage) DeleteFile(fileID uuid.UUID) error {
}
func (l *LocalStorage) Validate() error {
return l.ensureDirectories()
return nil
}
func (l *LocalStorage) TestConnection() error {
if err := l.ensureDirectories(); err != nil {
return err
}
testFile := filepath.Join(config.GetEnv().TempFolder, "test_connection")
f, err := os.Create(testFile)
if err != nil {
@@ -157,19 +156,3 @@ func (l *LocalStorage) TestConnection() error {
return nil
}
func (l *LocalStorage) ensureDirectories() error {
// Standard permissions for directories: owner
// can read/write/execute, others can read/execute
const directoryPermissions = 0755
if err := os.MkdirAll(config.GetEnv().DataFolder, directoryPermissions); err != nil {
return fmt.Errorf("failed to create backups directory: %w", err)
}
if err := os.MkdirAll(config.GetEnv().TempFolder, directoryPermissions); err != nil {
return fmt.Errorf("failed to create temp directory: %w", err)
}
return nil
}

View File

@@ -0,0 +1,401 @@
package nas_storage
import (
"crypto/tls"
"errors"
"fmt"
"io"
"log/slog"
"net"
"path/filepath"
"strings"
"time"
"github.com/google/uuid"
"github.com/hirochachacha/go-smb2"
)
type NASStorage struct {
StorageID uuid.UUID `json:"storageId" gorm:"primaryKey;type:uuid;column:storage_id"`
Host string `json:"host" gorm:"not null;type:text;column:host"`
Port int `json:"port" gorm:"not null;default:445;column:port"`
Share string `json:"share" gorm:"not null;type:text;column:share"`
Username string `json:"username" gorm:"not null;type:text;column:username"`
Password string `json:"password" gorm:"not null;type:text;column:password"`
UseSSL bool `json:"useSsl" gorm:"not null;default:false;column:use_ssl"`
Domain string `json:"domain" gorm:"type:text;column:domain"`
Path string `json:"path" gorm:"type:text;column:path"`
}
func (n *NASStorage) TableName() string {
return "nas_storages"
}
func (n *NASStorage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error {
logger.Info("Starting to save file to NAS storage", "fileId", fileID.String(), "host", n.Host)
session, err := n.createSession()
if err != nil {
logger.Error("Failed to create NAS session", "fileId", fileID.String(), "error", err)
return fmt.Errorf("failed to create NAS session: %w", err)
}
defer func() {
if logoffErr := session.Logoff(); logoffErr != nil {
logger.Error(
"Failed to logoff NAS session",
"fileId",
fileID.String(),
"error",
logoffErr,
)
}
}()
fs, err := session.Mount(n.Share)
if err != nil {
logger.Error(
"Failed to mount NAS share",
"fileId",
fileID.String(),
"share",
n.Share,
"error",
err,
)
return fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
}
defer func() {
if umountErr := fs.Umount(); umountErr != nil {
logger.Error(
"Failed to unmount NAS share",
"fileId",
fileID.String(),
"error",
umountErr,
)
}
}()
// Ensure the directory exists
if n.Path != "" {
if err := n.ensureDirectory(fs, n.Path); err != nil {
logger.Error(
"Failed to ensure directory",
"fileId",
fileID.String(),
"path",
n.Path,
"error",
err,
)
return fmt.Errorf("failed to ensure directory: %w", err)
}
}
filePath := n.getFilePath(fileID.String())
logger.Debug("Creating file on NAS", "fileId", fileID.String(), "filePath", filePath)
nasFile, err := fs.Create(filePath)
if err != nil {
logger.Error(
"Failed to create file on NAS",
"fileId",
fileID.String(),
"filePath",
filePath,
"error",
err,
)
return fmt.Errorf("failed to create file on NAS: %w", err)
}
defer func() {
if closeErr := nasFile.Close(); closeErr != nil {
logger.Error("Failed to close NAS file", "fileId", fileID.String(), "error", closeErr)
}
}()
logger.Debug("Copying file data to NAS", "fileId", fileID.String())
_, err = io.Copy(nasFile, file)
if err != nil {
logger.Error("Failed to write file to NAS", "fileId", fileID.String(), "error", err)
return fmt.Errorf("failed to write file to NAS: %w", err)
}
logger.Info(
"Successfully saved file to NAS storage",
"fileId",
fileID.String(),
"filePath",
filePath,
)
return nil
}
func (n *NASStorage) GetFile(fileID uuid.UUID) (io.ReadCloser, error) {
session, err := n.createSession()
if err != nil {
return nil, fmt.Errorf("failed to create NAS session: %w", err)
}
fs, err := session.Mount(n.Share)
if err != nil {
_ = session.Logoff()
return nil, fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
}
filePath := n.getFilePath(fileID.String())
// Check if file exists
_, err = fs.Stat(filePath)
if err != nil {
_ = fs.Umount()
_ = session.Logoff()
return nil, fmt.Errorf("file not found: %s", fileID.String())
}
nasFile, err := fs.Open(filePath)
if err != nil {
_ = fs.Umount()
_ = session.Logoff()
return nil, fmt.Errorf("failed to open file from NAS: %w", err)
}
// Return a wrapped reader that cleans up resources when closed
return &nasFileReader{
file: nasFile,
fs: fs,
session: session,
}, nil
}
func (n *NASStorage) DeleteFile(fileID uuid.UUID) error {
session, err := n.createSession()
if err != nil {
return fmt.Errorf("failed to create NAS session: %w", err)
}
defer func() {
_ = session.Logoff()
}()
fs, err := session.Mount(n.Share)
if err != nil {
return fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
}
defer func() {
_ = fs.Umount()
}()
filePath := n.getFilePath(fileID.String())
// Check if file exists before trying to delete
_, err = fs.Stat(filePath)
if err != nil {
// File doesn't exist, consider it already deleted
return nil
}
err = fs.Remove(filePath)
if err != nil {
return fmt.Errorf("failed to delete file from NAS: %w", err)
}
return nil
}
func (n *NASStorage) Validate() error {
if n.Host == "" {
return errors.New("NAS host is required")
}
if n.Share == "" {
return errors.New("NAS share is required")
}
if n.Username == "" {
return errors.New("NAS username is required")
}
if n.Password == "" {
return errors.New("NAS password is required")
}
if n.Port <= 0 || n.Port > 65535 {
return errors.New("NAS port must be between 1 and 65535")
}
// Test the configuration by creating a session
return n.TestConnection()
}
func (n *NASStorage) TestConnection() error {
session, err := n.createSession()
if err != nil {
return fmt.Errorf("failed to connect to NAS: %w", err)
}
defer func() {
_ = session.Logoff()
}()
// Try to mount the share to verify access
fs, err := session.Mount(n.Share)
if err != nil {
return fmt.Errorf("failed to access share '%s': %w", n.Share, err)
}
defer func() {
_ = fs.Umount()
}()
// If path is specified, check if it exists or can be created
if n.Path != "" {
if err := n.ensureDirectory(fs, n.Path); err != nil {
return fmt.Errorf("failed to access or create path '%s': %w", n.Path, err)
}
}
return nil
}
func (n *NASStorage) createSession() (*smb2.Session, error) {
// Create connection with timeout
conn, err := n.createConnection()
if err != nil {
return nil, err
}
// Create SMB2 dialer
d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: n.Username,
Password: n.Password,
Domain: n.Domain,
},
}
// Create session
session, err := d.Dial(conn)
if err != nil {
_ = conn.Close()
return nil, fmt.Errorf("failed to create SMB session: %w", err)
}
return session, nil
}
func (n *NASStorage) createConnection() (net.Conn, error) {
address := net.JoinHostPort(n.Host, fmt.Sprintf("%d", n.Port))
// Create connection with timeout
dialer := &net.Dialer{
Timeout: 10 * time.Second,
}
if n.UseSSL {
// Use TLS connection
tlsConfig := &tls.Config{
ServerName: n.Host,
InsecureSkipVerify: false, // Change to true if you want to skip cert verification
}
conn, err := tls.DialWithDialer(dialer, "tcp", address, tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to create SSL connection to %s: %w", address, err)
}
return conn, nil
} else {
// Use regular TCP connection
conn, err := dialer.Dial("tcp", address)
if err != nil {
return nil, fmt.Errorf("failed to create connection to %s: %w", address, err)
}
return conn, nil
}
}
func (n *NASStorage) ensureDirectory(fs *smb2.Share, path string) error {
// Clean and normalize the path
path = filepath.Clean(path)
path = strings.ReplaceAll(path, "\\", "/")
// Check if directory already exists
_, err := fs.Stat(path)
if err == nil {
return nil // Directory exists
}
// Try to create the directory (including parent directories)
parts := strings.Split(path, "/")
currentPath := ""
for _, part := range parts {
if part == "" || part == "." {
continue
}
if currentPath == "" {
currentPath = part
} else {
currentPath = currentPath + "/" + part
}
// Check if this part of the path exists
_, err := fs.Stat(currentPath)
if err != nil {
// Directory doesn't exist, try to create it
err = fs.Mkdir(currentPath, 0755)
if err != nil {
return fmt.Errorf("failed to create directory '%s': %w", currentPath, err)
}
}
}
return nil
}
func (n *NASStorage) getFilePath(filename string) string {
if n.Path == "" {
return filename
}
// Clean path and use forward slashes for SMB
cleanPath := filepath.Clean(n.Path)
cleanPath = strings.ReplaceAll(cleanPath, "\\", "/")
return cleanPath + "/" + filename
}
// nasFileReader wraps the NAS file and handles cleanup of resources
type nasFileReader struct {
file *smb2.File
fs *smb2.Share
session *smb2.Session
}
func (r *nasFileReader) Read(p []byte) (n int, err error) {
return r.file.Read(p)
}
func (r *nasFileReader) Close() error {
// Close resources in reverse order
var errors []error
if r.file != nil {
if err := r.file.Close(); err != nil {
errors = append(errors, fmt.Errorf("failed to close file: %w", err))
}
}
if r.fs != nil {
if err := r.fs.Umount(); err != nil {
errors = append(errors, fmt.Errorf("failed to unmount share: %w", err))
}
}
if r.session != nil {
if err := r.session.Logoff(); err != nil {
errors = append(errors, fmt.Errorf("failed to logoff session: %w", err))
}
}
if len(errors) > 0 {
// Return the first error; the remaining errors are dropped
return errors[0]
}
return nil
}

View File

@@ -26,17 +26,21 @@ func (r *StorageRepository) Save(storage *Storage) (*Storage, error) {
if storage.GoogleDriveStorage != nil {
storage.GoogleDriveStorage.StorageID = storage.ID
}
case StorageTypeNAS:
if storage.NASStorage != nil {
storage.NASStorage.StorageID = storage.ID
}
}
if storage.ID == uuid.Nil {
if err := tx.Create(storage).
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage").
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage").
Error; err != nil {
return err
}
} else {
if err := tx.Save(storage).
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage").
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage").
Error; err != nil {
return err
}
@@ -64,6 +68,13 @@ func (r *StorageRepository) Save(storage *Storage) (*Storage, error) {
return err
}
}
case StorageTypeNAS:
if storage.NASStorage != nil {
storage.NASStorage.StorageID = storage.ID // Ensure ID is set
if err := tx.Save(storage.NASStorage).Error; err != nil {
return err
}
}
}
return nil
@@ -84,6 +95,7 @@ func (r *StorageRepository) FindByID(id uuid.UUID) (*Storage, error) {
Preload("LocalStorage").
Preload("S3Storage").
Preload("GoogleDriveStorage").
Preload("NASStorage").
Where("id = ?", id).
First(&s).Error; err != nil {
return nil, err
@@ -100,6 +112,7 @@ func (r *StorageRepository) FindByUserID(userID uuid.UUID) ([]*Storage, error) {
Preload("LocalStorage").
Preload("S3Storage").
Preload("GoogleDriveStorage").
Preload("NASStorage").
Where("user_id = ?", userID).
Order("name ASC").
Find(&storages).Error; err != nil {
@@ -131,6 +144,12 @@ func (r *StorageRepository) Delete(s *Storage) error {
return err
}
}
case StorageTypeNAS:
if s.NASStorage != nil {
if err := tx.Delete(s.NASStorage).Error; err != nil {
return err
}
}
}
// Delete the main storage

View File

@@ -222,10 +222,14 @@ func verifyDataIntegrity(t *testing.T, originalDB *sqlx.DB, restoredDB *sqlx.DB)
assert.NoError(t, err)
assert.Equal(t, len(originalData), len(restoredData), "Should have same number of rows")
for i := range originalData {
assert.Equal(t, originalData[i].ID, restoredData[i].ID, "ID should match")
assert.Equal(t, originalData[i].Name, restoredData[i].Name, "Name should match")
assert.Equal(t, originalData[i].Value, restoredData[i].Value, "Value should match")
// Compare rows only when both result sets have the same length
// (assert.Equal above does not abort the test, so this guard avoids an index panic)
if len(originalData) == len(restoredData) {
for i := range originalData {
assert.Equal(t, originalData[i].ID, restoredData[i].ID, "ID should match")
assert.Equal(t, originalData[i].Name, restoredData[i].Name, "Name should match")
assert.Equal(t, originalData[i].Value, restoredData[i].Value, "Value should match")
}
}
}

View File

@@ -4,10 +4,12 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"golang.org/x/time/rate"
)
type UserController struct {
userService *UserService
userService *UserService
signinLimiter *rate.Limiter
}
func (c *UserController) RegisterRoutes(router *gin.RouterGroup) {
@@ -51,8 +53,18 @@ func (c *UserController) SignUp(ctx *gin.Context) {
// @Param request body SignInRequest true "User signin data"
// @Success 200 {object} SignInResponse
// @Failure 400
// @Failure 429 {object} map[string]string "Rate limit exceeded"
// @Router /users/signin [post]
func (c *UserController) SignIn(ctx *gin.Context) {
// We use rate limiter to prevent brute force attacks
if !c.signinLimiter.Allow() {
ctx.JSON(
http.StatusTooManyRequests,
gin.H{"error": "Rate limit exceeded. Please try again later."},
)
return
}
var request SignInRequest
if err := ctx.ShouldBindJSON(&request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request format"})

View File

@@ -2,6 +2,8 @@ package users
import (
user_repositories "postgresus-backend/internal/features/users/repositories"
"golang.org/x/time/rate"
)
var secretKeyRepository = &user_repositories.SecretKeyRepository{}
@@ -12,6 +14,7 @@ var userService = &UserService{
}
var userController = &UserController{
userService,
rate.NewLimiter(rate.Limit(3), 3), // 3 RPS with burst of 3
}
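// rate.NewLimiter(rate.Limit(3), 3) builds a token bucket that refills at
// 3 tokens per second with a burst capacity of 3; each call to Allow() in
// SignIn consumes one token, and the limiter is shared across all clients.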
func GetUserService() *UserService {

View File

@@ -1,7 +1,27 @@
package files_utils
import "os"
import (
"fmt"
"os"
"path/filepath"
)
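// CleanFolder removes everything inside the folder but keeps
// the folder itself (so temp directories survive cleanups).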
func CleanFolder(folder string) error {
return os.RemoveAll(folder)
if _, err := os.Stat(folder); os.IsNotExist(err) {
return nil
}
entries, err := os.ReadDir(folder)
if err != nil {
return fmt.Errorf("failed to read directory %s: %w", folder, err)
}
for _, entry := range entries {
itemPath := filepath.Join(folder, entry.Name())
if err := os.RemoveAll(itemPath); err != nil {
return fmt.Errorf("failed to remove %s: %w", itemPath, err)
}
}
return nil
}

View File

@@ -0,0 +1,22 @@
package files_utils
import (
"fmt"
"os"
)
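// EnsureDirectories creates each directory (and any missing parents)
// with 0755 permissions if it does not already exist.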
func EnsureDirectories(directories []string) error {
const directoryPermissions = 0755
for _, directory := range directories {
if _, err := os.Stat(directory); os.IsNotExist(err) {
if err := os.MkdirAll(directory, directoryPermissions); err != nil {
return fmt.Errorf("failed to create directory %s: %w", directory, err)
}
} else if err != nil {
return fmt.Errorf("failed to check directory %s: %w", directory, err)
}
}
return nil
}

View File

@@ -5,6 +5,13 @@ import (
"strconv"
)
type PostgresqlExtension string
const (
// needed for query monitoring
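// (pg_stat_statements records per-query execution statistics)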
PostgresqlExtensionPgStatMonitor PostgresqlExtension = "pg_stat_statements"
)
type PostgresqlVersion string
const (

View File

@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
-- Create NAS storages table
CREATE TABLE nas_storages (
storage_id UUID PRIMARY KEY,
host TEXT NOT NULL,
port INTEGER NOT NULL DEFAULT 445,
share TEXT NOT NULL,
username TEXT NOT NULL,
password TEXT NOT NULL,
use_ssl BOOLEAN NOT NULL DEFAULT FALSE,
domain TEXT,
path TEXT
);
ALTER TABLE nas_storages
ADD CONSTRAINT fk_nas_storages_storage
FOREIGN KEY (storage_id)
REFERENCES storages (id)
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS nas_storages;
-- +goose StatementEnd

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE telegram_notifiers
ADD COLUMN thread_id BIGINT;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE telegram_notifiers
DROP COLUMN IF EXISTS thread_id;
-- +goose StatementEnd

View File

@@ -0,0 +1,20 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE teams_notifiers (
notifier_id UUID PRIMARY KEY,
power_automate_url TEXT NOT NULL
);
ALTER TABLE teams_notifiers
ADD CONSTRAINT fk_teams_notifiers_notifier
FOREIGN KEY (notifier_id)
REFERENCES notifiers (id)
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS teams_notifiers;
-- +goose StatementEnd

View File

@@ -0,0 +1,60 @@
-- +goose Up
-- +goose StatementBegin
-- Create postgres_monitoring_settings table
CREATE TABLE postgres_monitoring_settings (
database_id UUID PRIMARY KEY,
is_db_resources_monitoring_enabled BOOLEAN NOT NULL DEFAULT FALSE,
monitoring_interval_seconds BIGINT NOT NULL DEFAULT 60,
installed_extensions_raw TEXT
);
-- Add foreign key constraint for postgres_monitoring_settings
ALTER TABLE postgres_monitoring_settings
ADD CONSTRAINT fk_postgres_monitoring_settings_database_id
FOREIGN KEY (database_id)
REFERENCES databases (id)
ON DELETE CASCADE;
-- Create postgres_monitoring_metrics table
CREATE TABLE postgres_monitoring_metrics (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
database_id UUID NOT NULL,
metric TEXT NOT NULL,
value_type TEXT NOT NULL,
value DOUBLE PRECISION NOT NULL,
created_at TIMESTAMPTZ NOT NULL
);
-- Add foreign key constraint for postgres_monitoring_metrics
ALTER TABLE postgres_monitoring_metrics
ADD CONSTRAINT fk_postgres_monitoring_metrics_database_id
FOREIGN KEY (database_id)
REFERENCES databases (id)
ON DELETE CASCADE;
-- Add indexes for performance
CREATE INDEX idx_postgres_monitoring_metrics_database_id
ON postgres_monitoring_metrics (database_id);
CREATE INDEX idx_postgres_monitoring_metrics_created_at
ON postgres_monitoring_metrics (created_at);
CREATE INDEX idx_postgres_monitoring_metrics_database_metric_created_at
ON postgres_monitoring_metrics (database_id, metric, created_at);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
-- Drop indexes first
DROP INDEX IF EXISTS idx_postgres_monitoring_metrics_database_metric_created_at;
DROP INDEX IF EXISTS idx_postgres_monitoring_metrics_created_at;
DROP INDEX IF EXISTS idx_postgres_monitoring_metrics_database_id;
-- Drop tables in reverse order
DROP TABLE IF EXISTS postgres_monitoring_metrics;
DROP TABLE IF EXISTS postgres_monitoring_settings;
-- +goose StatementEnd

View File

@@ -0,0 +1 @@
This is test data for storage testing

View File

@@ -0,0 +1 @@
This is test data for storage testing

View File

@@ -2,6 +2,9 @@
set -e # Exit on any error
# Ensure non-interactive mode for apt
export DEBIAN_FRONTEND=noninteractive
echo "Installing PostgreSQL client tools versions 13-17 for Linux (Debian/Ubuntu)..."
echo
@@ -30,18 +33,18 @@ echo
# Add PostgreSQL official APT repository
echo "Adding PostgreSQL official APT repository..."
$SUDO apt-get update -qq
$SUDO apt-get install -y wget ca-certificates
$SUDO apt-get update -qq -y
$SUDO apt-get install -y -qq wget ca-certificates
# Add GPG key
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | $SUDO apt-key add -
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | $SUDO apt-key add - 2>/dev/null
# Add repository
echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | $SUDO tee /etc/apt/sources.list.d/pgdg.list
echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | $SUDO tee /etc/apt/sources.list.d/pgdg.list >/dev/null
# Update package list
echo "Updating package list..."
$SUDO apt-get update -qq
$SUDO apt-get update -qq -y
# Install client tools for each version
versions="13 14 15 16 17"
@@ -50,35 +53,34 @@ for version in $versions; do
echo "Installing PostgreSQL $version client tools..."
# Install client tools only
$SUDO apt-get install -y postgresql-client-$version
$SUDO apt-get install -y -qq postgresql-client-$version
# Create version-specific directory and symlinks
version_dir="$POSTGRES_DIR/postgresql-$version"
mkdir -p "$version_dir/bin"
# Create symlinks to the installed binaries
if [ -f "/usr/bin/pg_dump" ]; then
# If multiple versions, binaries are usually named with version suffix
if [ -f "/usr/bin/pg_dump-$version" ]; then
ln -sf "/usr/bin/pg_dump-$version" "$version_dir/bin/pg_dump"
ln -sf "/usr/bin/pg_dumpall-$version" "$version_dir/bin/pg_dumpall"
ln -sf "/usr/bin/psql-$version" "$version_dir/bin/psql"
ln -sf "/usr/bin/pg_restore-$version" "$version_dir/bin/pg_restore"
ln -sf "/usr/bin/createdb-$version" "$version_dir/bin/createdb"
ln -sf "/usr/bin/dropdb-$version" "$version_dir/bin/dropdb"
else
# Fallback to non-versioned names (latest version)
ln -sf "/usr/bin/pg_dump" "$version_dir/bin/pg_dump"
ln -sf "/usr/bin/pg_dumpall" "$version_dir/bin/pg_dumpall"
ln -sf "/usr/bin/psql" "$version_dir/bin/psql"
ln -sf "/usr/bin/pg_restore" "$version_dir/bin/pg_restore"
ln -sf "/usr/bin/createdb" "$version_dir/bin/createdb"
ln -sf "/usr/bin/dropdb" "$version_dir/bin/dropdb"
fi
# On Debian/Ubuntu, PostgreSQL binaries are located in /usr/lib/postgresql/{version}/bin/
pg_bin_dir="/usr/lib/postgresql/$version/bin"
if [ -d "$pg_bin_dir" ] && [ -f "$pg_bin_dir/pg_dump" ]; then
# Create symlinks to the version-specific binaries
ln -sf "$pg_bin_dir/pg_dump" "$version_dir/bin/pg_dump"
ln -sf "$pg_bin_dir/pg_dumpall" "$version_dir/bin/pg_dumpall"
ln -sf "$pg_bin_dir/psql" "$version_dir/bin/psql"
ln -sf "$pg_bin_dir/pg_restore" "$version_dir/bin/pg_restore"
ln -sf "$pg_bin_dir/createdb" "$version_dir/bin/createdb"
ln -sf "$pg_bin_dir/dropdb" "$version_dir/bin/dropdb"
echo "PostgreSQL $version client tools installed successfully"
else
echo "Warning: PostgreSQL $version client tools may not have installed correctly"
echo "Error: PostgreSQL $version binaries not found in expected location: $pg_bin_dir"
echo "Available PostgreSQL directories:"
ls -la /usr/lib/postgresql/ 2>/dev/null || echo "No PostgreSQL directories found in /usr/lib/postgresql/"
if [ -d "$pg_bin_dir" ]; then
echo "Contents of $pg_bin_dir:"
ls -la "$pg_bin_dir" 2>/dev/null || echo "Directory exists but cannot list contents"
fi
exit 1
fi
echo
done
@@ -93,6 +95,9 @@ for version in $versions; do
version_dir="$POSTGRES_DIR/postgresql-$version"
if [ -f "$version_dir/bin/pg_dump" ]; then
echo " postgresql-$version: $version_dir/bin/"
# Verify the correct version
version_output=$("$version_dir/bin/pg_dump" --version 2>/dev/null | grep -o "pg_dump (PostgreSQL) [0-9]\+\.[0-9]\+")
echo " Version check: $version_output"
fi
done

View File

@@ -1,4 +1,4 @@
This directory is needed only for development.
This directory is needed only for development and CI/CD.
We have to download and install all the PostgreSQL versions from 13 to 17 locally.
This is needed so we can call pg_dump, pg_dumpall, etc. on each version of the PostgreSQL database.

View File

@@ -37,9 +37,12 @@ Example:
Before any commit, make sure:
1. You created critical tests for your changes
2. `golangci-lint fmt` and `golangci-lint run` are passing
2. `make lint` is passing (for backend) and `npm run lint` is passing (for frontend)
3. All tests are passing
4. Project is building successfully
5. All your commits should be squashed into one commit with a proper message (or into meaningful parts)
6. Code is really refactored and production ready
7. You have a single PR per feature (at least when features are not connected)
### Automated Versioning
@@ -68,8 +71,13 @@ If you need to add some explanation, do it in appropriate place in the code. Or
Before taking on anything more than a couple of lines of code, please write to Rostislav via Telegram (@rostislav_dugin) and confirm the priority. It is possible that we already have something in the works, that it is not needed, or that it is not a project priority.
Nearest features:
- add copying of databases
- add API keys and API actions
Backups flow:
- do not remove old backups when backups are disabled
- add FTP
- add Dropbox
- add OneDrive
@@ -82,43 +90,16 @@ Backups flow:
Notifications flow:
- add Mattermost
- add MS Teams
Extra:
- add tests running on each PR (in progress by Rostislav Dugin)
- add prettier labels to GitHub README
- create pretty website like rybbit.io with demo
- add HTTPS for Postgresus
- add simple SQL queries via UI
- add brute force protection on auth (via local RPS limiter)
- add support of Kubernetes Helm
- create pretty website like rybbit.io with demo
Monitoring flow:
- add system metrics (CPU, RAM, disk, IO)
- add queries stats (slowest, most frequent, etc. via pg_stat_statements)
- add alerting for slow queries (listen for slow queries and send a message when they exceed 100 ms)
- add alerting for high resource usage (listen for resource usage and send a message when it exceeds 90%)
- add DB size distribution chart (tables, indexes, etc.)
- add performance test for DB (to compare DBs on different clouds and VPS)
- add DB metrics (pg_stat_activity, pg_locks, pg_stat_database)
- add chart of connections (from IPs, app names, etc.)
- add chart of transactions (TPS)
- deadlocks chart
- chart of connection attempts (to see crash loops)
- add chart of IDLE transactions VS executing transactions
- show queries that take the most IO time (suboptimal indexes)
- show chart by top IO / CPU queries usage (see page 90 of the PostgreSQL monitoring book)
```
exec_time | IO | CPU | query
105 hrs | 73% | 27% | SELECT * FROM users;
```
- chart of read / update / delete / insert queries
- chart with deadlocks, conflicts, rollbacks (see page 115 of the PostgreSQL monitoring book)
- stats of buffer usage
- status of IO (DB, indexes, sequences)
- % of cache hits
- replication stats

View File

@@ -0,0 +1,45 @@
# How to add new notifier to Postgresus (Discord, Slack, Telegram, Email, Webhook, etc.)
## Backend part
1. Create a new model in the `backend/internal/features/notifiers/models/{notifier_name}/` folder and implement the `NotificationSender` interface from the parent folder (see the sketch after this list).
- The model should implement `Send(logger *slog.Logger, heading string, message string) error` and `Validate() error` methods
- Use UUID primary key as `NotifierID` that references the main notifiers table
2. Add new notifier type to `backend/internal/features/notifiers/enums.go` in the `NotifierType` constants.
3. Update the main `Notifier` model in `backend/internal/features/notifiers/model.go`:
- Add new notifier field with GORM foreign key relation
- Update `getSpecificNotifier()` method to handle the new type
- Update `Send()` method to route to the new notifier
4. If you need to add .env variables for tests, add them in `backend/internal/config/config.go` (so we can use them in tests)
5. If you need a Docker container for testing, add it to `backend/docker-compose.yml.example`. Keep sensitive data blank.
6. If you need sensitive envs for pipeline tests, message @rostislav_dugin so I can add them to GitHub Actions. For example, API keys or credentials.
7. Create new migration in `backend/migrations` folder:
- Create table with `notifier_id` as UUID primary key
- Add foreign key constraint to `notifiers` table with CASCADE DELETE
- Look at existing notifier migrations for reference
8. Make sure that all tests are passing.
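
For orientation, here is a minimal sketch of the backend model from step 1. The `MatrixNotifier` name, its `WebhookURL` field, and the table name are hypothetical; only the `Send` and `Validate` signatures are fixed by the `NotificationSender` interface.

```go
package matrix_notifier

import (
	"errors"
	"log/slog"

	"github.com/google/uuid"
)

type MatrixNotifier struct {
	NotifierID uuid.UUID `json:"notifierId" gorm:"primaryKey;type:uuid;column:notifier_id"`
	WebhookURL string    `json:"webhookUrl" gorm:"not null;type:text;column:webhook_url"`
}

func (m *MatrixNotifier) TableName() string {
	return "matrix_notifiers"
}

// Send implements NotificationSender; a real notifier would POST the
// heading and message to the configured endpoint and return any HTTP error.
func (m *MatrixNotifier) Send(logger *slog.Logger, heading string, message string) error {
	logger.Info("Sending notification", "heading", heading)
	// ... deliver heading and message to m.WebhookURL here ...
	return nil
}

func (m *MatrixNotifier) Validate() error {
	if m.WebhookURL == "" {
		return errors.New("webhook URL is required")
	}
	return nil
}
```

The delivery logic is elided; the existing Telegram, Slack, and Discord notifiers show how the HTTP call and error handling are typically structured.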
## Frontend part
If you can only develop the backend, that's fine: message @rostislav_dugin so I can complete the UI part.
1. Add models and validator to `frontend/src/entity/notifiers/models/{notifier_name}/` folder and update `index.ts` file to include new model exports.
2. Upload an SVG icon to `public/icons/notifiers/`, update `src/entity/notifiers/models/getNotifierLogoFromType.ts` to return new icon path, update `src/entity/notifiers/models/NotifierType.ts` to include new type, and update `src/entity/notifiers/models/getNotifierNameFromType.ts` to return new name.
3. Add UI components to manage your notifier:
- `src/features/notifiers/ui/edit/notifiers/Edit{NotifierName}Component.tsx` (for editing)
- `src/features/notifiers/ui/show/notifier/Show{NotifierName}Component.tsx` (for display)
4. Update main components to handle the new notifier type:
- `EditNotifierComponent.tsx` - add import, validation function, and component rendering
- `ShowNotifierComponent.tsx` - add import and component rendering
5. Make sure everything is working as expected.

View File

@@ -0,0 +1,51 @@
# How to add new storage to Postgresus (S3, FTP, Google Drive, NAS, etc.)
## Backend part
1. Create a new model in the `backend/internal/features/storages/models/{storage_name}/` folder and implement the `StorageFileSaver` interface from the parent folder (see the sketch after this list).
- The model should implement `SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error`, `GetFile(fileID uuid.UUID) (io.ReadCloser, error)`, `DeleteFile(fileID uuid.UUID) error`, `Validate() error`, and `TestConnection() error` methods
- Use UUID primary key as `StorageID` that references the main storages table
- Add `TableName() string` method to return the proper table name
2. Add new storage type to `backend/internal/features/storages/enums.go` in the `StorageType` constants.
3. Update the main `Storage` model in `backend/internal/features/storages/model.go`:
- Add new storage field with GORM foreign key relation
- Update `getSpecificStorage()` method to handle the new type
- Update `SaveFile()`, `GetFile()`, and `DeleteFile()` methods to route to the new storage
- Update `Validate()` method to include new storage validation
4. If you need to add .env variables for tests, add them in `backend/internal/config/config.go` (so we can use them in tests)
5. If you need a Docker container for testing, add it to `backend/docker-compose.yml.example`. Keep sensitive data blank.
6. If you need sensitive envs for pipeline tests, message @rostislav_dugin so I can add them to GitHub Actions. For example, Google Drive envs or FTP credentials.
7. Create new migration in `backend/migrations` folder:
- Create table with `storage_id` as UUID primary key
- Add foreign key constraint to `storages` table with CASCADE DELETE
- Look at existing storage migrations for reference
8. Update tests in `backend/internal/features/storages/model_test.go` to test new storage
9. Make sure that all tests are passing.
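
For orientation, here is a minimal sketch of the backend model from step 1. The `FTPStorage` struct, its fields, and the table name are hypothetical (FTP support is still on the roadmap); only the method signatures are fixed by the `StorageFileSaver` interface.

```go
package ftp_storage

import (
	"errors"
	"io"
	"log/slog"

	"github.com/google/uuid"
)

type FTPStorage struct {
	StorageID uuid.UUID `json:"storageId" gorm:"primaryKey;type:uuid;column:storage_id"`
	Host      string    `json:"host" gorm:"not null;type:text;column:host"`
	Port      int       `json:"port" gorm:"not null;default:21;column:port"`
}

func (f *FTPStorage) TableName() string { return "ftp_storages" }

func (f *FTPStorage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error {
	logger.Info("Saving file to FTP storage", "fileId", fileID.String())
	// ... connect to the server and stream `file` to a remote path named after fileID ...
	return nil
}

func (f *FTPStorage) GetFile(fileID uuid.UUID) (io.ReadCloser, error) {
	// ... open the remote file; the returned ReadCloser should also release the connection ...
	return nil, errors.New("not implemented in this sketch")
}

func (f *FTPStorage) DeleteFile(fileID uuid.UUID) error {
	// ... remove the remote file; a missing file can be treated as already deleted ...
	return nil
}

func (f *FTPStorage) Validate() error {
	if f.Host == "" {
		return errors.New("FTP host is required")
	}
	if f.Port <= 0 || f.Port > 65535 {
		return errors.New("FTP port must be between 1 and 65535")
	}
	return f.TestConnection()
}

func (f *FTPStorage) TestConnection() error {
	// ... dial the server with a timeout and authenticate ...
	return nil
}
```

The NAS storage model in `backend/internal/features/storages/models/nas/` is a complete real example of this shape, including session cleanup and a wrapped `io.ReadCloser` returned from `GetFile`.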
## Frontend part
If you can only develop the backend, that's fine: message @rostislav_dugin so I can complete the UI part.
1. Add models and api to `frontend/src/entity/storages/models/` folder and update `index.ts` file to include new model exports.
- Create TypeScript interface for your storage model
- Add validation function if needed
2. Upload an SVG icon to `public/icons/storages/`, update `src/entity/storages/models/getStorageLogoFromType.ts` to return new icon path, update `src/entity/storages/models/StorageType.ts` to include new type, and update `src/entity/storages/models/getStorageNameFromType.ts` to return new name.
3. Add UI components to manage your storage:
- `src/features/storages/ui/edit/storages/Edit{StorageName}Component.tsx` (for editing)
- `src/features/storages/ui/show/storages/Show{StorageName}Component.tsx` (for display)
4. Update main components to handle the new storage type:
- `EditStorageComponent.tsx` - add import and component rendering
- `ShowStorageComponent.tsx` - add import and component rendering
5. Make sure everything is working as expected.

View File

@@ -16,27 +16,4 @@ services:
volumes:
- ./postgresus-data:/postgresus-data
container_name: postgresus-local
depends_on:
postgresus-db:
condition: service_healthy
restart: unless-stopped
postgresus-db:
image: postgres:17
# we use default values, but do not expose
# PostgreSQL ports so it is safe
environment:
- POSTGRES_DB=postgresus
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=Q1234567
volumes:
- ./pgdata:/var/lib/postgresql/data
container_name: postgresus-db
command: -p 5437
shm_size: 10gb
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d postgresus -p 5437"]
interval: 5s
timeout: 5s
retries: 5
restart: unless-stopped
restart: unless-stopped

View File

@@ -1,54 +1,39 @@
# React + TypeScript + Vite
# Frontend Development
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
## Development
Currently, two official plugins are available:
To run the development server:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
```js
export default tseslint.config({
extends: [
// Remove ...tseslint.configs.recommended and replace with this
...tseslint.configs.recommendedTypeChecked,
// Alternatively, use this for stricter rules
...tseslint.configs.strictTypeChecked,
// Optionally, add this for stylistic rules
...tseslint.configs.stylisticTypeChecked,
],
languageOptions: {
// other options...
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
},
});
```bash
npm run dev
```
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
## Build
```js
// eslint.config.js
import reactDom from 'eslint-plugin-react-dom';
import reactX from 'eslint-plugin-react-x';
To build the project for production:
export default tseslint.config({
plugins: {
// Add the react-x and react-dom plugins
'react-x': reactX,
'react-dom': reactDom,
},
rules: {
// other rules...
// Enable its recommended typescript rules
...reactX.configs['recommended-typescript'].rules,
...reactDom.configs.recommended.rules,
},
});
```bash
npm run build
```
This will compile TypeScript and create an optimized production build.
## Code Quality
### Linting
To check for linting errors:
```bash
npm run lint
```
### Formatting
To format code using Prettier:
```bash
npm run format
```
This will automatically format all TypeScript, JavaScript, JSON, CSS, and Markdown files.

View File

@@ -15,6 +15,7 @@
"react-dom": "^19.1.0",
"react-github-btn": "^1.4.0",
"react-router": "^7.6.0",
"recharts": "^3.2.0",
"tailwindcss": "^4.1.7"
},
"devDependencies": {
@@ -1315,6 +1316,32 @@
"react-dom": ">=16.9.0"
}
},
"node_modules/@reduxjs/toolkit": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.9.0.tgz",
"integrity": "sha512-fSfQlSRu9Z5yBkvsNhYF2rPS8cGXn/TZVrlwN1948QyZ8xMZ0JvP50S2acZNaf+o63u6aEeMjipFyksjIcWrog==",
"license": "MIT",
"dependencies": {
"@standard-schema/spec": "^1.0.0",
"@standard-schema/utils": "^0.3.0",
"immer": "^10.0.3",
"redux": "^5.0.1",
"redux-thunk": "^3.1.0",
"reselect": "^5.1.0"
},
"peerDependencies": {
"react": "^16.9.0 || ^17.0.0 || ^18 || ^19",
"react-redux": "^7.2.1 || ^8.1.3 || ^9.0.0"
},
"peerDependenciesMeta": {
"react": {
"optional": true
},
"react-redux": {
"optional": true
}
}
},
"node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.41.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.41.0.tgz",
@@ -1575,6 +1602,18 @@
"win32"
]
},
"node_modules/@standard-schema/spec": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz",
"integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==",
"license": "MIT"
},
"node_modules/@standard-schema/utils": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz",
"integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==",
"license": "MIT"
},
"node_modules/@tailwindcss/node": {
"version": "4.1.7",
"resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.7.tgz",
@@ -1917,6 +1956,69 @@
"@babel/types": "^7.20.7"
}
},
"node_modules/@types/d3-array": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
"integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==",
"license": "MIT"
},
"node_modules/@types/d3-color": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
"integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==",
"license": "MIT"
},
"node_modules/@types/d3-ease": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
"integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==",
"license": "MIT"
},
"node_modules/@types/d3-interpolate": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
"integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
"license": "MIT",
"dependencies": {
"@types/d3-color": "*"
}
},
"node_modules/@types/d3-path": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz",
"integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==",
"license": "MIT"
},
"node_modules/@types/d3-scale": {
"version": "4.0.9",
"resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
"integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
"license": "MIT",
"dependencies": {
"@types/d3-time": "*"
}
},
"node_modules/@types/d3-shape": {
"version": "3.1.7",
"resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz",
"integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==",
"license": "MIT",
"dependencies": {
"@types/d3-path": "*"
}
},
"node_modules/@types/d3-time": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
"integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
"license": "MIT"
},
"node_modules/@types/d3-timer": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
"integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
"license": "MIT"
},
"node_modules/@types/estree": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz",
@@ -1934,7 +2036,7 @@
"version": "19.1.4",
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.4.tgz",
"integrity": "sha512-EB1yiiYdvySuIITtD5lhW4yPyJ31RkJkkDw794LaQYrxCSaQV/47y5o1FMC4zF9ZyjUjzJMZwbovEnT5yHTW6g==",
"dev": true,
"devOptional": true,
"license": "MIT",
"dependencies": {
"csstype": "^3.0.2"
@@ -1950,6 +2052,12 @@
"@types/react": "^19.0.0"
}
},
"node_modules/@types/use-sync-external-store": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz",
"integrity": "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==",
"license": "MIT"
},
"node_modules/@typescript-eslint/eslint-plugin": {
"version": "8.32.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.32.1.tgz",
@@ -2666,6 +2774,15 @@
"integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==",
"license": "MIT"
},
"node_modules/clsx": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
"integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
@@ -2745,6 +2862,127 @@
"integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
"license": "MIT"
},
"node_modules/d3-array": {
"version": "3.2.4",
"resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
"integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
"license": "ISC",
"dependencies": {
"internmap": "1 - 2"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-color": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
"integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-ease": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
"integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
"license": "BSD-3-Clause",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-format": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
"integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-interpolate": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
"integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
"license": "ISC",
"dependencies": {
"d3-color": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-path": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
"integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-scale": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
"integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
"license": "ISC",
"dependencies": {
"d3-array": "2.10.0 - 3",
"d3-format": "1 - 3",
"d3-interpolate": "1.2.0 - 3",
"d3-time": "2.1.1 - 3",
"d3-time-format": "2 - 4"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-shape": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
"integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
"license": "ISC",
"dependencies": {
"d3-path": "^3.1.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-time": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
"integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
"license": "ISC",
"dependencies": {
"d3-array": "2 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-time-format": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
"integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
"license": "ISC",
"dependencies": {
"d3-time": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-timer": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
"integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/data-view-buffer": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz",
@@ -2823,6 +3061,12 @@
}
}
},
"node_modules/decimal.js-light": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz",
"integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==",
"license": "MIT"
},
"node_modules/deep-is": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
@@ -3097,6 +3341,16 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/es-toolkit": {
"version": "1.39.10",
"resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.10.tgz",
"integrity": "sha512-E0iGnTtbDhkeczB0T+mxmoVlT4YNweEKBLq7oaU4p11mecdsZpNWOglI4895Vh4usbQ+LsJiuLuI2L0Vdmfm2w==",
"license": "MIT",
"workspaces": [
"docs",
"benchmarks"
]
},
"node_modules/esbuild": {
"version": "0.25.4",
"resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.4.tgz",
@@ -3371,6 +3625,12 @@
"node": ">=0.10.0"
}
},
"node_modules/eventemitter3": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz",
"integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==",
"license": "MIT"
},
"node_modules/fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
@@ -3813,6 +4073,16 @@
"node": ">= 4"
}
},
"node_modules/immer": {
"version": "10.1.3",
"resolved": "https://registry.npmjs.org/immer/-/immer-10.1.3.tgz",
"integrity": "sha512-tmjF/k8QDKydUlm3mZU+tjM6zeq9/fFpPqH9SzWmBnVVKsPBg/V66qsMwb3/Bo90cgUN+ghdVBess+hPsxUyRw==",
"license": "MIT",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/immer"
}
},
"node_modules/import-fresh": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz",
@@ -3855,6 +4125,15 @@
"node": ">= 0.4"
}
},
"node_modules/internmap": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
"integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/is-array-buffer": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz",
@@ -5879,9 +6158,31 @@
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
"dev": true,
"license": "MIT"
},
"node_modules/react-redux": {
"version": "9.2.0",
"resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.2.0.tgz",
"integrity": "sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==",
"license": "MIT",
"dependencies": {
"@types/use-sync-external-store": "^0.0.6",
"use-sync-external-store": "^1.4.0"
},
"peerDependencies": {
"@types/react": "^18.2.25 || ^19",
"react": "^18.0 || ^19",
"redux": "^5.0.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"redux": {
"optional": true
}
}
},
"node_modules/react-refresh": {
"version": "0.17.0",
"resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
@@ -5914,6 +6215,48 @@
}
}
},
"node_modules/recharts": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/recharts/-/recharts-3.2.0.tgz",
"integrity": "sha512-fX0xCgNXo6mag9wz3oLuANR+dUQM4uIlTYBGTGq9CBRgW/8TZPzqPGYs5NTt8aENCf+i1CI8vqxT1py8L/5J2w==",
"license": "MIT",
"dependencies": {
"@reduxjs/toolkit": "1.x.x || 2.x.x",
"clsx": "^2.1.1",
"decimal.js-light": "^2.5.1",
"es-toolkit": "^1.39.3",
"eventemitter3": "^5.0.1",
"immer": "^10.1.1",
"react-redux": "8.x.x || 9.x.x",
"reselect": "5.1.1",
"tiny-invariant": "^1.3.3",
"use-sync-external-store": "^1.2.2",
"victory-vendor": "^37.0.2"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
"react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
"react-is": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
"node_modules/redux": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/redux/-/redux-5.0.1.tgz",
"integrity": "sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==",
"license": "MIT"
},
"node_modules/redux-thunk": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-3.1.0.tgz",
"integrity": "sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw==",
"license": "MIT",
"peerDependencies": {
"redux": "^5.0.0"
}
},
"node_modules/reflect.getprototypeof": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz",
@@ -5958,6 +6301,12 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/reselect": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz",
"integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==",
"license": "MIT"
},
"node_modules/resize-observer-polyfill": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz",
@@ -6508,6 +6857,12 @@
"node": ">=12.22"
}
},
"node_modules/tiny-invariant": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz",
"integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==",
"license": "MIT"
},
"node_modules/tinyglobby": {
"version": "0.2.13",
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz",
@@ -6770,6 +7125,37 @@
"punycode": "^2.1.0"
}
},
"node_modules/use-sync-external-store": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.5.0.tgz",
"integrity": "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A==",
"license": "MIT",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
"node_modules/victory-vendor": {
"version": "37.3.6",
"resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-37.3.6.tgz",
"integrity": "sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ==",
"license": "MIT AND ISC",
"dependencies": {
"@types/d3-array": "^3.0.3",
"@types/d3-ease": "^3.0.0",
"@types/d3-interpolate": "^3.0.1",
"@types/d3-scale": "^4.0.2",
"@types/d3-shape": "^3.1.0",
"@types/d3-time": "^3.0.0",
"@types/d3-timer": "^3.0.0",
"d3-array": "^3.1.6",
"d3-ease": "^3.0.1",
"d3-interpolate": "^3.0.1",
"d3-scale": "^4.0.2",
"d3-shape": "^3.1.0",
"d3-time": "^3.0.0",
"d3-timer": "^3.0.1"
}
},
"node_modules/vite": {
"version": "6.3.5",
"resolved": "https://registry.npmjs.org/vite/-/vite-6.3.5.tgz",

View File

@@ -18,6 +18,7 @@
"react-dom": "^19.1.0",
"react-github-btn": "^1.4.0",
"react-router": "^7.6.0",
"recharts": "^3.2.0",
"tailwindcss": "^4.1.7"
},
"devDependencies": {

View File

@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none"><path fill="#5059C9" d="M10.765 6.875h3.616c.342 0 .619.276.619.617v3.288a2.272 2.272 0 01-2.274 2.27h-.01a2.272 2.272 0 01-2.274-2.27V7.199c0-.179.145-.323.323-.323zM13.21 6.225c.808 0 1.464-.655 1.464-1.462 0-.808-.656-1.463-1.465-1.463s-1.465.655-1.465 1.463c0 .807.656 1.462 1.465 1.462z"/><path fill="#7B83EB" d="M8.651 6.225a2.114 2.114 0 002.117-2.112A2.114 2.114 0 008.65 2a2.114 2.114 0 00-2.116 2.112c0 1.167.947 2.113 2.116 2.113zM11.473 6.875h-5.97a.611.611 0 00-.596.625v3.75A3.669 3.669 0 008.488 15a3.669 3.669 0 003.582-3.75V7.5a.611.611 0 00-.597-.625z"/><path fill="#000000" d="M8.814 6.875v5.255a.598.598 0 01-.596.595H5.193a3.951 3.951 0 01-.287-1.476V7.5a.61.61 0 01.597-.624h3.31z" opacity=".1"/><path fill="#000000" d="M8.488 6.875v5.58a.6.6 0 01-.596.595H5.347a3.22 3.22 0 01-.267-.65 3.951 3.951 0 01-.172-1.15V7.498a.61.61 0 01.596-.624h2.985z" opacity=".2"/><path fill="#000000" d="M8.488 6.875v4.93a.6.6 0 01-.596.595H5.08a3.951 3.951 0 01-.172-1.15V7.498a.61.61 0 01.596-.624h2.985z" opacity=".2"/><path fill="#000000" d="M8.163 6.875v4.93a.6.6 0 01-.596.595H5.079a3.951 3.951 0 01-.172-1.15V7.498a.61.61 0 01.596-.624h2.66z" opacity=".2"/><path fill="#000000" d="M8.814 5.195v1.024c-.055.003-.107.006-.163.006-.055 0-.107-.003-.163-.006A2.115 2.115 0 016.593 4.6h1.625a.598.598 0 01.596.594z" opacity=".1"/><path fill="#000000" d="M8.488 5.52v.699a2.115 2.115 0 01-1.79-1.293h1.195a.598.598 0 01.595.594z" opacity=".2"/><path fill="#000000" d="M8.488 5.52v.699a2.115 2.115 0 01-1.79-1.293h1.195a.598.598 0 01.595.594z" opacity=".2"/><path fill="#000000" d="M8.163 5.52v.647a2.115 2.115 0 01-1.465-1.242h.87a.598.598 0 01.595.595z" opacity=".2"/><path fill="url(#microsoft-teams-color-16__paint0_linear_2372_494)" d="M1.597 4.925h5.969c.33 0 .597.267.597.596v5.958a.596.596 0 01-.597.596h-5.97A.596.596 0 011 11.479V5.521c0-.33.267-.596.597-.596z"/><path fill="#ffffff" d="M6.152 7.193H4.959v3.243h-.76V7.193H3.01v-.63h3.141v.63z"/><defs><linearGradient id="microsoft-teams-color-16__paint0_linear_2372_494" x1="2.244" x2="6.906" y1="4.46" y2="12.548" gradientUnits="userSpaceOnUse"><stop stop-color="#5A62C3"/><stop offset=".5" stop-color="#4D55BD"/><stop offset="1" stop-color="#3940AB"/></linearGradient></defs></svg>


View File

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg fill="#000000" width="800px" height="800px" viewBox="0 0 256 256" id="Flat" xmlns="http://www.w3.org/2000/svg">
<g opacity="0.2">
<rect x="40" y="144" width="176" height="64" rx="8"/>
</g>
<g opacity="0.2">
<rect x="40" y="48" width="176" height="64" rx="8"/>
</g>
<path d="M208,136H48a16.01833,16.01833,0,0,0-16,16v48a16.01833,16.01833,0,0,0,16,16H208a16.01833,16.01833,0,0,0,16-16V152A16.01833,16.01833,0,0,0,208,136Zm0,64H48V152H208l.01025,47.99951Zm0-160H48A16.01833,16.01833,0,0,0,32,56v48a16.01833,16.01833,0,0,0,16,16H208a16.01833,16.01833,0,0,0,16-16V56A16.01833,16.01833,0,0,0,208,40Zm0,64H48V56H208l.01025,47.99951ZM192,80a12,12,0,1,1-12-12A12.01375,12.01375,0,0,1,192,80Zm0,96a12,12,0,1,1-12-12A12.01375,12.01375,0,0,1,192,176Z"/>


View File

@@ -12,3 +12,5 @@ export function getApplicationServer() {
}
export const GOOGLE_DRIVE_OAUTH_REDIRECT_URL = 'https://postgresus.com/storages/google-oauth';
export const APP_VERSION = (import.meta.env.VITE_APP_VERSION as string) || 'dev';

View File

@@ -0,0 +1,2 @@
export * from './metrics';
export * from './settings';

View File

@@ -0,0 +1,16 @@
import { getApplicationServer } from '../../../../constants';
import RequestOptions from '../../../../shared/api/RequestOptions';
import { apiHelper } from '../../../../shared/api/apiHelper';
import type { GetMetricsRequest } from '../model/GetMetricsRequest';
import type { PostgresMonitoringMetric } from '../model/PostgresMonitoringMetric';
export const metricsApi = {
async getMetrics(request: GetMetricsRequest): Promise<PostgresMonitoringMetric[]> {
const requestOptions: RequestOptions = new RequestOptions();
requestOptions.setBody(JSON.stringify(request));
return apiHelper.fetchPostJson<PostgresMonitoringMetric[]>(
`${getApplicationServer()}/api/v1/postgres-monitoring-metrics/get`,
requestOptions,
);
},
};

View File

@@ -0,0 +1,5 @@
export { metricsApi } from './api/metricsApi';
export type { PostgresMonitoringMetric } from './model/PostgresMonitoringMetric';
export type { GetMetricsRequest } from './model/GetMetricsRequest';
export { PostgresMonitoringMetricType } from './model/PostgresMonitoringMetricType';
export { PostgresMonitoringMetricValueType } from './model/PostgresMonitoringMetricValueType';

View File

@@ -0,0 +1,8 @@
import type { PostgresMonitoringMetricType } from './PostgresMonitoringMetricType';
export interface GetMetricsRequest {
databaseId: string;
metricType: PostgresMonitoringMetricType;
from: string;
to: string;
}

View File

@@ -0,0 +1,11 @@
import type { PostgresMonitoringMetricType } from './PostgresMonitoringMetricType';
import type { PostgresMonitoringMetricValueType } from './PostgresMonitoringMetricValueType';
export interface PostgresMonitoringMetric {
id: string;
databaseId: string;
metric: PostgresMonitoringMetricType;
valueType: PostgresMonitoringMetricValueType;
value: number;
createdAt: string;
}

View File

@@ -0,0 +1,4 @@
export enum PostgresMonitoringMetricType {
DB_RAM_USAGE = 'DB_RAM_USAGE',
DB_IO_USAGE = 'DB_IO_USAGE',
}


@@ -0,0 +1,4 @@
export enum PostgresMonitoringMetricValueType {
BYTE = 'BYTE',
PERCENT = 'PERCENT',
}
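
Since `value` arrives as a bare number, consumers need `valueType` to render it. A hypothetical formatter sketch (the helper name and the MiB rounding are assumptions, not part of this diff):

const formatMetricValue = (m: PostgresMonitoringMetric): string =>
  m.valueType === PostgresMonitoringMetricValueType.BYTE
    ? `${(m.value / 1024 / 1024).toFixed(1)} MiB` // BYTE values shown as mebibytes
    : `${m.value.toFixed(1)} %`; // PERCENT values shown as-is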


@@ -0,0 +1,24 @@
import { getApplicationServer } from '../../../../constants';
import RequestOptions from '../../../../shared/api/RequestOptions';
import { apiHelper } from '../../../../shared/api/apiHelper';
import type { PostgresMonitoringSettings } from '../model/PostgresMonitoringSettings';
export const monitoringSettingsApi = {
async saveSettings(settings: PostgresMonitoringSettings) {
const requestOptions: RequestOptions = new RequestOptions();
requestOptions.setBody(JSON.stringify(settings));
return apiHelper.fetchPostJson<PostgresMonitoringSettings>(
`${getApplicationServer()}/api/v1/postgres-monitoring-settings/save`,
requestOptions,
);
},
async getSettingsByDbID(databaseId: string) {
const requestOptions: RequestOptions = new RequestOptions();
return apiHelper.fetchGetJson<PostgresMonitoringSettings>(
`${getApplicationServer()}/api/v1/postgres-monitoring-settings/database/${databaseId}`,
requestOptions,
true,
);
},
};
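
A hedged usage sketch: fetch the current settings for a database, toggle resource monitoring on, and persist them (the 60-second interval is an illustrative value; see the `PostgresMonitoringSettings` model below):

const settings = await monitoringSettingsApi.getSettingsByDbID(databaseId);
await monitoringSettingsApi.saveSettings({
  ...settings, // keep databaseId and installedExtensions as returned
  isDbResourcesMonitoringEnabled: true,
  monitoringIntervalSeconds: 60, // illustrative value
});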


@@ -0,0 +1,3 @@
export { monitoringSettingsApi } from './api/monitoringSettingsApi';
export type { PostgresMonitoringSettings } from './model/PostgresMonitoringSettings';
export { PostgresqlExtension } from './model/PostgresqlExtension';


@@ -0,0 +1,13 @@
import type { Database } from '../../../databases';
import { PostgresqlExtension } from './PostgresqlExtension';
export interface PostgresMonitoringSettings {
databaseId: string;
database?: Database;
isDbResourcesMonitoringEnabled: boolean;
monitoringIntervalSeconds: number;
installedExtensions: PostgresqlExtension[];
installedExtensionsRaw?: string;
}


@@ -0,0 +1,4 @@
export enum PostgresqlExtension {
PG_PROCTAB = 'pg_proctab',
PG_STAT_STATEMENTS = 'pg_stat_statements',
}
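
One plausible consumer of these two types, sketched under the assumption that RAM/IO charts should only render when the backend reported `pg_proctab` among the installed extensions:

const canShowResourceMetrics = (s: PostgresMonitoringSettings): boolean =>
  s.isDbResourcesMonitoringEnabled &&
  s.installedExtensions.includes(PostgresqlExtension.PG_PROCTAB);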


@@ -17,3 +17,6 @@ export { validateSlackNotifier } from './models/slack/validateSlackNotifier';
export type { DiscordNotifier } from './models/discord/DiscordNotifier';
export { validateDiscordNotifier } from './models/discord/validateDiscordNotifier';
export type { TeamsNotifier } from './models/teams/TeamsNotifier';
export { validateTeamsNotifier } from './models/teams/validateTeamsNotifier';


@@ -2,6 +2,7 @@ import type { NotifierType } from './NotifierType';
import type { DiscordNotifier } from './discord/DiscordNotifier';
import type { EmailNotifier } from './email/EmailNotifier';
import type { SlackNotifier } from './slack/SlackNotifier';
import type { TeamsNotifier } from './teams/TeamsNotifier';
import type { TelegramNotifier } from './telegram/TelegramNotifier';
import type { WebhookNotifier } from './webhook/WebhookNotifier';
@@ -17,4 +18,5 @@ export interface Notifier {
webhookNotifier?: WebhookNotifier;
slackNotifier?: SlackNotifier;
discordNotifier?: DiscordNotifier;
teamsNotifier?: TeamsNotifier;
}


@@ -4,4 +4,5 @@ export enum NotifierType {
WEBHOOK = 'WEBHOOK',
SLACK = 'SLACK',
DISCORD = 'DISCORD',
TEAMS = 'TEAMS',
}


@@ -12,6 +12,8 @@ export const getNotifierLogoFromType = (type: NotifierType) => {
return '/icons/notifiers/slack.svg';
case NotifierType.DISCORD:
return '/icons/notifiers/discord.svg';
case NotifierType.TEAMS:
return '/icons/notifiers/teams.svg';
default:
return '';
}


@@ -10,6 +10,10 @@ export const getNotifierNameFromType = (type: NotifierType) => {
return 'Webhook';
case NotifierType.SLACK:
return 'Slack';
case NotifierType.DISCORD:
return 'Discord';
case NotifierType.TEAMS:
return 'Teams';
default:
return '';
}


@@ -0,0 +1,7 @@
export interface TeamsNotifier {
/** Power Automate HTTP endpoint:
* trigger = "When an HTTP request is received"
* e.g. https://prod-00.westeurope.logic.azure.com/workflows/...
*/
powerAutomateUrl: string;
}
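
For illustration only, a notifier value with a fabricated Power Automate trigger URL (real URLs are issued by the flow itself when the trigger is saved):

const teams: TeamsNotifier = {
  powerAutomateUrl:
    'https://prod-00.westeurope.logic.azure.com/workflows/abc123/triggers/manual/paths/invoke', // fake URL
};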


@@ -0,0 +1,16 @@
import type { TeamsNotifier } from './TeamsNotifier';
export const validateTeamsNotifier = (notifier: TeamsNotifier): boolean => {
if (!notifier?.powerAutomateUrl) {
return false;
}
try {
const u = new URL(notifier.powerAutomateUrl);
if (u.protocol !== 'http:' && u.protocol !== 'https:') return false;
} catch {
return false;
}
return true;
};
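
Quick checks against the rules above (all URLs are made up):

validateTeamsNotifier({ powerAutomateUrl: 'https://prod-00.westeurope.logic.azure.com/workflows/abc' }); // true
validateTeamsNotifier({ powerAutomateUrl: 'ftp://example.com/hook' }); // false (protocol is not http or https)
validateTeamsNotifier({ powerAutomateUrl: 'not-a-url' }); // false (URL constructor throws)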

Some files were not shown because too many files have changed in this diff.