Compare commits

...

25 Commits

Author SHA1 Message Date
Rostislav Dugin
2e7cc1549a FIX (deploy): Add NAS testing to CI / CD workflow 2025-07-23 17:44:32 +03:00
Rostislav Dugin
62ff3962a1 FEATURE (storages): Add NAS storage 2025-07-23 17:35:10 +03:00
Rostislav Dugin
34afe9a347 FIX (spelling): Fix healthcheck spelling and add website to readme 2025-07-22 11:15:34 +03:00
Rostislav Dugin
4eb7c7a902 FEATURE (contribute): Update contribute readme [skip-release] 2025-07-22 11:04:33 +03:00
Rostislav Dugin
5f3c4f23d7 FIX (dependencies): Run extra dependencies via go mod tidy 2025-07-21 21:21:44 +03:00
Rostislav Dugin
ecb8212eab FEATURE (gin): Add gzip compression for static files and API responses 2025-07-21 21:19:27 +03:00
Rostislav Dugin
0e178343a8 FIX (monitoring): Fix text of down and up messages to not be the same in heading and body 2025-07-21 20:59:20 +03:00
Rostislav Dugin
0acd205f43 FIX (restores): Fix order of temp files closing that causes flaky tests 2025-07-21 20:01:19 +03:00
Rostislav Dugin
d678f9b3a2 FEATURE (container): Move PostgreSQL into container 2025-07-21 19:36:42 +03:00
Rostislav Dugin
7859951653 Merge branch 'main' of https://github.com/RostislavDugin/postgresus 2025-07-21 14:59:12 +03:00
Rostislav Dugin
7472aa1e1f FIX (backups): Do not double close backup file 2025-07-21 14:58:34 +03:00
Rostislav Dugin
9283713eab Merge pull request #6 from RostislavDugin/feature/update_readme
FEATURE (readme): Move badges under the description [skip-release]
2025-07-21 14:47:39 +03:00
Rostislav Dugin
9a9c170ffc FEATURE (readme): Move badges under the description [skip-release] 2025-07-21 14:43:48 +03:00
Rostislav Dugin
d05efc3151 FIX (deployments): Remove Docker Hub description update 2025-07-21 14:13:50 +03:00
Rostislav Dugin
1ee41fb673 FEATURE (auth): Add rate limiting for sign in endpoint to not allow brute force 2025-07-21 14:00:31 +03:00
Rostislav Dugin
529f080ca5 FEATURE (readme): Add pretty labels to GitHub 2025-07-21 13:47:47 +03:00
Rostislav Dugin
df0f7e0e7a FIX (deployment): Fix caching modules 2025-07-21 13:24:53 +03:00
Rostislav Dugin
6418de87db FIX (deployments): Use binaries instead of symlinks on PostgreSQL download 2025-07-21 13:19:40 +03:00
Rostislav Dugin
230f66bb10 FIX (deployments): Use binaries instead of symlinks on PostgreSQL download 2025-07-21 13:16:26 +03:00
Rostislav Dugin
1cd10772ae FIX (deployment): Download PostgreSQL client tools 2025-07-21 13:10:18 +03:00
Rostislav Dugin
d56518b847 FIX (deploy): Fix migrations run on deploy 2025-07-21 13:00:59 +03:00
Rostislav Dugin
64195024c6 FIX (deploy): Update text of docker compose executable 2025-07-21 12:55:04 +03:00
Rostislav Dugin
200429dbab FEATURE (deploy): Run tests on each deployment 2025-07-21 12:48:06 +03:00
Rostislav Dugin
07ad7d9a2a BREAKING CHANGE: Bump version to 1.0 2025-07-21 10:28:05 +03:00
Rostislav Dugin
ffefe68ca4 FIX (docker hub): Fix updating description 2025-07-21 10:02:22 +03:00
53 changed files with 1512 additions and 521 deletions

445
.github/workflows/ci-release.yml vendored Normal file
View File

@@ -0,0 +1,445 @@
name: CI and Release
on:
push:
branches: [main]
pull_request:
branches: [main]
workflow_dispatch:
jobs:
lint-backend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
key: ${{ runner.os }}-go-${{ hashFiles('backend/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.60.3
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Install swag for swagger generation
run: go install github.com/swaggo/swag/cmd/swag@v1.16.4
- name: Generate swagger docs
run: |
cd backend
swag init -d . -g cmd/main.go -o swagger
- name: Run golangci-lint
run: |
cd backend
golangci-lint run
- name: Verify go mod tidy
run: |
cd backend
go mod tidy
git diff --exit-code go.mod go.sum || (echo "go mod tidy made changes, please run 'go mod tidy' and commit the changes" && exit 1)
lint-frontend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
run: |
cd frontend
npm ci
- name: Check if prettier was run
run: |
cd frontend
npm run format
git diff --exit-code || (echo "Prettier made changes, please run 'npm run format' and commit the changes" && exit 1)
- name: Check if linter was run
run: |
cd frontend
npm run lint
test-backend:
runs-on: ubuntu-latest
needs: [lint-backend]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
key: ${{ runner.os }}-go-${{ hashFiles('backend/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Create .env file for testing
run: |
cd backend
cat > .env << EOF
# docker-compose.yml
DEV_DB_NAME=postgresus
DEV_DB_USERNAME=postgres
DEV_DB_PASSWORD=Q1234567
#app
ENV_MODE=development
# db
DATABASE_DSN=host=localhost user=postgres password=Q1234567 dbname=postgresus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
# migrations
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
GOOSE_MIGRATION_DIR=./migrations
# testing
# to get Google Drive env variables: add storage in UI and copy data from added storage here
TEST_GOOGLE_DRIVE_CLIENT_ID=${{ secrets.TEST_GOOGLE_DRIVE_CLIENT_ID }}
TEST_GOOGLE_DRIVE_CLIENT_SECRET=${{ secrets.TEST_GOOGLE_DRIVE_CLIENT_SECRET }}
TEST_GOOGLE_DRIVE_TOKEN_JSON=${{ secrets.TEST_GOOGLE_DRIVE_TOKEN_JSON }}
# testing DBs
TEST_POSTGRES_13_PORT=5001
TEST_POSTGRES_14_PORT=5002
TEST_POSTGRES_15_PORT=5003
TEST_POSTGRES_16_PORT=5004
TEST_POSTGRES_17_PORT=5005
# testing S3
TEST_MINIO_PORT=9000
TEST_MINIO_CONSOLE_PORT=9001
# testing NAS
TEST_NAS_PORT=5006
EOF
- name: Start test containers
run: |
cd backend
docker compose -f docker-compose.yml.example up -d
- name: Wait for containers to be ready
run: |
# Wait for main dev database
timeout 60 bash -c 'until docker exec dev-db pg_isready -h localhost -p 5437 -U postgres; do sleep 2; done'
# Wait for test databases
timeout 60 bash -c 'until nc -z localhost 5001; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5002; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5003; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5004; do sleep 2; done'
timeout 60 bash -c 'until nc -z localhost 5005; do sleep 2; done'
# Wait for MinIO
timeout 60 bash -c 'until nc -z localhost 9000; do sleep 2; done'
- name: Install PostgreSQL client tools
run: |
chmod +x backend/tools/download_linux.sh
cd backend/tools
./download_linux.sh
- name: Run database migrations
run: |
cd backend
go install github.com/pressly/goose/v3/cmd/goose@latest
goose up
- name: Run Go tests
run: |
cd backend
go test ./internal/...
- name: Stop test containers
if: always()
run: |
cd backend
docker compose -f docker-compose.yml.example down -v
determine-version:
runs-on: ubuntu-latest
needs: [test-backend, lint-frontend]
if: ${{ github.ref == 'refs/heads/main' && !contains(github.event.head_commit.message, '[skip-release]') }}
outputs:
should_release: ${{ steps.version_bump.outputs.should_release }}
new_version: ${{ steps.version_bump.outputs.new_version }}
bump_type: ${{ steps.version_bump.outputs.bump_type }}
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install semver
run: npm install -g semver
- name: Get current version
id: current_version
run: |
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
echo "current_version=${LATEST_TAG#v}" >> $GITHUB_OUTPUT
echo "Current version: ${LATEST_TAG#v}"
- name: Analyze commits and determine version bump
id: version_bump
run: |
CURRENT_VERSION="${{ steps.current_version.outputs.current_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s" --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s" --no-merges)
fi
echo "Analyzing commits:"
echo "$COMMITS"
# Initialize flags
HAS_FEATURE=false
HAS_FIX=false
HAS_BREAKING=false
# Analyze each commit
while IFS= read -r commit; do
if [[ "$commit" =~ ^FEATURE ]]; then
HAS_FEATURE=true
echo "Found FEATURE commit: $commit"
elif [[ "$commit" =~ ^FIX ]]; then
HAS_FIX=true
echo "Found FIX commit: $commit"
elif [[ "$commit" =~ ^REFACTOR ]]; then
HAS_FIX=true # Treat refactor as patch
echo "Found REFACTOR commit: $commit"
fi
# Check for breaking changes
if [[ "$commit" =~ BREAKING[[:space:]]CHANGE ]] || [[ "$commit" =~ "!" ]]; then
HAS_BREAKING=true
echo "Found BREAKING CHANGE: $commit"
fi
done <<< "$COMMITS"
# Determine version bump
if [ "$HAS_BREAKING" = true ]; then
BUMP_TYPE="major"
elif [ "$HAS_FEATURE" = true ]; then
BUMP_TYPE="minor"
elif [ "$HAS_FIX" = true ]; then
BUMP_TYPE="patch"
else
BUMP_TYPE="none"
fi
echo "bump_type=$BUMP_TYPE" >> $GITHUB_OUTPUT
if [ "$BUMP_TYPE" != "none" ]; then
NEW_VERSION=$(npx semver -i $BUMP_TYPE $CURRENT_VERSION)
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "should_release=true" >> $GITHUB_OUTPUT
echo "New version will be: $NEW_VERSION"
else
echo "should_release=false" >> $GITHUB_OUTPUT
echo "No version bump needed"
fi
build-only:
runs-on: ubuntu-latest
needs: [test-backend, lint-frontend]
if: ${{ github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[skip-release]') }}
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up QEMU (enables multi-arch emulation)
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push SHA-only tags
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
tags: |
rostislavdugin/postgresus:latest
rostislavdugin/postgresus:${{ github.sha }}
build-and-push:
runs-on: ubuntu-latest
needs: [determine-version]
if: ${{ needs.determine-version.outputs.should_release == 'true' }}
permissions:
contents: write
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up QEMU (enables multi-arch emulation)
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push with version tags
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
tags: |
rostislavdugin/postgresus:latest
rostislavdugin/postgresus:v${{ needs.determine-version.outputs.new_version }}
rostislavdugin/postgresus:${{ github.sha }}
release:
runs-on: ubuntu-latest
needs: [determine-version, build-and-push]
if: ${{ needs.determine-version.outputs.should_release == 'true' }}
permissions:
contents: write
pull-requests: write
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Generate changelog
id: changelog
run: |
NEW_VERSION="${{ needs.determine-version.outputs.new_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
fi
# Create changelog
CHANGELOG="# Changelog\n\n## [${NEW_VERSION}] - $(date +%Y-%m-%d)\n\n"
# Group commits by type and area
FEATURES=""
FIXES=""
REFACTORS=""
while IFS= read -r line; do
if [ -n "$line" ]; then
COMMIT_MSG=$(echo "$line" | cut -d'|' -f1)
COMMIT_HASH=$(echo "$line" | cut -d'|' -f2)
SHORT_HASH=${COMMIT_HASH:0:7}
# Parse commit message format: TYPE (area): description
if [[ "$COMMIT_MSG" == FEATURE* ]]; then
TEMP="${COMMIT_MSG#FEATURE}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}"
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FEATURES="${FEATURES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == FIX* ]]; then
TEMP="${COMMIT_MSG#FIX}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}"
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FIXES="${FIXES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == REFACTOR* ]]; then
TEMP="${COMMIT_MSG#REFACTOR}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}"
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
REFACTORS="${REFACTORS}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
fi
fi
done <<< "$COMMITS"
# Build changelog sections
if [ -n "$FEATURES" ]; then
CHANGELOG="${CHANGELOG}### ✨ Features\n${FEATURES}\n"
fi
if [ -n "$FIXES" ]; then
CHANGELOG="${CHANGELOG}### 🐛 Bug Fixes\n${FIXES}\n"
fi
if [ -n "$REFACTORS" ]; then
CHANGELOG="${CHANGELOG}### 🔨 Refactoring\n${REFACTORS}\n"
fi
# Add Docker image info
CHANGELOG="${CHANGELOG}### 🐳 Docker\n"
CHANGELOG="${CHANGELOG}- **Image**: \`rostislavdugin/postgresus:v${NEW_VERSION}\`\n"
CHANGELOG="${CHANGELOG}- **Platforms**: linux/amd64, linux/arm64\n\n"
# Set output for GitHub release
{
echo 'changelog<<EOF'
echo -e "$CHANGELOG"
echo EOF
} >> $GITHUB_OUTPUT
- name: Create GitHub Release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ needs.determine-version.outputs.new_version }}
release_name: Release v${{ needs.determine-version.outputs.new_version }}
body: ${{ steps.changelog.outputs.changelog }}
draft: false
prerelease: false

View File

@@ -1,120 +0,0 @@
name: Build & push Docker image
on:
push:
branches: [main]
workflow_dispatch: {}
jobs:
lint-backend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.23.3"
- name: Cache Go modules
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.60.3
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Install swag for swagger generation
run: go install github.com/swaggo/swag/cmd/swag@v1.16.4
- name: Generate swagger docs
run: |
cd backend
swag init -d . -g cmd/main.go -o swagger
- name: Run golangci-lint
run: |
cd backend
golangci-lint run
- name: Verify go mod tidy
run: |
cd backend
go mod tidy
git diff --exit-code go.mod go.sum || (echo "go mod tidy made changes, please run 'go mod tidy' and commit the changes" && exit 1)
lint-frontend:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
cache: "npm"
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
run: |
cd frontend
npm ci
- name: Check if prettier was run
run: |
cd frontend
npm run format
git diff --exit-code || (echo "Prettier made changes, please run 'npm run format' and commit the changes" && exit 1)
- name: Check if linter was run
run: |
cd frontend
npm run lint
build-and-push:
runs-on: ubuntu-latest
needs: [lint-backend, lint-frontend]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up QEMU (enables multi-arch emulation)
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64 # both chip families
tags: |
rostislavdugin/postgresus:latest
rostislavdugin/postgresus:${{ github.sha }}
- name: Update Docker Hub description
uses: peter-evans/dockerhub-description@v4
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: rostislavdugin/postgresus
short-description: "PostgreSQL monitoring and backups solution"
readme-filepath: ./README.md

View File

@@ -1,235 +0,0 @@
name: Automated Release
on:
push:
branches: [main]
workflow_dispatch:
jobs:
release:
runs-on: ubuntu-latest
if: ${{ !contains(github.event.head_commit.message, '[skip-release]') }}
permissions:
contents: write
pull-requests: write
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install dependencies
run: |
npm install -g conventional-changelog-cli
npm install -g semver
- name: Get current version
id: current_version
run: |
# Get the latest tag, default to 0.0.0 if no tags exist
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
echo "current_version=${LATEST_TAG#v}" >> $GITHUB_OUTPUT
echo "Current version: ${LATEST_TAG#v}"
- name: Analyze commits and determine version bump
id: version_bump
run: |
CURRENT_VERSION="${{ steps.current_version.outputs.current_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s" --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s" --no-merges)
fi
echo "Analyzing commits:"
echo "$COMMITS"
# Initialize flags
HAS_FEATURE=false
HAS_FIX=false
HAS_BREAKING=false
# Analyze each commit
while IFS= read -r commit; do
if [[ "$commit" =~ ^FEATURE ]]; then
HAS_FEATURE=true
echo "Found FEATURE commit: $commit"
elif [[ "$commit" =~ ^FIX ]]; then
HAS_FIX=true
echo "Found FIX commit: $commit"
elif [[ "$commit" =~ ^REFACTOR ]]; then
HAS_FIX=true # Treat refactor as patch
echo "Found REFACTOR commit: $commit"
fi
# Check for breaking changes
if [[ "$commit" =~ BREAKING[[:space:]]CHANGE ]] || [[ "$commit" =~ "!" ]]; then
HAS_BREAKING=true
echo "Found BREAKING CHANGE: $commit"
fi
done <<< "$COMMITS"
# Determine version bump
if [ "$HAS_BREAKING" = true ]; then
BUMP_TYPE="major"
elif [ "$HAS_FEATURE" = true ]; then
BUMP_TYPE="minor"
elif [ "$HAS_FIX" = true ]; then
BUMP_TYPE="patch"
else
BUMP_TYPE="none"
fi
echo "bump_type=$BUMP_TYPE" >> $GITHUB_OUTPUT
if [ "$BUMP_TYPE" != "none" ]; then
NEW_VERSION=$(npx semver -i $BUMP_TYPE $CURRENT_VERSION)
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "should_release=true" >> $GITHUB_OUTPUT
echo "New version will be: $NEW_VERSION"
else
echo "should_release=false" >> $GITHUB_OUTPUT
echo "No version bump needed"
fi
- name: Generate changelog
id: changelog
if: steps.version_bump.outputs.should_release == 'true'
run: |
CURRENT_VERSION="${{ steps.current_version.outputs.current_version }}"
NEW_VERSION="${{ steps.version_bump.outputs.new_version }}"
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
# Get commits since last tag
if [ "$LATEST_TAG" = "v0.0.0" ]; then
COMMITS=$(git log --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
else
COMMITS=$(git log ${LATEST_TAG}..HEAD --pretty=format:"%s|%H|%an|%ad" --date=short --no-merges)
fi
# Create changelog
CHANGELOG="# Changelog\n\n## [${NEW_VERSION}] - $(date +%Y-%m-%d)\n\n"
# Group commits by type and area
FEATURES=""
FIXES=""
REFACTORS=""
while IFS= read -r line; do
if [ -n "$line" ]; then
COMMIT_MSG=$(echo "$line" | cut -d'|' -f1)
COMMIT_HASH=$(echo "$line" | cut -d'|' -f2)
COMMIT_AUTHOR=$(echo "$line" | cut -d'|' -f3)
COMMIT_DATE=$(echo "$line" | cut -d'|' -f4)
SHORT_HASH=${COMMIT_HASH:0:7}
# Parse commit message format: TYPE (area): description
if [[ "$COMMIT_MSG" == FEATURE* ]]; then
# Extract area and description
TEMP="${COMMIT_MSG#FEATURE}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}" # trim leading spaces
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FEATURES="${FEATURES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == FIX* ]]; then
# Extract area and description
TEMP="${COMMIT_MSG#FIX}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}" # trim leading spaces
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
FIXES="${FIXES}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
elif [[ "$COMMIT_MSG" == REFACTOR* ]]; then
# Extract area and description
TEMP="${COMMIT_MSG#REFACTOR}"
TEMP="${TEMP#"${TEMP%%[![:space:]]*}"}" # trim leading spaces
if [[ "$TEMP" == \(* ]]; then
AREA=$(echo "$TEMP" | sed 's/^(\([^)]*\)).*/\1/')
DESC=$(echo "$TEMP" | sed 's/^([^)]*):[[:space:]]*//')
REFACTORS="${REFACTORS}- **${AREA}**: ${DESC} ([${SHORT_HASH}](https://github.com/${{ github.repository }}/commit/${COMMIT_HASH}))\n"
fi
fi
fi
done <<< "$COMMITS"
# Build changelog sections
if [ -n "$FEATURES" ]; then
CHANGELOG="${CHANGELOG}### ✨ Features\n${FEATURES}\n"
fi
if [ -n "$FIXES" ]; then
CHANGELOG="${CHANGELOG}### 🐛 Bug Fixes\n${FIXES}\n"
fi
if [ -n "$REFACTORS" ]; then
CHANGELOG="${CHANGELOG}### 🔨 Refactoring\n${REFACTORS}\n"
fi
# Save changelog to file
echo -e "$CHANGELOG" > RELEASE_CHANGELOG.md
# Update main CHANGELOG.md - preserve all version history
if [ -f "CHANGELOG.md" ]; then
# Get the header until [Unreleased] section
sed -n '1,/## \[Unreleased\]/p' CHANGELOG.md > NEW_CHANGELOG.md
echo "" >> NEW_CHANGELOG.md
# Add the new release (without the "# Changelog" header)
echo "## [${NEW_VERSION}] - $(date +%Y-%m-%d)" >> NEW_CHANGELOG.md
echo "" >> NEW_CHANGELOG.md
# Add the new release sections
if [ -n "$FEATURES" ]; then
echo "### ✨ Features" >> NEW_CHANGELOG.md
echo -e "$FEATURES" >> NEW_CHANGELOG.md
fi
if [ -n "$FIXES" ]; then
echo "### 🐛 Bug Fixes" >> NEW_CHANGELOG.md
echo -e "$FIXES" >> NEW_CHANGELOG.md
fi
if [ -n "$REFACTORS" ]; then
echo "### 🔨 Refactoring" >> NEW_CHANGELOG.md
echo -e "$REFACTORS" >> NEW_CHANGELOG.md
fi
# Get existing releases (everything after first ## [version] pattern)
sed -n '/## \[[0-9]/,$p' CHANGELOG.md >> NEW_CHANGELOG.md
# Replace the original file
mv NEW_CHANGELOG.md CHANGELOG.md
else
echo -e "$CHANGELOG" > CHANGELOG.md
fi
# Set output for GitHub release (escape newlines)
{
echo 'changelog<<EOF'
echo -e "$CHANGELOG"
echo EOF
} >> $GITHUB_OUTPUT
- name: Create GitHub Release
if: steps.version_bump.outputs.should_release == 'true'
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ steps.version_bump.outputs.new_version }}
release_name: Release v${{ steps.version_bump.outputs.new_version }}
body: ${{ steps.changelog.outputs.changelog }}
draft: false
prerelease: false

View File

@@ -1,10 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
<!-- This file is automatically updated by the release workflow -->

View File

@@ -53,18 +53,23 @@ RUN CGO_ENABLED=0 \
# ========= RUNTIME =========
FROM --platform=$TARGETPLATFORM debian:bookworm-slim
# Install PostgreSQL client tools (versions 13-17)
# Install PostgreSQL server and client tools (versions 13-17)
RUN apt-get update && apt-get install -y --no-install-recommends \
wget ca-certificates gnupg lsb-release && \
wget ca-certificates gnupg lsb-release sudo gosu && \
wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" \
> /etc/apt/sources.list.d/pgdg.list && \
apt-get update && \
apt-get install -y --no-install-recommends \
postgresql-client-13 postgresql-client-14 postgresql-client-15 \
postgresql-17 postgresql-client-13 postgresql-client-14 postgresql-client-15 \
postgresql-client-16 postgresql-client-17 && \
rm -rf /var/lib/apt/lists/*
# Create postgres user and set up directories
RUN useradd -m -s /bin/bash postgres || true && \
mkdir -p /postgresus-data/pgdata && \
chown -R postgres:postgres /postgresus-data/pgdata
WORKDIR /app
# Copy Goose from build stage
@@ -87,7 +92,71 @@ RUN if [ ! -f /app/.env ]; then \
fi; \
fi
# Create startup script
COPY <<EOF /app/start.sh
#!/bin/bash
set -e
# PostgreSQL 17 binary paths
PG_BIN="/usr/lib/postgresql/17/bin"
# Ensure proper ownership of data directory
echo "Setting up data directory permissions..."
mkdir -p /postgresus-data/pgdata
chown -R postgres:postgres /postgresus-data
# Initialize PostgreSQL if not already initialized
if [ ! -s "/postgresus-data/pgdata/PG_VERSION" ]; then
echo "Initializing PostgreSQL database..."
gosu postgres \$PG_BIN/initdb -D /postgresus-data/pgdata --encoding=UTF8 --locale=C.UTF-8
# Configure PostgreSQL
echo "host all all 127.0.0.1/32 md5" >> /postgresus-data/pgdata/pg_hba.conf
echo "local all all trust" >> /postgresus-data/pgdata/pg_hba.conf
echo "port = 5437" >> /postgresus-data/pgdata/postgresql.conf
echo "listen_addresses = 'localhost'" >> /postgresus-data/pgdata/postgresql.conf
echo "shared_buffers = 256MB" >> /postgresus-data/pgdata/postgresql.conf
echo "max_connections = 100" >> /postgresus-data/pgdata/postgresql.conf
fi
# Start PostgreSQL in background
echo "Starting PostgreSQL..."
gosu postgres \$PG_BIN/postgres -D /postgresus-data/pgdata -p 5437 &
POSTGRES_PID=\$!
# Wait for PostgreSQL to be ready
echo "Waiting for PostgreSQL to be ready..."
for i in {1..30}; do
if gosu postgres \$PG_BIN/pg_isready -p 5437 -h localhost >/dev/null 2>&1; then
echo "PostgreSQL is ready!"
break
fi
if [ \$i -eq 30 ]; then
echo "PostgreSQL failed to start"
exit 1
fi
sleep 1
done
# Create database and set password for postgres user
echo "Setting up database and user..."
gosu postgres \$PG_BIN/psql -p 5437 -h localhost -d postgres << 'SQL'
ALTER USER postgres WITH PASSWORD 'Q1234567';
CREATE DATABASE "postgresus" OWNER postgres;
\q
SQL
# Start the main application
echo "Starting Postgresus application..."
exec ./main
EOF
RUN chmod +x /app/start.sh
EXPOSE 4005
ENTRYPOINT ["./main"]
# Volume for PostgreSQL data
VOLUME ["/postgresus-data"]
ENTRYPOINT ["/app/start.sh"]
CMD []

View File

@@ -1,9 +1,18 @@
<div align="center">
<img src="assets/logo.svg" alt="Postgresus Logo" width="250"/>
<img src="assets/logo.svg" style="margin-bottom: 20px;" alt="Postgresus Logo" width="250"/>
<h3>PostgreSQL monitoring and backup</h3>
<p>Free, open source and self-hosted solution for automated PostgreSQL monitoring and backups. With multiple storage options and notifications</p>
<!-- Badges -->
[![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
[![Docker Pulls](https://img.shields.io/docker/pulls/rostislavdugin/postgresus?color=brightgreen)](https://hub.docker.com/r/rostislavdugin/postgresus)
[![Platform](https://img.shields.io/badge/platform-linux%20%7C%20macos%20%7C%20windows-lightgrey)](https://github.com/RostislavDugin/postgresus)
[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-13%20%7C%2014%20%7C%2015%20%7C%2016%20%7C%2017-336791?logo=postgresql&logoColor=white)](https://www.postgresql.org/)
[![Self Hosted](https://img.shields.io/badge/self--hosted-yes-brightgreen)](https://github.com/RostislavDugin/postgresus)
[![Open Source](https://img.shields.io/badge/open%20source-❤️-red)](https://github.com/RostislavDugin/postgresus)
<p>
<a href="#-features">Features</a> •
<a href="#-installation">Installation</a> •
@@ -11,8 +20,14 @@
<a href="#-license">License</a> •
<a href="#-contributing">Contributing</a>
</p>
<p style="margin-top: 20px; margin-bottom: 20px; font-size: 1.2em;">
<a href="https://postgresus.com" target="_blank"><strong>🌐 Postgresus website</strong></a>
</p>
<img src="assets/dashboard.svg" alt="Postgresus Dashboard" width="800"/>
</div>
---
@@ -55,21 +70,29 @@
- **Historical data**: View trends and patterns over time
- **Alert system**: Get notified when issues are detected
### 📦 Installation
You have three ways to install Postgresus:
- Script (recommended)
- Simple Docker run
- Docker Compose setup
<img src="assets/healthchecks.svg" alt="Postgresus Dashboard" width="800"/>
---
## 📦 Installation
You have two ways to install Postgresus: via automated script (recommended) or manual Docker Compose setup.
You have three ways to install Postgresus: automated script (recommended), simple Docker run, or Docker Compose setup.
### Option 1: Automated Installation Script (Recommended, Linux only)
The installation script will:
- ✅ Install Docker with Docker Compose (if not already installed)
- ✅ Create optimized `docker-compose.yml` configuration
- ✅ Set up automatic startup on system reboot via cron
- ✅ Install Docker with Docker Compose (if not already installed)
- ✅ Set up Postgresus
- ✅ Configure automatic startup on system reboot
```bash
sudo apt-get install -y curl && \
@@ -77,7 +100,26 @@ sudo curl -sSL https://raw.githubusercontent.com/RostislavDugin/postgresus/refs/
| sudo bash
```
### Option 2: Manual Docker Compose Setup
### Option 2: Simple Docker Run
The easiest way to run Postgresus with embedded PostgreSQL:
```bash
docker run -d \
--name postgresus \
-p 4005:4005 \
-v ./postgresus-data:/postgresus-data \
--restart unless-stopped \
rostislavdugin/postgresus:latest
```
This single command will:
- ✅ Start Postgresus
- ✅ Store all data in `./postgresus-data` directory
- ✅ Automatically restart on system reboot
### Option 3: Docker Compose Setup
Create a `docker-compose.yml` file with the following configuration:
@@ -92,29 +134,6 @@ services:
- "4005:4005"
volumes:
- ./postgresus-data:/postgresus-data
depends_on:
postgresus-db:
condition: service_healthy
restart: unless-stopped
postgresus-db:
container_name: postgresus-db
image: postgres:17
# we use default values, but do not expose
# PostgreSQL ports so it is safe
environment:
- POSTGRES_DB=postgresus
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=Q1234567
volumes:
- ./pgdata:/var/lib/postgresql/data
command: -p 5437
shm_size: 10gb
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d postgresus -p 5437"]
interval: 5s
timeout: 5s
retries: 5
restart: unless-stopped
```

View File

@@ -24,4 +24,6 @@ TEST_POSTGRES_16_PORT=5004
TEST_POSTGRES_17_PORT=5005
# testing S3
TEST_MINIO_PORT=9000
TEST_MINIO_CONSOLE_PORT=9001
TEST_MINIO_CONSOLE_PORT=9001
# testing NAS
TEST_NAS_PORT=5006

View File

@@ -5,9 +5,9 @@ DEV_DB_PASSWORD=Q1234567
#app
ENV_MODE=production
# db
DATABASE_DSN=host=postgresus-db user=postgres password=Q1234567 dbname=postgresus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@postgresus-db:5437/postgresus?sslmode=disable
DATABASE_DSN=host=localhost user=postgres password=Q1234567 dbname=postgresus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
# migrations
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgres://postgres:Q1234567@postgresus-db:5437/postgresus?sslmode=disable
GOOSE_DBSTRING=postgres://postgres:Q1234567@localhost:5437/postgresus?sslmode=disable
GOOSE_MIGRATION_DIR=./migrations

View File

@@ -1,15 +0,0 @@
repos:
- repo: local
hooks:
- id: golangci-lint-fmt
name: Format Go Code using golangci-lint fmt
entry: golangci-lint fmt ./...
language: system
types: [go]
- id: golangci-lint-run
name: Run golangci-lint for static analysis
entry: golangci-lint run
language: system
types: [go]
pass_filenames: false

View File

@@ -31,6 +31,7 @@ import (
_ "postgresus-backend/swagger" // swagger docs
"github.com/gin-contrib/cors"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
@@ -61,6 +62,15 @@ func main() {
gin.SetMode(gin.ReleaseMode)
ginApp := gin.Default()
// Add GZIP compression middleware
ginApp.Use(gzip.Gzip(
gzip.DefaultCompression,
// Don't compress already compressed files
gzip.WithExcludedExtensions(
[]string{".png", ".gif", ".jpeg", ".jpg", ".ico", ".svg", ".pdf", ".mp4"},
),
))
enableCors(ginApp)
setUpRoutes(ginApp)
setUpDependencies()

View File

@@ -86,3 +86,19 @@ services:
- POSTGRES_PASSWORD=testpassword
container_name: test-postgres-17
shm_size: 1gb
# Test NAS server (Samba)
test-nas:
image: dperson/samba:latest
ports:
- "${TEST_NAS_PORT:-445}:445"
environment:
- USERID=1000
- GROUPID=1000
volumes:
- ./temp/nas:/shared
command: >
-u "testuser;testpassword"
-s "backups;/shared;yes;no;no;testuser"
-p
container_name: test-nas

View File

@@ -4,6 +4,7 @@ go 1.23.3
require (
github.com/gin-contrib/cors v1.7.5
github.com/gin-contrib/gzip v1.2.3
github.com/gin-gonic/gin v1.10.0
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/uuid v1.6.0
@@ -19,6 +20,7 @@ require (
github.com/swaggo/gin-swagger v1.6.0
github.com/swaggo/swag v1.16.4
golang.org/x/crypto v0.39.0
golang.org/x/time v0.12.0
gorm.io/driver/postgres v1.5.11
gorm.io/gorm v1.26.1
)
@@ -27,9 +29,11 @@ require (
cloud.google.com/go/auth v0.16.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/hirochachacha/go-smb2 v1.1.0
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0 // indirect

View File

@@ -35,10 +35,12 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/gin-contrib/cors v1.7.5 h1:cXC9SmofOrRg0w9PigwGlHG3ztswH6bqq4vJVXnvYMk=
github.com/gin-contrib/cors v1.7.5/go.mod h1:4q3yi7xBEDDWKapjT2o1V7mScKDDr8k+jZ0fSquGoy0=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/gzip v1.2.3 h1:dAhT722RuEG330ce2agAs75z7yB+NKvX/ZM1r8w0u2U=
github.com/gin-contrib/gzip v1.2.3/go.mod h1:ad72i4Bzmaypk8M762gNXa2wkxxjbz0icRNnuLJ9a/c=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
@@ -91,6 +93,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ilyakaznacheev/cleanenv v1.5.0 h1:0VNZXggJE2OYdXE87bfSSwGxeiGt9moSR2lOrsHHvr4=
github.com/ilyakaznacheev/cleanenv v1.5.0/go.mod h1:a5aDzaJrLCQZsazHol1w8InnDcOX0OColm64SlIi6gk=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
@@ -210,12 +214,14 @@ go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2
golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU=
golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
@@ -230,6 +236,7 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -252,6 +259,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=

View File

@@ -41,6 +41,8 @@ type EnvVariables struct {
TestMinioPort string `env:"TEST_MINIO_PORT"`
TestMinioConsolePort string `env:"TEST_MINIO_CONSOLE_PORT"`
TestNASPort string `env:"TEST_NAS_PORT"`
}
var (
@@ -161,6 +163,11 @@ func loadEnvVariables() {
log.Error("TEST_MINIO_CONSOLE_PORT is empty")
os.Exit(1)
}
if env.TestNASPort == "" {
log.Error("TEST_NAS_PORT is empty")
os.Exit(1)
}
}
log.Info("Environment variables loaded successfully!")

View File

@@ -255,10 +255,10 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
copyResultCh <- err
}()
// Wait for the dump and copy to finish
waitErr := cmd.Wait()
// Wait for the copy to finish first, then the dump process
copyErr := <-copyResultCh
bytesWritten := <-bytesWrittenCh
waitErr := cmd.Wait()
// Check for shutdown before finalizing
if config.IsShouldShutdown() {

View File

@@ -224,7 +224,7 @@ func (uc *CheckPgHealthUseCase) sendDbStatusNotification(
messageBody := ""
if newHealthStatus == databases.HealthStatusAvailable {
messageTitle = fmt.Sprintf("✅ [%s] DB is back online", database.Name)
messageTitle = fmt.Sprintf("✅ [%s] DB is online", database.Name)
messageBody = fmt.Sprintf("✅ [%s] DB is back online", database.Name)
} else {
messageTitle = fmt.Sprintf("❌ [%s] DB is unavailable", database.Name)

View File

@@ -303,7 +303,7 @@ func Test_CheckPgHealthUseCase(t *testing.T) {
t,
"SendNotification",
mock.Anything,
fmt.Sprintf("✅ [%s] DB is back online", database.Name),
fmt.Sprintf("✅ [%s] DB is online", database.Name),
fmt.Sprintf("✅ [%s] DB is back online", database.Name),
)
})

View File

@@ -222,11 +222,8 @@ func (uc *RestorePostgresqlBackupUsecase) downloadBackupToTempFile(
return "", nil, fmt.Errorf("failed to write backup to temporary file: %w", err)
}
// Close the temp file to ensure all data is written
if err := tempFile.Close(); err != nil {
cleanupFunc()
return "", nil, fmt.Errorf("failed to close temporary backup file: %w", err)
}
// Close the temp file to ensure all data is written - this is handled by defer
// Removing explicit close to avoid double-close error
uc.logger.Info("Backup file written to temporary location", "tempFile", tempBackupFile)
return tempBackupFile, cleanupFunc, nil

View File

@@ -6,4 +6,5 @@ const (
StorageTypeLocal StorageType = "LOCAL"
StorageTypeS3 StorageType = "S3"
StorageTypeGoogleDrive StorageType = "GOOGLE_DRIVE"
StorageTypeNAS StorageType = "NAS"
)

View File

@@ -6,6 +6,7 @@ import (
"log/slog"
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
"github.com/google/uuid"
@@ -22,6 +23,7 @@ type Storage struct {
LocalStorage *local_storage.LocalStorage `json:"localStorage" gorm:"foreignKey:StorageID"`
S3Storage *s3_storage.S3Storage `json:"s3Storage" gorm:"foreignKey:StorageID"`
GoogleDriveStorage *google_drive_storage.GoogleDriveStorage `json:"googleDriveStorage" gorm:"foreignKey:StorageID"`
NASStorage *nas_storage.NASStorage `json:"nasStorage" gorm:"foreignKey:StorageID"`
}
func (s *Storage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error {
@@ -69,6 +71,8 @@ func (s *Storage) getSpecificStorage() StorageFileSaver {
return s.S3Storage
case StorageTypeGoogleDrive:
return s.GoogleDriveStorage
case StorageTypeNAS:
return s.NASStorage
default:
panic("invalid storage type: " + string(s.Type))
}

View File

@@ -10,8 +10,10 @@ import (
"postgresus-backend/internal/config"
google_drive_storage "postgresus-backend/internal/features/storages/models/google_drive"
local_storage "postgresus-backend/internal/features/storages/models/local"
nas_storage "postgresus-backend/internal/features/storages/models/nas"
s3_storage "postgresus-backend/internal/features/storages/models/s3"
"postgresus-backend/internal/util/logger"
"strconv"
"testing"
"time"
@@ -44,6 +46,14 @@ func Test_Storage_BasicOperations(t *testing.T) {
require.NoError(t, err, "Failed to setup test file")
defer os.Remove(testFilePath)
// Setup NAS port
nasPort := 445
if portStr := config.GetEnv().TestNASPort; portStr != "" {
if port, err := strconv.Atoi(portStr); err == nil {
nasPort = port
}
}
// Run tests
testCases := []struct {
name string
@@ -73,6 +83,20 @@ func Test_Storage_BasicOperations(t *testing.T) {
TokenJSON: config.GetEnv().TestGoogleDriveTokenJSON,
},
},
{
name: "NASStorage",
storage: &nas_storage.NASStorage{
StorageID: uuid.New(),
Host: "localhost",
Port: nasPort,
Share: "backups",
Username: "testuser",
Password: "testpassword",
UseSSL: false,
Domain: "",
Path: "test-files",
},
},
}
for _, tc := range testCases {
@@ -201,4 +225,5 @@ func validateEnvVariables(t *testing.T) {
assert.NotEmpty(t, env.TestGoogleDriveClientSecret, "TEST_GOOGLE_DRIVE_CLIENT_SECRET is empty")
assert.NotEmpty(t, env.TestGoogleDriveTokenJSON, "TEST_GOOGLE_DRIVE_TOKEN_JSON is empty")
assert.NotEmpty(t, env.TestMinioPort, "TEST_MINIO_PORT is empty")
assert.NotEmpty(t, env.TestNASPort, "TEST_NAS_PORT is empty")
}

View File

@@ -62,8 +62,8 @@ func (l *LocalStorage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.R
return fmt.Errorf("failed to sync temp file: %w", err)
}
err = tempFile.Close()
if err != nil {
// Close the temp file explicitly before moving it (required on Windows)
if err = tempFile.Close(); err != nil {
logger.Error("Failed to close temp file", "fileId", fileID.String(), "error", err)
return fmt.Errorf("failed to close temp file: %w", err)
}

View File

@@ -0,0 +1,401 @@
package nas_storage
import (
"crypto/tls"
"errors"
"fmt"
"io"
"log/slog"
"net"
"path/filepath"
"strings"
"time"
"github.com/google/uuid"
"github.com/hirochachacha/go-smb2"
)
// NASStorage stores the connection settings for saving backups on an
// SMB/CIFS network share. Persisted by GORM in the "nas_storages" table
// (see TableName); StorageID links the row 1:1 to its parent Storage record.
type NASStorage struct {
	StorageID uuid.UUID `json:"storageId" gorm:"primaryKey;type:uuid;column:storage_id"`
	Host      string    `json:"host" gorm:"not null;type:text;column:host"`
	Port      int       `json:"port" gorm:"not null;default:445;column:port"`
	Share     string    `json:"share" gorm:"not null;type:text;column:share"`
	Username  string    `json:"username" gorm:"not null;type:text;column:username"`
	Password  string    `json:"password" gorm:"not null;type:text;column:password"`
	UseSSL    bool      `json:"useSsl" gorm:"not null;default:false;column:use_ssl"`
	// Domain is the NTLM authentication domain; may be empty.
	Domain string `json:"domain" gorm:"type:text;column:domain"`
	// Path is an optional base directory on the share; created on demand
	// by ensureDirectory before files are written.
	Path string `json:"path" gorm:"type:text;column:path"`
}
// TableName tells GORM which table backs NASStorage.
func (n *NASStorage) TableName() string {
	return "nas_storages"
}
// SaveFile streams the given file to the NAS share, named after fileID and
// placed under the configured base Path (created on demand). Resources are
// released via deferred cleanup in LIFO order: remote file close runs
// first, then the share unmount, then the session logoff. Cleanup failures
// are logged but never override the function's result.
func (n *NASStorage) SaveFile(logger *slog.Logger, fileID uuid.UUID, file io.Reader) error {
	logger.Info("Starting to save file to NAS storage", "fileId", fileID.String(), "host", n.Host)

	// Authenticate against the NAS.
	session, err := n.createSession()
	if err != nil {
		logger.Error("Failed to create NAS session", "fileId", fileID.String(), "error", err)
		return fmt.Errorf("failed to create NAS session: %w", err)
	}
	defer func() {
		if logoffErr := session.Logoff(); logoffErr != nil {
			logger.Error(
				"Failed to logoff NAS session",
				"fileId",
				fileID.String(),
				"error",
				logoffErr,
			)
		}
	}()

	// Mount the configured share for this upload.
	fs, err := session.Mount(n.Share)
	if err != nil {
		logger.Error(
			"Failed to mount NAS share",
			"fileId",
			fileID.String(),
			"share",
			n.Share,
			"error",
			err,
		)
		return fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
	}
	defer func() {
		if umountErr := fs.Umount(); umountErr != nil {
			logger.Error(
				"Failed to unmount NAS share",
				"fileId",
				fileID.String(),
				"error",
				umountErr,
			)
		}
	}()

	// Ensure the directory exists
	if n.Path != "" {
		if err := n.ensureDirectory(fs, n.Path); err != nil {
			logger.Error(
				"Failed to ensure directory",
				"fileId",
				fileID.String(),
				"path",
				n.Path,
				"error",
				err,
			)
			return fmt.Errorf("failed to ensure directory: %w", err)
		}
	}

	filePath := n.getFilePath(fileID.String())
	logger.Debug("Creating file on NAS", "fileId", fileID.String(), "filePath", filePath)

	nasFile, err := fs.Create(filePath)
	if err != nil {
		logger.Error(
			"Failed to create file on NAS",
			"fileId",
			fileID.String(),
			"filePath",
			filePath,
			"error",
			err,
		)
		return fmt.Errorf("failed to create file on NAS: %w", err)
	}
	defer func() {
		if closeErr := nasFile.Close(); closeErr != nil {
			logger.Error("Failed to close NAS file", "fileId", fileID.String(), "error", closeErr)
		}
	}()

	logger.Debug("Copying file data to NAS", "fileId", fileID.String())

	// Stream the payload; io.Copy avoids buffering the whole backup in memory.
	_, err = io.Copy(nasFile, file)
	if err != nil {
		logger.Error("Failed to write file to NAS", "fileId", fileID.String(), "error", err)
		return fmt.Errorf("failed to write file to NAS: %w", err)
	}

	logger.Info(
		"Successfully saved file to NAS storage",
		"fileId",
		fileID.String(),
		"filePath",
		filePath,
	)

	return nil
}
// GetFile opens the backup file identified by fileID on the NAS share and
// returns a reader over its contents.
//
// The returned io.ReadCloser owns the remote file handle, the share mount
// and the SMB session; callers MUST call Close() to release all three
// (see nasFileReader.Close).
func (n *NASStorage) GetFile(fileID uuid.UUID) (io.ReadCloser, error) {
	session, err := n.createSession()
	if err != nil {
		return nil, fmt.Errorf("failed to create NAS session: %w", err)
	}

	fs, err := session.Mount(n.Share)
	if err != nil {
		_ = session.Logoff()
		return nil, fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
	}

	filePath := n.getFilePath(fileID.String())

	// Check if file exists. Wrap the underlying error instead of discarding
	// it, so callers can tell a missing file apart from e.g. a permission
	// problem on the share.
	if _, err = fs.Stat(filePath); err != nil {
		_ = fs.Umount()
		_ = session.Logoff()
		return nil, fmt.Errorf("file not found: %s: %w", fileID.String(), err)
	}

	nasFile, err := fs.Open(filePath)
	if err != nil {
		_ = fs.Umount()
		_ = session.Logoff()
		return nil, fmt.Errorf("failed to open file from NAS: %w", err)
	}

	// Return a wrapped reader that cleans up resources when closed
	return &nasFileReader{
		file:    nasFile,
		fs:      fs,
		session: session,
	}, nil
}
// DeleteFile removes the backup file identified by fileID from the NAS
// share. Deleting a file that does not exist is treated as success, which
// makes the operation idempotent.
func (n *NASStorage) DeleteFile(fileID uuid.UUID) error {
	sess, err := n.createSession()
	if err != nil {
		return fmt.Errorf("failed to create NAS session: %w", err)
	}
	defer func() { _ = sess.Logoff() }()

	share, err := sess.Mount(n.Share)
	if err != nil {
		return fmt.Errorf("failed to mount share '%s': %w", n.Share, err)
	}
	defer func() { _ = share.Umount() }()

	target := n.getFilePath(fileID.String())

	// Already gone? Then there is nothing left to do.
	if _, statErr := share.Stat(target); statErr != nil {
		return nil
	}

	if err := share.Remove(target); err != nil {
		return fmt.Errorf("failed to delete file from NAS: %w", err)
	}

	return nil
}
// Validate checks that the NAS configuration is complete and usable: it
// first verifies the static fields in order, then performs a live
// connection test against the share (see TestConnection).
func (n *NASStorage) Validate() error {
	fieldChecks := []struct {
		invalid bool
		message string
	}{
		{n.Host == "", "NAS host is required"},
		{n.Share == "", "NAS share is required"},
		{n.Username == "", "NAS username is required"},
		{n.Password == "", "NAS password is required"},
		{n.Port <= 0 || n.Port > 65535, "NAS port must be between 1 and 65535"},
	}
	for _, check := range fieldChecks {
		if check.invalid {
			return errors.New(check.message)
		}
	}

	// Test the configuration by creating a session
	return n.TestConnection()
}
// TestConnection verifies that the NAS is reachable with the configured
// credentials: it logs in, mounts the share, and — when a base path is
// configured — makes sure that directory exists (creating it if needed).
func (n *NASStorage) TestConnection() error {
	sess, err := n.createSession()
	if err != nil {
		return fmt.Errorf("failed to connect to NAS: %w", err)
	}
	defer func() { _ = sess.Logoff() }()

	// Mounting proves the account actually has access to the share.
	share, err := sess.Mount(n.Share)
	if err != nil {
		return fmt.Errorf("failed to access share '%s': %w", n.Share, err)
	}
	defer func() { _ = share.Umount() }()

	if n.Path == "" {
		return nil
	}

	// A configured sub-path must exist or be creatable.
	if err := n.ensureDirectory(share, n.Path); err != nil {
		return fmt.Errorf("failed to access or create path '%s': %w", n.Path, err)
	}

	return nil
}
// createSession dials the NAS and performs NTLM authentication, returning
// a ready-to-use SMB2 session. The caller is responsible for calling
// session.Logoff() when done.
//
// NOTE(review): the raw net.Conn is only closed here on a failed Dial; on
// success its lifetime is tied to the session. Presumably session.Logoff()
// tears down the transport as well — TODO confirm against the go-smb2
// documentation, otherwise each session leaks one TCP connection.
func (n *NASStorage) createSession() (*smb2.Session, error) {
	// Create connection with timeout
	conn, err := n.createConnection()
	if err != nil {
		return nil, err
	}

	// Create SMB2 dialer
	d := &smb2.Dialer{
		Initiator: &smb2.NTLMInitiator{
			User:     n.Username,
			Password: n.Password,
			Domain:   n.Domain,
		},
	}

	// Create session
	session, err := d.Dial(conn)
	if err != nil {
		_ = conn.Close()
		return nil, fmt.Errorf("failed to create SMB session: %w", err)
	}

	return session, nil
}
// createConnection opens the raw transport to the NAS: plain TCP by
// default, or TLS when UseSSL is set. Both paths use a 10-second dial
// timeout. The caller owns the returned connection and must close it.
func (n *NASStorage) createConnection() (net.Conn, error) {
	address := net.JoinHostPort(n.Host, fmt.Sprintf("%d", n.Port))

	// Create connection with timeout
	dialer := &net.Dialer{
		Timeout: 10 * time.Second,
	}

	if n.UseSSL {
		// Use TLS connection
		tlsConfig := &tls.Config{
			ServerName:         n.Host,
			InsecureSkipVerify: false, // Change to true if you want to skip cert verification
		}

		conn, err := tls.DialWithDialer(dialer, "tcp", address, tlsConfig)
		if err != nil {
			return nil, fmt.Errorf("failed to create SSL connection to %s: %w", address, err)
		}
		return conn, nil
	}

	// Use regular TCP connection. (Flattened: no "else" after a branch that
	// always returns — idiomatic Go error flow.)
	conn, err := dialer.Dial("tcp", address)
	if err != nil {
		return nil, fmt.Errorf("failed to create connection to %s: %w", address, err)
	}
	return conn, nil
}
// ensureDirectory makes sure path exists on the mounted share, creating
// each missing segment in turn ("mkdir -p" behaviour).
func (n *NASStorage) ensureDirectory(fs *smb2.Share, path string) error {
	// Normalize to forward slashes, which SMB accepts on every platform.
	path = strings.ReplaceAll(filepath.Clean(path), "\\", "/")

	// Fast path: the whole directory tree is already there.
	if _, err := fs.Stat(path); err == nil {
		return nil
	}

	// Walk the path segment by segment, creating whatever is missing.
	built := ""
	for _, segment := range strings.Split(path, "/") {
		if segment == "" || segment == "." {
			continue
		}

		if built == "" {
			built = segment
		} else {
			built += "/" + segment
		}

		// Skip segments that already exist; create the rest.
		if _, err := fs.Stat(built); err == nil {
			continue
		}
		if err := fs.Mkdir(built, 0755); err != nil {
			return fmt.Errorf("failed to create directory '%s': %w", built, err)
		}
	}

	return nil
}
// getFilePath builds the share-relative location for a stored file:
// "<Path>/<filename>" when a base path is configured, otherwise just the
// bare filename.
func (n *NASStorage) getFilePath(filename string) string {
	if n.Path == "" {
		return filename
	}

	// Clean the base path and force forward slashes for SMB.
	base := strings.ReplaceAll(filepath.Clean(n.Path), "\\", "/")
	return base + "/" + filename
}
// nasFileReader wraps the NAS file and handles cleanup of the resources
// acquired by GetFile: the open remote file, the mounted share, and the
// SMB session. It implements io.ReadCloser.
type nasFileReader struct {
	file    *smb2.File
	fs      *smb2.Share
	session *smb2.Session
}
// Read forwards to the underlying remote NAS file handle.
func (r *nasFileReader) Read(p []byte) (n int, err error) {
	return r.file.Read(p)
}
// Close releases every resource backing the reader, in reverse order of
// acquisition: remote file handle first, then the share mount, then the
// SMB session. All three steps are always attempted even when an earlier
// one fails, and every failure is reported via errors.Join (the original
// returned only the first error and silently dropped the rest; the local
// slice was also named "errors", shadowing the imported errors package).
func (r *nasFileReader) Close() error {
	// Close resources in reverse order
	var errs []error

	if r.file != nil {
		if err := r.file.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close file: %w", err))
		}
	}
	if r.fs != nil {
		if err := r.fs.Umount(); err != nil {
			errs = append(errs, fmt.Errorf("failed to unmount share: %w", err))
		}
	}
	if r.session != nil {
		if err := r.session.Logoff(); err != nil {
			errs = append(errs, fmt.Errorf("failed to logoff session: %w", err))
		}
	}

	// errors.Join returns nil when errs is empty, preserving the
	// "no error on clean close" contract.
	return errors.Join(errs...)
}

View File

@@ -26,17 +26,21 @@ func (r *StorageRepository) Save(storage *Storage) (*Storage, error) {
if storage.GoogleDriveStorage != nil {
storage.GoogleDriveStorage.StorageID = storage.ID
}
case StorageTypeNAS:
if storage.NASStorage != nil {
storage.NASStorage.StorageID = storage.ID
}
}
if storage.ID == uuid.Nil {
if err := tx.Create(storage).
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage").
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage").
Error; err != nil {
return err
}
} else {
if err := tx.Save(storage).
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage").
Omit("LocalStorage", "S3Storage", "GoogleDriveStorage", "NASStorage").
Error; err != nil {
return err
}
@@ -64,6 +68,13 @@ func (r *StorageRepository) Save(storage *Storage) (*Storage, error) {
return err
}
}
case StorageTypeNAS:
if storage.NASStorage != nil {
storage.NASStorage.StorageID = storage.ID // Ensure ID is set
if err := tx.Save(storage.NASStorage).Error; err != nil {
return err
}
}
}
return nil
@@ -84,6 +95,7 @@ func (r *StorageRepository) FindByID(id uuid.UUID) (*Storage, error) {
Preload("LocalStorage").
Preload("S3Storage").
Preload("GoogleDriveStorage").
Preload("NASStorage").
Where("id = ?", id).
First(&s).Error; err != nil {
return nil, err
@@ -100,6 +112,7 @@ func (r *StorageRepository) FindByUserID(userID uuid.UUID) ([]*Storage, error) {
Preload("LocalStorage").
Preload("S3Storage").
Preload("GoogleDriveStorage").
Preload("NASStorage").
Where("user_id = ?", userID).
Order("name ASC").
Find(&storages).Error; err != nil {
@@ -131,6 +144,12 @@ func (r *StorageRepository) Delete(s *Storage) error {
return err
}
}
case StorageTypeNAS:
if s.NASStorage != nil {
if err := tx.Delete(s.NASStorage).Error; err != nil {
return err
}
}
}
// Delete the main storage

View File

@@ -222,10 +222,14 @@ func verifyDataIntegrity(t *testing.T, originalDB *sqlx.DB, restoredDB *sqlx.DB)
assert.NoError(t, err)
assert.Equal(t, len(originalData), len(restoredData), "Should have same number of rows")
for i := range originalData {
assert.Equal(t, originalData[i].ID, restoredData[i].ID, "ID should match")
assert.Equal(t, originalData[i].Name, restoredData[i].Name, "Name should match")
assert.Equal(t, originalData[i].Value, restoredData[i].Value, "Value should match")
// Only compare data if both slices have elements (to avoid panic)
if len(originalData) > 0 && len(restoredData) > 0 {
for i := range originalData {
assert.Equal(t, originalData[i].ID, restoredData[i].ID, "ID should match")
assert.Equal(t, originalData[i].Name, restoredData[i].Name, "Name should match")
assert.Equal(t, originalData[i].Value, restoredData[i].Value, "Value should match")
}
}
}

View File

@@ -4,10 +4,12 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"golang.org/x/time/rate"
)
type UserController struct {
userService *UserService
userService *UserService
signinLimiter *rate.Limiter
}
func (c *UserController) RegisterRoutes(router *gin.RouterGroup) {
@@ -51,8 +53,18 @@ func (c *UserController) SignUp(ctx *gin.Context) {
// @Param request body SignInRequest true "User signin data"
// @Success 200 {object} SignInResponse
// @Failure 400
// @Failure 429 {object} map[string]string "Rate limit exceeded"
// @Router /users/signin [post]
func (c *UserController) SignIn(ctx *gin.Context) {
// We use rate limiter to prevent brute force attacks
if !c.signinLimiter.Allow() {
ctx.JSON(
http.StatusTooManyRequests,
gin.H{"error": "Rate limit exceeded. Please try again later."},
)
return
}
var request SignInRequest
if err := ctx.ShouldBindJSON(&request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request format"})

View File

@@ -2,6 +2,8 @@ package users
import (
user_repositories "postgresus-backend/internal/features/users/repositories"
"golang.org/x/time/rate"
)
var secretKeyRepository = &user_repositories.SecretKeyRepository{}
@@ -12,6 +14,7 @@ var userService = &UserService{
}
var userController = &UserController{
userService,
rate.NewLimiter(rate.Limit(3), 3), // 3 RPS with burst of 3
}
func GetUserService() *UserService {

View File

@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
-- Create NAS storages table.
-- One row per NAS (SMB/CIFS) storage configuration; storage_id is both the
-- primary key and a reference to the owning row in "storages".
CREATE TABLE nas_storages (
    storage_id UUID PRIMARY KEY,
    host TEXT NOT NULL,
    port INTEGER NOT NULL DEFAULT 445,
    share TEXT NOT NULL,
    username TEXT NOT NULL,
    password TEXT NOT NULL,
    use_ssl BOOLEAN NOT NULL DEFAULT FALSE,
    domain TEXT,      -- optional NTLM domain
    path TEXT         -- optional base directory on the share
);

-- Cascade delete: removing a storage removes its NAS config. DEFERRABLE so
-- parent and child rows can be written in either order within one
-- transaction; the check runs at commit.
ALTER TABLE nas_storages
ADD CONSTRAINT fk_nas_storages_storage
FOREIGN KEY (storage_id)
REFERENCES storages (id)
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS nas_storages;
-- +goose StatementEnd

View File

@@ -0,0 +1 @@
This is test data for storage testing

View File

@@ -0,0 +1 @@
This is test data for storage testing

View File

@@ -2,6 +2,9 @@
set -e # Exit on any error
# Ensure non-interactive mode for apt
export DEBIAN_FRONTEND=noninteractive
echo "Installing PostgreSQL client tools versions 13-17 for Linux (Debian/Ubuntu)..."
echo
@@ -30,18 +33,18 @@ echo
# Add PostgreSQL official APT repository
echo "Adding PostgreSQL official APT repository..."
$SUDO apt-get update -qq
$SUDO apt-get install -y wget ca-certificates
$SUDO apt-get update -qq -y
$SUDO apt-get install -y -qq wget ca-certificates
# Add GPG key
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | $SUDO apt-key add -
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | $SUDO apt-key add - 2>/dev/null
# Add repository
echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | $SUDO tee /etc/apt/sources.list.d/pgdg.list
echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | $SUDO tee /etc/apt/sources.list.d/pgdg.list >/dev/null
# Update package list
echo "Updating package list..."
$SUDO apt-get update -qq
$SUDO apt-get update -qq -y
# Install client tools for each version
versions="13 14 15 16 17"
@@ -50,35 +53,34 @@ for version in $versions; do
echo "Installing PostgreSQL $version client tools..."
# Install client tools only
$SUDO apt-get install -y postgresql-client-$version
$SUDO apt-get install -y -qq postgresql-client-$version
# Create version-specific directory and symlinks
version_dir="$POSTGRES_DIR/postgresql-$version"
mkdir -p "$version_dir/bin"
# Create symlinks to the installed binaries
if [ -f "/usr/bin/pg_dump" ]; then
# If multiple versions, binaries are usually named with version suffix
if [ -f "/usr/bin/pg_dump-$version" ]; then
ln -sf "/usr/bin/pg_dump-$version" "$version_dir/bin/pg_dump"
ln -sf "/usr/bin/pg_dumpall-$version" "$version_dir/bin/pg_dumpall"
ln -sf "/usr/bin/psql-$version" "$version_dir/bin/psql"
ln -sf "/usr/bin/pg_restore-$version" "$version_dir/bin/pg_restore"
ln -sf "/usr/bin/createdb-$version" "$version_dir/bin/createdb"
ln -sf "/usr/bin/dropdb-$version" "$version_dir/bin/dropdb"
else
# Fallback to non-versioned names (latest version)
ln -sf "/usr/bin/pg_dump" "$version_dir/bin/pg_dump"
ln -sf "/usr/bin/pg_dumpall" "$version_dir/bin/pg_dumpall"
ln -sf "/usr/bin/psql" "$version_dir/bin/psql"
ln -sf "/usr/bin/pg_restore" "$version_dir/bin/pg_restore"
ln -sf "/usr/bin/createdb" "$version_dir/bin/createdb"
ln -sf "/usr/bin/dropdb" "$version_dir/bin/dropdb"
fi
# On Debian/Ubuntu, PostgreSQL binaries are located in /usr/lib/postgresql/{version}/bin/
pg_bin_dir="/usr/lib/postgresql/$version/bin"
if [ -d "$pg_bin_dir" ] && [ -f "$pg_bin_dir/pg_dump" ]; then
# Create symlinks to the version-specific binaries
ln -sf "$pg_bin_dir/pg_dump" "$version_dir/bin/pg_dump"
ln -sf "$pg_bin_dir/pg_dumpall" "$version_dir/bin/pg_dumpall"
ln -sf "$pg_bin_dir/psql" "$version_dir/bin/psql"
ln -sf "$pg_bin_dir/pg_restore" "$version_dir/bin/pg_restore"
ln -sf "$pg_bin_dir/createdb" "$version_dir/bin/createdb"
ln -sf "$pg_bin_dir/dropdb" "$version_dir/bin/dropdb"
echo "PostgreSQL $version client tools installed successfully"
else
echo "Warning: PostgreSQL $version client tools may not have installed correctly"
echo "Error: PostgreSQL $version binaries not found in expected location: $pg_bin_dir"
echo "Available PostgreSQL directories:"
ls -la /usr/lib/postgresql/ 2>/dev/null || echo "No PostgreSQL directories found in /usr/lib/postgresql/"
if [ -d "$pg_bin_dir" ]; then
echo "Contents of $pg_bin_dir:"
ls -la "$pg_bin_dir" 2>/dev/null || echo "Directory exists but cannot list contents"
fi
exit 1
fi
echo
done
@@ -93,6 +95,9 @@ for version in $versions; do
version_dir="$POSTGRES_DIR/postgresql-$version"
if [ -f "$version_dir/bin/pg_dump" ]; then
echo " postgresql-$version: $version_dir/bin/"
# Verify the correct version
version_output=$("$version_dir/bin/pg_dump" --version 2>/dev/null | grep -o "pg_dump (PostgreSQL) [0-9]\+\.[0-9]\+")
echo " Version check: $version_output"
fi
done

View File

@@ -1,4 +1,4 @@
This directory is needed only for development.
This directory is needed only for development and CI/CD.
We have to download and install all the PostgreSQL versions from 13 to 17 locally.
This is needed so we can call pg_dump, pg_dumpall, etc. on each version of the PostgreSQL database.

View File

@@ -70,6 +70,7 @@ Before taking anything more than a couple of lines of code, please write Rostisl
Backups flow:
- do not remove old backups on backups disable
- add FTP
- add Dropbox
- add OneDrive
@@ -86,17 +87,14 @@ Notifications flow:
Extra:
- add tests running on each PR (in progress by Rostislav Dugin)
- add prettier labels to GitHub README
- create pretty website like rybbit.io with demo
- add HTTPS for Postgresus
- add simple SQL queries via UI
- add brute force protection on auth (via local RPS limiter)
- add support of Kubernetes Helm
- create pretty website like rybbit.io with demo
Monitoring flow:
- add system metrics (CPU, RAM, disk, IO)
- add system metrics (CPU, RAM, disk, IO) (in progress by Rostislav Dugin)
- add queries stats (slowest, most frequent, etc. via pg_stat_statements)
- add alerting for slow queries (listen for slow query and if they reach >100ms - send message)
- add alerting for high resource usage (listen for high resource usage and if they reach >90% - send message)

View File

@@ -16,27 +16,4 @@ services:
volumes:
- ./postgresus-data:/postgresus-data
container_name: postgresus-local
depends_on:
postgresus-db:
condition: service_healthy
restart: unless-stopped
postgresus-db:
image: postgres:17
# we use default values, but do not expose
# PostgreSQL ports so it is safe
environment:
- POSTGRES_DB=postgresus
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=Q1234567
volumes:
- ./pgdata:/var/lib/postgresql/data
container_name: postgresus-db
command: -p 5437
shm_size: 10gb
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d postgresus -p 5437"]
interval: 5s
timeout: 5s
retries: 5
restart: unless-stopped
restart: unless-stopped

View File

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg fill="#000000" width="800px" height="800px" viewBox="0 0 256 256" id="Flat" xmlns="http://www.w3.org/2000/svg">
<g opacity="0.2">
<rect x="40" y="144" width="176" height="64" rx="8"/>
</g>
<g opacity="0.2">
<rect x="40" y="48" width="176" height="64" rx="8"/>
</g>
<path d="M208,136H48a16.01833,16.01833,0,0,0-16,16v48a16.01833,16.01833,0,0,0,16,16H208a16.01833,16.01833,0,0,0,16-16V152A16.01833,16.01833,0,0,0,208,136Zm0,64H48V152H208l.01025,47.99951Zm0-160H48A16.01833,16.01833,0,0,0,32,56v48a16.01833,16.01833,0,0,0,16,16H208a16.01833,16.01833,0,0,0,16-16V56A16.01833,16.01833,0,0,0,208,40Zm0,64H48V56H208l.01025,47.99951ZM192,80a12,12,0,1,1-12-12A12.01375,12.01375,0,0,1,192,80Zm0,96a12,12,0,1,1-12-12A12.01375,12.01375,0,0,1,192,176Z"/>

After

Width:  |  Height:  |  Size: 892 B

View File

@@ -3,6 +3,7 @@ export { type Storage } from './models/Storage';
export { StorageType } from './models/StorageType';
export { type LocalStorage } from './models/LocalStorage';
export { type S3Storage } from './models/S3Storage';
export { type NASStorage } from './models/NASStorage';
export { getStorageLogoFromType } from './models/getStorageLogoFromType';
export { getStorageNameFromType } from './models/getStorageNameFromType';
export { type GoogleDriveStorage } from './models/GoogleDriveStorage';

View File

@@ -0,0 +1,10 @@
/**
 * Connection settings for a NAS network-share storage backend.
 * Presumably an SMB/CIFS share — the UI's default port of 445 and the
 * optional Windows domain suggest so; confirm against the backend client.
 */
export interface NASStorage {
  /** Hostname or IP address of the NAS server. */
  host: string;
  /** TCP port of the share service (the edit form defaults to 445). */
  port: number;
  /** Name of the exported share to connect to. */
  share: string;
  /** Account used to authenticate against the share. */
  username: string;
  /** Password for the account; masked in the read-only view. */
  password: string;
  /** Whether to use SSL/TLS for the connection. */
  useSsl: boolean;
  /** Optional Windows domain for domain authentication; omit otherwise. */
  domain?: string;
  /** Optional subdirectory inside the share where backups are stored. */
  path?: string;
}

View File

@@ -1,5 +1,6 @@
import type { GoogleDriveStorage } from './GoogleDriveStorage';
import type { LocalStorage } from './LocalStorage';
import type { NASStorage } from './NASStorage';
import type { S3Storage } from './S3Storage';
import type { StorageType } from './StorageType';
@@ -13,4 +14,5 @@ export interface Storage {
localStorage?: LocalStorage;
s3Storage?: S3Storage;
googleDriveStorage?: GoogleDriveStorage;
nasStorage?: NASStorage;
}

View File

@@ -2,4 +2,5 @@ export enum StorageType {
LOCAL = 'LOCAL',
S3 = 'S3',
GOOGLE_DRIVE = 'GOOGLE_DRIVE',
NAS = 'NAS',
}

View File

@@ -8,6 +8,8 @@ export const getStorageLogoFromType = (type: StorageType) => {
return '/icons/storages/s3.svg';
case StorageType.GOOGLE_DRIVE:
return '/icons/storages/google-drive.svg';
case StorageType.NAS:
return '/icons/storages/nas.svg';
default:
return '';
}

View File

@@ -8,6 +8,8 @@ export const getStorageNameFromType = (type: StorageType) => {
return 'S3';
case StorageType.GOOGLE_DRIVE:
return 'Google Drive';
case StorageType.NAS:
return 'NAS';
default:
return '';
}

View File

@@ -341,7 +341,7 @@ export const EditBackupConfigComponent = ({
<Tooltip
className="cursor-pointer"
title="Number of CPU cores to use for backup processing. Higher values may speed up backups but use more resources."
title="Number of CPU cores to use for restore processing. Higher values may speed up restores, but use more resources."
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>

View File

@@ -190,7 +190,7 @@ export const EditHealthcheckConfigComponent = ({ databaseId, onClose }: Props) =
<Tooltip
className="cursor-pointer"
title="How many days to store healthcheck attempt history"
title="How many days to store health check attempt history"
>
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
</Tooltip>

View File

@@ -40,7 +40,7 @@ export const ShowHealthcheckConfigComponent = ({ databaseId }: Props) => {
return (
<div className="space-y-4">
<div className="mb-1 flex items-center">
<div className="min-w-[180px]">Is healthcheck enabled</div>
<div className="min-w-[180px]">Is health check enabled</div>
<div className="w-[250px]">{healthcheckConfig.isHealthcheckEnabled ? 'Yes' : 'No'}</div>
</div>

View File

@@ -9,6 +9,7 @@ import {
} from '../../../../entity/storages';
import { ToastHelper } from '../../../../shared/toast';
import { EditGoogleDriveStorageComponent } from './storages/EditGoogleDriveStorageComponent';
import { EditNASStorageComponent } from './storages/EditNASStorageComponent';
import { EditS3StorageComponent } from './storages/EditS3StorageComponent';
interface Props {
@@ -98,6 +99,19 @@ export function EditStorageComponent({
};
}
if (type === StorageType.NAS) {
storage.nasStorage = {
host: '',
port: 0,
share: '',
username: '',
password: '',
useSsl: false,
domain: '',
path: '',
};
}
setStorage(
JSON.parse(
JSON.stringify({
@@ -148,6 +162,16 @@ export function EditStorageComponent({
);
}
if (storage.type === StorageType.NAS) {
return (
storage.nasStorage?.host &&
storage.nasStorage?.port &&
storage.nasStorage?.share &&
storage.nasStorage?.username &&
storage.nasStorage?.password
);
}
return false;
};
@@ -181,6 +205,7 @@ export function EditStorageComponent({
{ label: 'Local storage', value: StorageType.LOCAL },
{ label: 'S3', value: StorageType.S3 },
{ label: 'Google Drive', value: StorageType.GOOGLE_DRIVE },
{ label: 'NAS', value: StorageType.NAS },
]}
onChange={(value) => {
setStorageType(value);
@@ -211,6 +236,14 @@ export function EditStorageComponent({
setIsUnsaved={setIsUnsaved}
/>
)}
{storage?.type === StorageType.NAS && (
<EditNASStorageComponent
storage={storage}
setStorage={setStorage}
setIsUnsaved={setIsUnsaved}
/>
)}
</div>
<div className="mt-3 flex">

View File

@@ -0,0 +1,217 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { Input, InputNumber, Switch, Tooltip } from 'antd';
import type { Storage } from '../../../../../entity/storages';
/** Props for the NAS storage edit form. */
interface Props {
  /** The storage being edited; its `nasStorage` field holds the NAS settings. */
  storage: Storage;
  /** Replaces the whole storage object after each field change. */
  setStorage: (storage: Storage) => void;
  /** Marks the surrounding form as having unsaved changes. */
  setIsUnsaved: (isUnsaved: boolean) => void;
}
/**
 * Edit form for NAS storage settings (host, port, share, credentials,
 * SSL toggle, optional domain and path). Every change replaces the whole
 * storage object via `setStorage` and flags the form as unsaved.
 */
export function EditNASStorageComponent({ storage, setStorage, setIsUnsaved }: Props) {
  // Merge a partial patch into nasStorage and mark the form dirty.
  // No-op while nasStorage has not been initialised yet.
  const patchNas = (patch: Partial<NonNullable<Storage['nasStorage']>>) => {
    if (!storage?.nasStorage) return;
    setStorage({
      ...storage,
      nasStorage: { ...storage.nasStorage, ...patch },
    });
    setIsUnsaved(true);
  };

  return (
    <>
      {/* Link to the external setup guide. */}
      <div className="mb-2 flex items-center">
        <div className="min-w-[110px]" />
        <div className="text-xs text-blue-600">
          <a href="https://postgresus.com/nas-storage" target="_blank" rel="noreferrer">
            How to connect NAS storage?
          </a>
        </div>
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Host</div>
        <Input
          value={storage?.nasStorage?.host || ''}
          onChange={(e) => patchNas({ host: e.target.value.trim() })}
          size="small"
          className="w-full max-w-[250px]"
          placeholder="192.168.1.100"
        />
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Port</div>
        {/* NOTE(review): while port is 0/unset the field displays 445 but the
            stored value remains 0 until the user edits it — confirm intended. */}
        <InputNumber
          value={storage?.nasStorage?.port || 445}
          onChange={(value) => {
            if (value) patchNas({ port: value });
          }}
          size="small"
          className="w-full max-w-[250px]"
          min={1}
          max={65535}
          placeholder="445"
        />
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Share</div>
        <Input
          value={storage?.nasStorage?.share || ''}
          onChange={(e) => patchNas({ share: e.target.value.trim() })}
          size="small"
          className="w-full max-w-[250px]"
          placeholder="shared_folder"
        />
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Username</div>
        <Input
          value={storage?.nasStorage?.username || ''}
          onChange={(e) => patchNas({ username: e.target.value.trim() })}
          size="small"
          className="w-full max-w-[250px]"
          placeholder="username"
        />
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Password</div>
        {/* Password is deliberately NOT trimmed — whitespace may be significant. */}
        <Input.Password
          value={storage?.nasStorage?.password || ''}
          onChange={(e) => patchNas({ password: e.target.value })}
          size="small"
          className="w-full max-w-[250px]"
          placeholder="password"
        />
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Use SSL</div>
        <Switch
          checked={storage?.nasStorage?.useSsl || false}
          onChange={(checked) => patchNas({ useSsl: checked })}
          size="small"
        />
        <Tooltip className="cursor-pointer" title="Enable SSL/TLS encryption for secure connection">
          <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
        </Tooltip>
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Domain</div>
        {/* Empty string collapses to undefined so the field is omitted from the payload. */}
        <Input
          value={storage?.nasStorage?.domain || ''}
          onChange={(e) => patchNas({ domain: e.target.value.trim() || undefined })}
          size="small"
          className="w-full max-w-[250px]"
          placeholder="WORKGROUP (optional)"
        />
        <Tooltip
          className="cursor-pointer"
          title="Windows domain name (optional, leave empty if not using domain authentication)"
        >
          <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
        </Tooltip>
      </div>

      <div className="mb-1 flex items-center">
        <div className="min-w-[110px]">Path</div>
        {/* Empty string collapses to undefined, same as Domain above. */}
        <Input
          value={storage?.nasStorage?.path || ''}
          onChange={(e) => patchNas({ path: e.target.value.trim() || undefined })}
          size="small"
          className="w-full max-w-[250px]"
          placeholder="/backups (optional)"
        />
        <Tooltip className="cursor-pointer" title="Subdirectory path within the share (optional)">
          <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
        </Tooltip>
      </div>
    </>
  );
}

View File

@@ -2,6 +2,7 @@ import { type Storage, StorageType } from '../../../../entity/storages';
import { getStorageLogoFromType } from '../../../../entity/storages/models/getStorageLogoFromType';
import { getStorageNameFromType } from '../../../../entity/storages/models/getStorageNameFromType';
import { ShowGoogleDriveStorageComponent } from './storages/ShowGoogleDriveStorageComponent';
import { ShowNASStorageComponent } from './storages/ShowNASStorageComponent';
import { ShowS3StorageComponent } from './storages/ShowS3StorageComponent';
interface Props {
@@ -32,6 +33,10 @@ export function ShowStorageComponent({ storage }: Props) {
<ShowGoogleDriveStorageComponent storage={storage} />
)}
</div>
<div>
{storage?.type === StorageType.NAS && <ShowNASStorageComponent storage={storage} />}
</div>
</div>
);
}

View File

@@ -0,0 +1,51 @@
import type { Storage } from '../../../../../entity/storages';
/** Props for the read-only NAS storage view. */
interface Props {
  /** The storage to display; its `nasStorage` field holds the NAS settings. */
  storage: Storage;
}
export function ShowNASStorageComponent({ storage }: Props) {
return (
<>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Host</div>
{storage?.nasStorage?.host || '-'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Port</div>
{storage?.nasStorage?.port || '-'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Share</div>
{storage?.nasStorage?.share || '-'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Username</div>
{storage?.nasStorage?.username || '-'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Password</div>
{storage?.nasStorage?.password ? '*********' : '-'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Use SSL</div>
{storage?.nasStorage?.useSsl ? 'Yes' : 'No'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Domain</div>
{storage?.nasStorage?.domain || '-'}
</div>
<div className="mb-1 flex items-center">
<div className="min-w-[110px]">Path</div>
{storage?.nasStorage?.path || '-'}
</div>
</>
);
}

View File

@@ -60,7 +60,7 @@ export const MainScreenComponent = () => {
target="_blank"
rel="noreferrer"
>
Healthcheck
Health-check
</a>
<a

View File

@@ -68,29 +68,6 @@ services:
- "4005:4005"
volumes:
- ./postgresus-data:/postgresus-data
depends_on:
postgresus-db:
condition: service_healthy
restart: unless-stopped
postgresus-db:
container_name: postgresus-db
image: postgres:17
# we use default values, but do not expose
# PostgreSQL ports so it is safe
environment:
- POSTGRES_DB=postgresus
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=Q1234567
volumes:
- ./pgdata:/var/lib/postgresql/data
command: -p 5437
shm_size: 10gb
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d postgresus -p 5437"]
interval: 5s
timeout: 5s
retries: 5
restart: unless-stopped
EOF
log "docker-compose.yml created successfully"