Compare commits

...

111 Commits

Author SHA1 Message Date
Rostislav Dugin
415dda8752 Merge pull request #440 from databasus/develop
FIX (local storage): Add fallback for file movement via renaming to s…
2026-03-14 13:38:39 +03:00
Rostislav Dugin
3faf85796a FIX (local storage): Add fallback for file movement via renaming to support cross-device movement 2026-03-14 13:32:29 +03:00
Rostislav Dugin
edd2759f5a Merge pull request #439 from databasus/develop
FIX (ci \ cd): Add e2e agent docker-compose to repo
2026-03-14 13:17:03 +03:00
Rostislav Dugin
c283856f38 FIX (ci \ cd): Add e2e agent docker-compose to repo 2026-03-14 13:15:34 +03:00
Rostislav Dugin
6059e1a33b Merge pull request #438 from databasus/develop
FIX (ci \ cd): Exclude agent e2e from docker ignore
2026-03-14 13:11:53 +03:00
Rostislav Dugin
2deda2e7ea FIX (ci \ cd): Exclude agent e2e from docker ignore 2026-03-14 13:11:27 +03:00
Rostislav Dugin
acf1143752 Merge pull request #437 from databasus/develop
FIX (ci \ cd): Update e2e tests for agent to run on GitHub workers
2026-03-14 12:54:56 +03:00
Rostislav Dugin
889063a8b4 FIX (ci \ cd): Update e2e tests for agent to run on GitHub workers 2026-03-14 12:54:32 +03:00
Rostislav Dugin
a1e20e7b10 Merge pull request #436 from databasus/develop
FIX (linting): Add E2E to linting
2026-03-14 12:48:23 +03:00
Rostislav Dugin
7e76945550 FIX (linting): Add E2E to linting 2026-03-14 12:47:43 +03:00
Rostislav Dugin
d98acfc4af Merge pull request #435 from databasus/develop
FEATURE (agent): Add postgres verification and e2e tests for agent
2026-03-14 12:43:51 +03:00
Rostislav Dugin
0ffc7c8c96 FEATURE (agent): Add postgres verification and e2e tests for agent 2026-03-14 12:43:13 +03:00
Rostislav Dugin
1b011bdcd4 Merge pull request #432 from databasus/develop
Develop
2026-03-13 18:51:50 +03:00
Rostislav Dugin
7e209ff537 REFACTOR (linters): Apply linters fixes 2026-03-13 18:50:57 +03:00
Rostislav Dugin
f712e3a437 FEATURE (linters): Introduce more strict linters 2026-03-13 18:03:38 +03:00
Rostislav Dugin
bcd7d8e1aa REFACTOR (formatters): Apply formatters auto fixes 2026-03-13 17:53:00 +03:00
Rostislav Dugin
880a7488e9 FEATURE (formatters): Add gofumpt and gci formatters 2026-03-13 17:50:07 +03:00
Rostislav Dugin
ca4d483f2c REFACTOR (golines): Apply golines fixes 2026-03-13 17:47:46 +03:00
Rostislav Dugin
1b511410a6 FEATURE (formatters): Fix config of golines 2026-03-13 17:47:29 +03:00
Rostislav Dugin
c8edff8046 FEATURE (golangci-lint): Upgrade golangci-lint to 2.11.3 in CI \ CD 2026-03-13 17:41:14 +03:00
Rostislav Dugin
f60e3d956b FEATURE (go): Upgrade Go version to 1.26.1 2026-03-13 17:37:39 +03:00
Rostislav Dugin
f2cb9022f2 FEATURE (agent): Setup agent directory, pre-commit and CI\CD workflow 2026-03-13 17:23:00 +03:00
Rostislav Dugin
4b3f36eea2 Merge pull request #429 from databasus/develop
FIX (readme): Add info about Anthropic and Open AI support via OSS pr…
2026-03-12 09:38:01 +03:00
Rostislav Dugin
460063e7a5 FIX (readme): Add info about Anthropic and Open AI support via OSS programs 2026-03-12 09:34:42 +03:00
Rostislav Dugin
a0f02b253e Merge pull request #427 from databasus/develop
FIX (retention): Fix GFS retention while hourly backups prevent daily…
2026-03-11 15:36:27 +03:00
Rostislav Dugin
812f11bc2f FIX (retention): Fix GFS retention while hourly backups prevent daily from cleanup 2026-03-11 15:35:53 +03:00
Rostislav Dugin
e796e3ddf0 Merge pull request #426 from databasus/develop
FIX (mysql): Detect supported compression levels
2026-03-11 12:53:35 +03:00
Rostislav Dugin
c96d3db337 FIX (mysql): Detect supported compression levels 2026-03-11 12:52:41 +03:00
Rostislav Dugin
ed6c3a2034 Merge pull request #425 from databasus/develop
Develop
2026-03-11 12:31:19 +03:00
Rostislav Dugin
05115047c3 FEATURE (version): Reload frontend if faced version mismatch with backend 2026-03-11 12:28:07 +03:00
Rostislav Dugin
446b96c6c0 FEATURE (arch): Add architecture to Databasus version in the bottom left of UI 2026-03-11 11:39:53 +03:00
Rostislav Dugin
36a0448da1 Merge pull request #420 from databasus/develop
FEATURE (email): Add skipping TLS for email notifier
2026-03-08 22:53:45 +03:00
Rostislav Dugin
8e392cfeab FEATURE (email): Add skipping TLS for email notifier 2026-03-08 22:48:28 +03:00
Rostislav Dugin
6683db1e52 Merge pull request #419 from databasus/develop
FIX (issues): Add DB version to issues template
2026-03-08 22:22:52 +03:00
Rostislav Dugin
703b883936 FIX (issues): Add DB version to issues template 2026-03-08 22:22:26 +03:00
Rostislav Dugin
e818bcff82 Merge pull request #415 from databasus/develop
Develop
2026-03-06 09:45:11 +03:00
Rostislav Dugin
b2f98f1332 FIX (mysql\mariadb): Increase max allowed packet size over restore for MySQL\MariaDB 2026-03-06 09:44:17 +03:00
Rostislav Dugin
230cc27ea6 FEATURE (backups): Add WAL API 2026-03-06 08:10:29 +03:00
Rostislav Dugin
cd197ff94b Merge pull request #410 from databasus/develop
FIX (readme): Update README
2026-03-01 10:43:47 +03:00
Rostislav Dugin
91f35a3e17 FIX (readme): Update README 2026-03-01 10:43:17 +03:00
Rostislav Dugin
30c2e2d156 Merge pull request #403 from databasus/develop
FIX (smtp): Add SMTP from field to env variables
2026-02-25 22:23:08 +03:00
Rostislav Dugin
ef7c5b45e6 FIX (smtp): Add SMTP from field to env variables 2026-02-25 22:13:04 +03:00
Rostislav Dugin
920c98e229 Merge pull request #397 from databasus/develop
FIX (migrations): Fix version of migrations tool goose
2026-02-22 23:43:55 +03:00
Rostislav Dugin
2a19a96aae FIX (migrations): Fix version of migrations tool goose 2026-02-22 23:43:23 +03:00
Rostislav Dugin
75aa2108d9 Merge pull request #396 from databasus/develop
FIX (email): Use current OS hostname instead of default localhost
2026-02-22 23:33:28 +03:00
Rostislav Dugin
0a0040839e FIX (email): Use current OS hostname instead of default localhost 2026-02-22 23:31:25 +03:00
Rostislav Dugin
ff4f795ece Merge pull request #394 from databasus/develop
FIX (nas): Add NAS share validation
2026-02-22 16:05:38 +03:00
Rostislav Dugin
dc05502580 FIX (nas): Add NAS share validation 2026-02-22 15:56:30 +03:00
Rostislav Dugin
1ca38f5583 Merge pull request #390 from databasus/develop
FEATURE (templates): Add PR template
2026-02-21 15:58:21 +03:00
Rostislav Dugin
40b3ff61c7 FEATURE (templates): Add PR template 2026-02-21 15:53:01 +03:00
Rostislav Dugin
e1b245a573 Merge pull request #389 from databasus/develop
Develop
2026-02-21 14:57:56 +03:00
Rostislav Dugin
fdf29b71f2 FIX (mongodb): Fix direct connection string parsing 2026-02-21 14:56:48 +03:00
Rostislav Dugin
49da981c21 Merge pull request #388 from databasus/main
Merge main into dev
2026-02-21 14:53:31 +03:00
Rostislav Dugin
9d611d3559 REFACTOR (mongodb): Refactor direct connection PR 2026-02-21 14:43:47 +03:00
ujstor
22cab53dab feature/mongodb-directConnection (#377)
FEATURE (mongodb): Add direct connection
2026-02-21 14:10:28 +03:00
Rostislav Dugin
d761c4156c Merge pull request #385 from databasus/develop
FIX (readme): Fix README typo
2026-02-20 17:17:45 +03:00
Rostislav Dugin
cbb8b82711 FIX (readme): Fix README typo 2026-02-20 17:01:44 +03:00
Rostislav Dugin
8e3d1e5bff Merge pull request #384 from databasus/develop
FIX (backups): Do not reload backups if request already in progress
2026-02-20 15:04:19 +03:00
Rostislav Dugin
349e7f0ee8 FIX (backups): Do not reload backups if request already in progress 2026-02-20 14:43:07 +03:00
Rostislav Dugin
3a274e135b Merge pull request #383 from databasus/develop
FEATURE (backups): Add GFS retention policy
2026-02-20 14:33:29 +03:00
Rostislav Dugin
61e937bc2a FEATURE (backups): Add GFS retention policy 2026-02-20 14:31:56 +03:00
Rostislav Dugin
f67919fe1a Merge pull request #374 from databasus/develop
FIX (backups): Fix backup download and clean up
2026-02-18 12:53:10 +03:00
Rostislav Dugin
91ee5966d8 FIX (backups): Fix backup download and clean up 2026-02-18 12:52:35 +03:00
Rostislav Dugin
d77d7d69a3 Merge pull request #371 from databasus/develop
FEATURE (backups): Add metadata alongside with backup files itself to…
2026-02-17 19:54:53 +03:00
Rostislav Dugin
fc88b730d5 FEATURE (backups): Add metadata alongside with backup files itself to make them recoverable without Databasus 2026-02-17 19:52:08 +03:00
Rostislav Dugin
1f1d80245f Merge pull request #368 from databasus/develop
FIX (restores): Increase restore timeout to 23 hours instead of 1 hour
2026-02-17 14:56:58 +03:00
Rostislav Dugin
16a29cf458 FIX (restores): Increase restore timeout to 23 hours instead of 1 hour 2026-02-17 14:56:25 +03:00
Rostislav Dugin
43e04500ac Merge pull request #367 from databasus/develop
FEATURE (backups): Add meaningful names for backups
2026-02-17 14:50:21 +03:00
Rostislav Dugin
cee3022f85 FEATURE (backups): Add meaningful names for backups 2026-02-17 14:49:33 +03:00
Rostislav Dugin
f46d92c480 Merge pull request #365 from databasus/develop
FIX (audit logs): Get rid of IDs in audit logs and improve naming log…
2026-02-15 01:10:54 +03:00
Rostislav Dugin
10677238d7 FIX (audit logs): Get rid of IDs in audit logs and improve naming logging 2026-02-15 01:06:39 +03:00
Rostislav Dugin
2553203fcf Merge pull request #363 from databasus/develop
FIX (sign up): Return authorization token on sign up to avoid 2-step …
2026-02-15 00:09:00 +03:00
Rostislav Dugin
7b05bd8000 FIX (sign up): Return authorization token on sign up to avoid 2-step sign up 2026-02-15 00:08:01 +03:00
Rostislav Dugin
8d45728f73 Merge pull request #362 from databasus/develop
FEATURE (auth): Add optional CloudFlare Turnstile for sign in \ sign …
2026-02-14 23:19:12 +03:00
Rostislav Dugin
c70ad82c95 FEATURE (auth): Add optional CloudFlare Turnstile for sign in \ sign up \ password reset 2026-02-14 23:11:36 +03:00
Rostislav Dugin
e4bc34d319 Merge pull request #361 from databasus/develop
Develop
2026-02-13 16:57:25 +03:00
Rostislav Dugin
257ae85da7 FIX (postgres): Fix read-only issue when user cannot access tables and partitions created after user creation 2026-02-13 16:56:56 +03:00
Rostislav Dugin
b42c820bb2 FIX (mariadb): Fix events exclusion 2026-02-13 16:21:48 +03:00
Rostislav Dugin
da5c13fb11 Merge pull request #356 from databasus/develop
FIX (mysql & mariadb): Fix creation of backups with extremely large SQ…
2026-02-10 22:40:06 +03:00
Rostislav Dugin
35180360e5 FIX (mysql & mariadb): Fix creation of backups with extremely large SQL statements to avoid OOM 2026-02-10 22:38:18 +03:00
Rostislav Dugin
e4f6cd7a5d Merge pull request #349 from databasus/develop
Develop
2026-02-09 16:42:00 +03:00
Rostislav Dugin
d7b8e6d56a Merge branch 'develop' of https://github.com/databasus/databasus into develop 2026-02-09 16:40:46 +03:00
Rostislav Dugin
6016f23fb2 FEATURE (svr): Add SVR support 2026-02-09 16:39:51 +03:00
Rostislav Dugin
e7c4ee8f6f Merge pull request #345 from databasus/develop
Develop
2026-02-08 23:38:42 +03:00
Rostislav Dugin
a75702a01b Merge pull request #342 from wuast94/patch-1
Add image source label to dockerfiles
2026-02-08 23:38:18 +03:00
Rostislav Dugin
81a21eb907 FEATURE (google drive): Change OAuth authorization flow to local address instead of databasus.com 2026-02-08 23:32:13 +03:00
Marc
33d6bf0147 Add image source label to dockerfiles
To get changelogs shown with Renovate a docker container has to add the source label described in the OCI Image Format Specification.

For reference: https://github.com/renovatebot/renovate/blob/main/lib/modules/datasource/docker/readme.md
2026-02-05 23:30:37 +01:00
Rostislav Dugin
6eb53bb07b Merge pull request #341 from databasus/develop
Develop
2026-02-06 00:25:30 +03:00
Rostislav Dugin
6ac04270b9 FEATURE (healthcheck): Add checking whether backup nodes available for primary node 2026-02-06 00:24:34 +03:00
Rostislav Dugin
b0510d7c21 FIX (logging): Add login to VictoriaLogs logger 2026-02-06 00:18:09 +03:00
Rostislav Dugin
dc5f271882 Merge pull request #339 from databasus/develop
FIX (storages): Do not remove system storage on any workspace deletion
2026-02-05 01:32:46 +03:00
Rostislav Dugin
8f718771c9 FIX (storages): Do not remove system storage on any workspace deletion 2026-02-05 01:32:21 +03:00
Rostislav Dugin
d8eea05dca Merge pull request #332 from databasus/develop
FIX (script): Fix script creation in playground head x2
2026-02-02 20:46:35 +03:00
Rostislav Dugin
b2a94274d7 FIX (script): Fix script creation in playground head x2 2026-02-02 20:44:52 +03:00
Rostislav Dugin
77c2712ebb Merge pull request #331 from databasus/develop
FIX (script): Fix script creation in playground head
2026-02-02 19:47:44 +03:00
Rostislav Dugin
a9dc29f82c FIX (script): Fix script creation in playground head 2026-02-02 19:47:15 +03:00
Rostislav Dugin
c934a45dca Merge pull request #330 from databasus/develop
FIX (storages): Fix storage edit in playground
2026-02-02 18:51:47 +03:00
Rostislav Dugin
d4acdf2826 FIX (storages): Fix storage edit in playground 2026-02-02 18:48:19 +03:00
Rostislav Dugin
49753c4fc0 Merge pull request #329 from databasus/develop
FIX (s3): Fix S3 prefill in playground on form edit
2026-02-02 18:14:07 +03:00
Rostislav Dugin
c6aed6b36d FIX (s3): Fix S3 prefill in playground on form edit 2026-02-02 18:12:44 +03:00
Rostislav Dugin
3060b4266a Merge pull request #328 from databasus/develop
Develop
2026-02-02 17:53:05 +03:00
Rostislav Dugin
ebeb597f17 FEATURE (playground): Add support of Rybbit script for playground 2026-02-02 17:50:31 +03:00
Rostislav Dugin
4783784325 FIX (playground): Do not show whitelist message in playground 2026-02-02 16:53:01 +03:00
Rostislav Dugin
bd41433bdb Merge branch 'develop' of https://github.com/databasus/databasus into develop 2026-02-02 16:50:18 +03:00
Rostislav Dugin
a9073787d2 FIX (audit logs): In dark mode show white text in audit logs 2026-02-02 16:44:49 +03:00
Rostislav Dugin
0890bf8f09 Merge pull request #327 from artemkalugin01/access-management-href-fix
Fix href in settings for access-management#global-settings
2026-02-02 16:12:25 +03:00
artem.kalugin
f8c11e8802 Fix href typo in settings for access-management#global-settings 2026-02-02 12:59:56 +03:00
Rostislav Dugin
e798d82fc1 Merge pull request #325 from databasus/develop
FIX (storages): Fix default storage type prefill in playground
2026-02-01 20:12:12 +03:00
Rostislav Dugin
81a01585ee FIX (storages): Fix default storage type prefill in playground 2026-02-01 20:07:12 +03:00
Rostislav Dugin
a8465c1a10 Merge pull request #324 from databasus/develop
FIX (storages): Limit local storage usage in playground
2026-02-01 19:20:34 +03:00
Rostislav Dugin
a9e5db70f6 FIX (storages): Limit local storage usage in playground 2026-02-01 19:18:54 +03:00
309 changed files with 13318 additions and 2499 deletions

44
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,44 @@
---
name: Bug Report
about: Report a bug or unexpected behavior in Databasus
labels: bug
---
## Databasus version (screenshot)
It is displayed in the bottom left corner of the Databasus UI. Please attach screenshot, not just version text
<!-- e.g. 1.4.2 -->
## Operating system and architecture
<!-- e.g. Ubuntu 22.04 x64, macOS 14 ARM, Windows 11 x64 -->
## Database type and version (optional, for DB-related bugs)
<!-- e.g. PostgreSQL 16 in Docker, MySQL 8.0 installed on server, MariaDB 11.4 in AWS Cloud -->
## Describe the bug (please write manually, do not ask AI to summarize)
**What happened:**
**What I expected:**
## Steps to reproduce
1.
2.
3.
## Have you asked AI how to solve the issue?
<!-- Using AI to diagnose issues before filing a bug report helps narrow down root causes. -->
- [ ] Claude Sonnet 4.6 or newer
- [ ] ChatGPT 5.2 or newer
- [ ] No
## Additional context / logs
<!-- Screenshots, error messages, relevant log output, etc. -->

View File

@@ -11,7 +11,7 @@ jobs:
lint-backend:
runs-on: self-hosted
container:
image: golang:1.24.9
image: golang:1.26.1
volumes:
- /runner-cache/go-pkg:/go/pkg/mod
- /runner-cache/go-build:/root/.cache/go-build
@@ -32,7 +32,7 @@ jobs:
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.7.2
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.11.3
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Install swag for swagger generation
@@ -86,6 +86,39 @@ jobs:
cd frontend
npm run build
lint-agent:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.26.1"
cache-dependency-path: agent/go.sum
- name: Download Go modules
run: |
cd agent
go mod download
- name: Install golangci-lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.11.3
echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
- name: Run golangci-lint
run: |
cd agent
golangci-lint run
- name: Verify go mod tidy
run: |
cd agent
go mod tidy
git diff --exit-code go.mod go.sum || (echo "go mod tidy made changes, please run 'go mod tidy' and commit the changes" && exit 1)
test-frontend:
runs-on: ubuntu-latest
needs: [lint-frontend]
@@ -108,11 +141,55 @@ jobs:
cd frontend
npm run test
test-agent:
runs-on: ubuntu-latest
needs: [lint-agent]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.26.1"
cache-dependency-path: agent/go.sum
- name: Download Go modules
run: |
cd agent
go mod download
- name: Run Go tests
run: |
cd agent
go test -count=1 -failfast ./internal/...
e2e-agent:
runs-on: ubuntu-latest
needs: [lint-agent]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Run e2e tests
run: |
cd agent
make e2e
- name: Cleanup
if: always()
run: |
cd agent/e2e
docker compose down -v --rmi local || true
rm -rf artifacts || true
# Self-hosted: a performant high-frequency CPU is used to start many containers and run tests fast. The tests
# step is the bottleneck, because we need a lot of containers and cannot parallelize tests due to shared resources
test-backend:
runs-on: self-hosted
needs: [lint-backend]
container:
image: golang:1.24.9
image: golang:1.26.1
options: --privileged -v /var/run/docker.sock:/var/run/docker.sock --add-host=host.docker.internal:host-gateway
volumes:
- /runner-cache/go-pkg:/go/pkg/mod
@@ -407,7 +484,7 @@ jobs:
- name: Run database migrations
run: |
cd backend
go install github.com/pressly/goose/v3/cmd/goose@latest
go install github.com/pressly/goose/v3/cmd/goose@v3.24.3
goose up
- name: Run Go tests
@@ -441,7 +518,7 @@ jobs:
runs-on: self-hosted
container:
image: node:20
needs: [test-backend, test-frontend]
needs: [test-backend, test-frontend, test-agent, e2e-agent]
if: ${{ github.ref == 'refs/heads/main' && !contains(github.event.head_commit.message, '[skip-release]') }}
outputs:
should_release: ${{ steps.version_bump.outputs.should_release }}
@@ -534,7 +611,7 @@ jobs:
build-only:
runs-on: self-hosted
needs: [test-backend, test-frontend]
needs: [test-backend, test-frontend, test-agent, e2e-agent]
if: ${{ github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[skip-release]') }}
steps:
- name: Clean workspace

2
.gitignore vendored
View File

@@ -5,6 +5,7 @@ databasus-data/
.env
pgdata/
docker-compose.yml
!agent/e2e/docker-compose.yml
node_modules/
.idea
/articles
@@ -12,3 +13,4 @@ node_modules/
.DS_Store
/scripts
.vscode/settings.json
.claude

View File

@@ -41,3 +41,20 @@ repos:
language: system
files: ^backend/.*\.go$
pass_filenames: false
# Agent checks
- repo: local
hooks:
- id: agent-format-and-lint
name: Agent Format & Lint (golangci-lint)
entry: bash -c "cd agent && golangci-lint fmt ./internal/... ./cmd/... && golangci-lint run ./internal/... ./cmd/..."
language: system
files: ^agent/.*\.go$
pass_filenames: false
- id: agent-go-mod-tidy
name: Agent Go Mod Tidy
entry: bash -c "cd agent && go mod tidy"
language: system
files: ^agent/.*\.go$
pass_filenames: false

256
AGENTS.md
View File

@@ -1,35 +1,37 @@
# Agent Rules and Guidelines
This document contains all coding standards, conventions and best practices recommended for the Databasus project.
This document contains all coding standards, conventions and best practices recommended for the TgTaps project.
This is NOT a strict set of rules, but a set of recommendations to help you write better code.
---
## Table of Contents
- [Engineering Philosophy](#engineering-philosophy)
- [Backend Guidelines](#backend-guidelines)
- [Code Style](#code-style)
- [Engineering philosophy](#engineering-philosophy)
- [Backend guidelines](#backend-guidelines)
- [Code style](#code-style)
- [Boolean naming](#boolean-naming)
- [Add reasonable new lines between logical statements](#add-reasonable-new-lines-between-logical-statements)
- [Comments](#comments)
- [Controllers](#controllers)
- [Dependency Injection (DI)](#dependency-injection-di)
- [Dependency injection (DI)](#dependency-injection-di)
- [Migrations](#migrations)
- [Refactoring](#refactoring)
- [Testing](#testing)
- [Time Handling](#time-handling)
- [CRUD Examples](#crud-examples)
- [Frontend Guidelines](#frontend-guidelines)
- [React Component Structure](#react-component-structure)
- [Time handling](#time-handling)
- [CRUD examples](#crud-examples)
- [Frontend guidelines](#frontend-guidelines)
- [React component structure](#react-component-structure)
---
## Engineering Philosophy
## Engineering philosophy
**Think like a skeptical senior engineer and code reviewer. Don't just do what was asked—also think about what should have been asked.**
⚠️ **Balance vigilance with pragmatism:** Catch real issues, not theoretical ones. Don't let perfect be the enemy of good.
### Task Context Assessment:
### Task context assessment:
**First, assess the task scope:**
@@ -38,7 +40,7 @@ This is NOT a strict set of rules, but a set of recommendations to help you writ
- **Complex** (architecture, security, performance-critical): Full analysis required
- **Unclear** (ambiguous requirements): Always clarify assumptions first
### For Non-Trivial Tasks:
### For non-trivial tasks:
1. **Restate the objective and list assumptions** (explicit + implicit)
- If any assumption is shaky, call it out clearly
@@ -71,7 +73,7 @@ This is NOT a strict set of rules, but a set of recommendations to help you writ
- Patch the answer accordingly
- Verify edge cases are handled
### Application Guidelines:
### Application guidelines:
**Scale your response to the task:**
@@ -84,9 +86,9 @@ This is NOT a strict set of rules, but a set of recommendations to help you writ
---
## Backend Guidelines
## Backend guidelines
### Code Style
### Code style
**Always place private methods to the bottom of file**
@@ -94,7 +96,7 @@ This rule applies to ALL Go files including tests, services, controllers, reposi
In Go, exported (public) functions/methods start with uppercase letters, while unexported (private) ones start with lowercase letters.
#### Structure Order:
#### Structure order:
1. Type definitions and constants
2. Public methods/functions (uppercase)
@@ -227,7 +229,7 @@ func (c *ProjectController) extractProjectID(ctx *gin.Context) uuid.UUID {
}
```
#### Key Points:
#### Key points:
- **Exported/Public** = starts with uppercase letter (CreateUser, GetProject)
- **Unexported/Private** = starts with lowercase letter (validateUser, handleError)
@@ -237,13 +239,13 @@ func (c *ProjectController) extractProjectID(ctx *gin.Context) uuid.UUID {
---
### Boolean Naming
### Boolean naming
**Always prefix boolean variables with verbs like `is`, `has`, `was`, `should`, `can`, etc.**
This makes the code more readable and clearly indicates that the variable represents a true/false state.
#### Good Examples:
#### Good examples:
```go
type User struct {
@@ -265,7 +267,7 @@ wasCompleted := false
hasPermission := checkPermissions()
```
#### Bad Examples:
#### Bad examples:
```go
type User struct {
@@ -286,7 +288,7 @@ completed := false // Should be: wasCompleted
permission := true // Should be: hasPermission
```
#### Common Boolean Prefixes:
#### Common boolean prefixes:
- **is** - current state (IsActive, IsValid, IsEnabled)
- **has** - possession or presence (HasAccess, HasPermission, HasError)
@@ -297,6 +299,167 @@ permission := true // Should be: hasPermission
---
### Add reasonable new lines between logical statements
**Add blank lines between logical blocks to improve code readability.**
Separate different logical operations within a function with blank lines. This makes the code flow clearer and helps identify distinct steps in the logic.
#### Guidelines:
- Add blank line before final `return` statement
- Add blank line after variable declarations before using them
- Add blank line between error handling and subsequent logic
- Add blank line between different logical operations
#### Bad example (without spacing):
```go
func (t *Task) BeforeSave(tx *gorm.DB) error {
if len(t.Messages) > 0 {
messagesBytes, err := json.Marshal(t.Messages)
if err != nil {
return err
}
t.MessagesJSON = string(messagesBytes)
}
return nil
}
func (t *Task) AfterFind(tx *gorm.DB) error {
if t.MessagesJSON != "" {
var messages []onewin_dto.TaskCompletionMessage
if err := json.Unmarshal([]byte(t.MessagesJSON), &messages); err != nil {
return err
}
t.Messages = messages
}
return nil
}
```
#### Good example (with proper spacing):
```go
func (t *Task) BeforeSave(tx *gorm.DB) error {
if len(t.Messages) > 0 {
messagesBytes, err := json.Marshal(t.Messages)
if err != nil {
return err
}
t.MessagesJSON = string(messagesBytes)
}
return nil
}
func (t *Task) AfterFind(tx *gorm.DB) error {
if t.MessagesJSON != "" {
var messages []onewin_dto.TaskCompletionMessage
if err := json.Unmarshal([]byte(t.MessagesJSON), &messages); err != nil {
return err
}
t.Messages = messages
}
return nil
}
```
#### More examples:
**Service method with multiple operations:**
```go
func (s *UserService) CreateUser(request *CreateUserRequest) (*User, error) {
// Validate input
if err := s.validateUserRequest(request); err != nil {
return nil, err
}
// Create user entity
user := &User{
ID: uuid.New(),
Name: request.Name,
Email: request.Email,
}
// Save to database
if err := s.repository.Create(user); err != nil {
return nil, err
}
// Send notification
s.notificationService.SendWelcomeEmail(user.Email)
return user, nil
}
```
**Repository method with query building:**
```go
func (r *Repository) GetFiltered(filters *Filters) ([]*Entity, error) {
query := storage.GetDb().Model(&Entity{})
if filters.Status != "" {
query = query.Where("status = ?", filters.Status)
}
if filters.CreatedAfter != nil {
query = query.Where("created_at > ?", filters.CreatedAfter)
}
var entities []*Entity
if err := query.Find(&entities).Error; err != nil {
return nil, err
}
return entities, nil
}
```
**Repository method with error handling:**
Bad (without spacing):
```go
func (r *Repository) FindById(id uuid.UUID) (*models.Task, error) {
var task models.Task
result := storage.GetDb().Where("id = ?", id).First(&task)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, errors.New("task not found")
}
return nil, result.Error
}
return &task, nil
}
```
Good (with proper spacing):
```go
func (r *Repository) FindById(id uuid.UUID) (*models.Task, error) {
var task models.Task
result := storage.GetDb().Where("id = ?", id).First(&task)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, errors.New("task not found")
}
return nil, result.Error
}
return &task, nil
}
```
---
### Comments
#### Guidelines
@@ -305,13 +468,14 @@ permission := true // Should be: hasPermission
2. **Functions and variables should have meaningful names** - Code should be self-documenting
3. **Comments for unclear code only** - Only add comments when code logic isn't immediately clear
#### Key Principles:
#### Key principles:
- **Code should tell a story** - Use descriptive variable and function names
- **Comments explain WHY, not WHAT** - The code shows what happens, comments explain business logic or complex decisions
- **Prefer refactoring over commenting** - If code needs explaining, consider making it clearer instead
- **API documentation is required** - Swagger comments for all HTTP endpoints are mandatory
- **Complex algorithms deserve comments** - Mathematical formulas, business rules, or non-obvious optimizations
- **Do not write summary sections in .md files unless directly requested** - Avoid adding "Summary" or "Conclusion" sections at the end of documentation files unless the user explicitly asks for them
#### Example of useless comments:
@@ -343,7 +507,7 @@ func CreateValidLogItems(count int, uniqueID string) []logs_receiving.LogItemReq
### Controllers
#### Controller Guidelines:
#### Controller guidelines:
1. **When we write controller:**
- We combine all routes to single controller
@@ -475,7 +639,7 @@ func (c *AuditLogController) GetUserAuditLogs(ctx *gin.Context) {
---
### Dependency Injection (DI)
### Dependency injection (DI)
For DI files use **implicit fields declaration styles** (especially for controllers, services, repositories, use cases, etc., not simple data structures).
@@ -503,7 +667,7 @@ var orderController = &OrderController{
**This is needed to avoid forgetting to update DI style when we add new dependency.**
#### Force Such Usage
#### Force such usage
Please force such usage if file look like this (see some services\controllers\repos definitions and getters):
@@ -549,13 +713,13 @@ func GetOrderRepository() *repositories.OrderRepository {
}
```
#### SetupDependencies() Pattern
#### SetupDependencies() pattern
**All `SetupDependencies()` functions must use sync.Once to ensure idempotent execution.**
This pattern allows `SetupDependencies()` to be safely called multiple times (especially in tests) while ensuring the actual setup logic executes only once.
**Implementation Pattern:**
**Implementation pattern:**
```go
package feature
@@ -588,7 +752,7 @@ func SetupDependencies() {
}
```
**Why This Pattern:**
**Why this pattern:**
- **Tests can call multiple times**: Test setup often calls `SetupDependencies()` multiple times without issues
- **Thread-safe**: Works correctly with concurrent calls (nanoseconds or seconds apart)
@@ -604,13 +768,13 @@ func SetupDependencies() {
---
### Background Services
### Background services
**All background service `Run()` methods must panic if called multiple times to prevent corrupted states.**
Background services run infinite loops and must never be started twice on the same instance. Multiple calls indicate a serious bug that would cause duplicate goroutines, resource leaks, and data corruption.
**Implementation Pattern:**
**Implementation pattern:**
```go
package feature
@@ -654,14 +818,14 @@ func (s *BackgroundService) Run(ctx context.Context) {
}
```
**Why Panic Instead of Warning:**
**Why panic instead of warning:**
- **Prevents corruption**: Multiple `Run()` calls would create duplicate goroutines consuming resources
- **Fails fast**: Catches critical bugs immediately in tests and production
- **Clear indication**: Panic clearly indicates a serious programming error
- **Applies everywhere**: Same protection in tests and production
**When This Applies:**
**When this applies:**
- All background services with infinite loops
- Registry services (BackupNodesRegistry, RestoreNodesRegistry)
@@ -727,14 +891,14 @@ You can shortify, make more readable, improve code quality, etc. Common logic ca
**After writing tests, always launch them and verify that they pass.**
#### Test Naming Format
#### Test naming format
Use these naming patterns:
- `Test_WhatWeDo_WhatWeExpect`
- `Test_WhatWeDo_WhichConditions_WhatWeExpect`
#### Examples from Real Codebase:
#### Examples from real codebase:
- `Test_CreateApiKey_WhenUserIsProjectOwner_ApiKeyCreated`
- `Test_UpdateProject_WhenUserIsProjectAdmin_ProjectUpdated`
@@ -742,22 +906,22 @@ Use these naming patterns:
- `Test_GetProjectAuditLogs_WithDifferentUserRoles_EnforcesPermissionsCorrectly`
- `Test_ProjectLifecycleE2E_CompletesSuccessfully`
#### Testing Philosophy
#### Testing philosophy
**Prefer Controllers Over Unit Tests:**
**Prefer controllers over unit tests:**
- Test through HTTP endpoints via controllers whenever possible
- Avoid testing repositories, services in isolation - test via API instead
- Only use unit tests for complex model logic when no API exists
- Name test files `controller_test.go` or `service_test.go`, not `integration_test.go`
**Extract Common Logic to Testing Utilities:**
**Extract common logic to testing utilities:**
- Create `testing.go` or `testing/testing.go` files for shared test utilities
- Extract router creation, user setup, models creation helpers (in API, not just structs creation)
- Reuse common patterns across different test files
**Refactor Existing Tests:**
**Refactor existing tests:**
- When working with existing tests, always look for opportunities to refactor and improve
- Extract repetitive setup code to common utilities
@@ -766,7 +930,7 @@ Use these naming patterns:
- Consolidate similar test patterns across different test files
- Make tests more readable and maintainable for other developers
**Clean Up Test Data:**
**Clean up test data:**
- If the feature supports cleanup operations (DELETE endpoints, cleanup methods), use them in tests
- Clean up resources after test execution to avoid test data pollution
@@ -803,7 +967,7 @@ func Test_BackupLifecycle_CreateAndDelete(t *testing.T) {
}
```
#### Testing Utilities Structure
#### Testing utilities structure
**Create `testing.go` or `testing/testing.go` files with common utilities:**
@@ -839,7 +1003,7 @@ func AddMemberToProject(project *projects_models.Project, member *users_dto.Sign
}
```
#### Controller Test Examples
#### Controller test examples
**Permission-based testing:**
@@ -906,7 +1070,7 @@ func Test_ProjectLifecycleE2E_CompletesSuccessfully(t *testing.T) {
---
### Time Handling
### Time handling
**Always use `time.Now().UTC()` instead of `time.Now()`**
@@ -914,7 +1078,7 @@ This ensures consistent timezone handling across the application.
---
### CRUD Examples
### CRUD examples
This is an example of complete CRUD implementation structure:
@@ -1578,9 +1742,9 @@ func createTimedLog(db *gorm.DB, userID *uuid.UUID, message string, createdAt ti
---
## Frontend Guidelines
## Frontend guidelines
### React Component Structure
### React component structure
Write React components with the following structure:
@@ -1614,7 +1778,7 @@ export const ReactComponent = ({ someValue }: Props): JSX.Element => {
}
```
#### Structure Order:
#### Structure order:
1. **Props interface** - Define component props
2. **Helper functions** (outside component) - Pure utility functions

1
CLAUDE.md Normal file
View File

@@ -0,0 +1 @@
Look at @AGENTS.md

View File

@@ -22,7 +22,7 @@ RUN npm run build
# ========= BUILD BACKEND =========
# Backend build stage
FROM --platform=$BUILDPLATFORM golang:1.24.9 AS backend-build
FROM --platform=$BUILDPLATFORM golang:1.26.1 AS backend-build
# Make TARGET args available early so tools built here match the final image arch
ARG TARGETOS
@@ -66,13 +66,52 @@ RUN CGO_ENABLED=0 \
go build -o /app/main ./cmd/main.go
# ========= BUILD AGENT =========
# Builds the databasus-agent CLI binary for BOTH x86_64 and ARM64.
# Both architectures are always built because:
# - Databasus server runs on one arch (e.g. amd64)
# - The agent runs on remote PostgreSQL servers that may be on a
# different arch (e.g. arm64)
# - The backend serves the correct binary based on the agent's
# ?arch= query parameter
#
# We cross-compile from the build platform (no QEMU needed) because the
# agent is pure Go with zero C dependencies.
# CGO_ENABLED=0 produces fully static binaries — no glibc/musl dependency,
# so the agent runs on any Linux distro (Alpine, Debian, Ubuntu, RHEL, etc.).
# APP_VERSION is baked into the binary via -ldflags so the agent can
# compare its version against the server and auto-update when needed.
FROM --platform=$BUILDPLATFORM golang:1.26.1 AS agent-build
ARG APP_VERSION=dev
WORKDIR /agent
COPY agent/go.mod ./
RUN go mod download
COPY agent/ ./
# Build for x86_64 (amd64) — static binary, no glibc dependency
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
go build -ldflags "-X main.Version=${APP_VERSION}" \
-o /agent-binaries/databasus-agent-linux-amd64 ./cmd/main.go
# Build for ARM64 (arm64) — static binary, no glibc dependency
RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 \
go build -ldflags "-X main.Version=${APP_VERSION}" \
-o /agent-binaries/databasus-agent-linux-arm64 ./cmd/main.go
# ========= RUNTIME =========
FROM debian:bookworm-slim
# Add version metadata to runtime image
ARG APP_VERSION=dev
ARG TARGETARCH
LABEL org.opencontainers.image.version=$APP_VERSION
ENV APP_VERSION=$APP_VERSION
ENV CONTAINER_ARCH=$TARGETARCH
# Set production mode for Docker containers
ENV ENV_MODE=production
@@ -218,6 +257,10 @@ COPY backend/migrations ./migrations
# Copy UI files
COPY --from=backend-build /app/ui/build ./ui/build
# Copy agent binaries (both architectures) — served by the backend
# at GET /api/v1/system/agent?arch=amd64|arm64
COPY --from=agent-build /agent-binaries ./agent-binaries
# Copy .env file (with fallback to .env.production.example)
COPY backend/.env* /app/
RUN if [ ! -f /app/.env ]; then \
@@ -268,10 +311,21 @@ window.__RUNTIME_CONFIG__ = {
IS_CLOUD: '\${IS_CLOUD:-false}',
GITHUB_CLIENT_ID: '\${GITHUB_CLIENT_ID:-}',
GOOGLE_CLIENT_ID: '\${GOOGLE_CLIENT_ID:-}',
IS_EMAIL_CONFIGURED: '\$IS_EMAIL_CONFIGURED'
IS_EMAIL_CONFIGURED: '\$IS_EMAIL_CONFIGURED',
CLOUDFLARE_TURNSTILE_SITE_KEY: '\${CLOUDFLARE_TURNSTILE_SITE_KEY:-}',
CONTAINER_ARCH: '\${CONTAINER_ARCH:-unknown}'
};
JSEOF
# Inject analytics script if provided (only if not already injected)
if [ -n "\${ANALYTICS_SCRIPT:-}" ]; then
if ! grep -q "rybbit.databasus.com" /app/ui/build/index.html 2>/dev/null; then
echo "Injecting analytics script..."
sed -i "s#</head># \${ANALYTICS_SCRIPT}\\
</head>#" /app/ui/build/index.html
fi
fi
# Ensure proper ownership of data directory
echo "Setting up data directory permissions..."
mkdir -p /databasus-data/pgdata
@@ -384,6 +438,8 @@ fi
# Create database and set password for postgres user
echo "Setting up database and user..."
gosu postgres \$PG_BIN/psql -p 5437 -h localhost -d postgres << 'SQL'
-- We use a stub password because the internal DB is not exposed outside the container
ALTER USER postgres WITH PASSWORD 'Q1234567';
SELECT 'CREATE DATABASE databasus OWNER postgres'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'databasus')
@@ -422,6 +478,8 @@ fi
exec ./main
EOF
LABEL org.opencontainers.image.source="https://github.com/databasus/databasus"
RUN chmod +x /app/start.sh
EXPOSE 4005
@@ -430,4 +488,4 @@ EXPOSE 4005
VOLUME ["/databasus-data"]
ENTRYPOINT ["/app/start.sh"]
CMD []
CMD []

View File

@@ -2,7 +2,7 @@
<img src="assets/logo.svg" alt="Databasus Logo" width="250"/>
<h3>Backup tool for PostgreSQL, MySQL and MongoDB</h3>
<p>Databasus is a free, open source and self-hosted tool to backup databases (with focus on PostgreSQL). Make backups with different storages (S3, Google Drive, FTP, etc.) and notifications about progress (Slack, Discord, Telegram, etc.). Previously known as Postgresus (see migration guide).</p>
<p>Databasus is a free, open source and self-hosted tool to backup databases (with focus on PostgreSQL). Make backups with different storages (S3, Google Drive, FTP, etc.) and notifications about progress (Slack, Discord, Telegram, etc.)</p>
<!-- Badges -->
[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-336791?logo=postgresql&logoColor=white)](https://www.postgresql.org/)
@@ -11,7 +11,7 @@
[![MongoDB](https://img.shields.io/badge/MongoDB-47A248?logo=mongodb&logoColor=white)](https://www.mongodb.com/)
<br />
[![Apache 2.0 License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
[![Docker Pulls](https://img.shields.io/docker/pulls/rostislavdugin/postgresus?color=brightgreen)](https://hub.docker.com/r/rostislavdugin/postgresus)
[![Docker Pulls](https://img.shields.io/docker/pulls/databasus/databasus?color=brightgreen)](https://hub.docker.com/r/databasus/databasus)
[![Platform](https://img.shields.io/badge/platform-linux%20%7C%20macos%20%7C%20windows-lightgrey)](https://github.com/databasus/databasus)
[![Self Hosted](https://img.shields.io/badge/self--hosted-yes-brightgreen)](https://github.com/databasus/databasus)
[![Open Source](https://img.shields.io/badge/open%20source-❤️-red)](https://github.com/databasus/databasus)
@@ -31,8 +31,6 @@
<img src="assets/dashboard-dark.svg" alt="Databasus Dark Dashboard" width="800" style="margin-bottom: 10px;"/>
<img src="assets/dashboard.svg" alt="Databasus Dashboard" width="800"/>
</div>
---
@@ -43,7 +41,7 @@
- **PostgreSQL**: 12, 13, 14, 15, 16, 17 and 18
- **MySQL**: 5.7, 8 and 9
- **MariaDB**: 10 and 11
- **MariaDB**: 10, 11 and 12
- **MongoDB**: 4, 5, 6, 7 and 8
### 🔄 **Scheduled backups**
@@ -52,6 +50,13 @@
- **Precise timing**: run backups at specific times (e.g., 4 AM during low traffic)
- **Smart compression**: 4-8x space savings with balanced compression (~20% overhead)
### 🗑️ **Retention policies**
- **Time period**: Keep backups for a fixed duration (e.g., 7 days, 3 months, 1 year)
- **Count**: Keep a fixed number of the most recent backups (e.g., last 30)
- **GFS (Grandfather-Father-Son)**: Layered retention — keep hourly, daily, weekly, monthly and yearly backups independently for fine-grained long-term history (an enterprise requirement)
- **Size limits**: Set per-backup and total storage size caps to control storage usage
### 🗄️ **Multiple storage destinations** <a href="https://databasus.com/storages">(view supported)</a>
- **Local storage**: Keep backups on your VPS/server
@@ -71,6 +76,8 @@
- **Encryption for secrets**: Any sensitive data is encrypted and never exposed, even in logs or error messages
- **Read-only user**: Databasus uses a read-only user by default for backups and never stores anything that can modify your data
It is also important for Databasus that you are able to decrypt and restore backups from storages (local, S3, etc.) without Databasus itself. To do so, read our guide on [how to recover directly from storage](https://databasus.com/how-to-recover-without-databasus). We avoid "vendor lock-in" even to open source tool!
### 👥 **Suitable for teams** <a href="https://databasus.com/access-management">(docs)</a>
- **Workspaces**: Group databases, notifiers and storages for different projects or teams
@@ -220,8 +227,9 @@ For more options (NodePort, TLS, HTTPRoute for Gateway API), see the [Helm chart
3. **Configure schedule**: Choose from hourly, daily, weekly, monthly or cron intervals
4. **Set database connection**: Enter your database credentials and connection details
5. **Choose storage**: Select where to store your backups (local, S3, Google Drive, etc.)
6. **Add notifications** (optional): Configure email, Telegram, Slack, or webhook notifications
7. **Save and start**: Databasus will validate settings and begin the backup schedule
6. **Configure retention policy**: Choose time period, count or GFS to control how long backups are kept
7. **Add notifications** (optional): Configure email, Telegram, Slack, or webhook notifications
8. **Save and start**: Databasus will validate settings and begin the backup schedule
### 🔑 Resetting password <a href="https://databasus.com/password">(docs)</a>
@@ -233,66 +241,37 @@ docker exec -it databasus ./main --new-password="YourNewSecurePassword123" --ema
Replace `admin` with the actual email address of the user whose password you want to reset.
### 💾 Backing up Databasus itself
After installation, it is also recommended to <a href="https://databasus.com/faq/#backup-databasus">backup your Databasus itself</a> or, at least, to copy secret key used for encryption (30 seconds is needed). So you are able to restore from your encrypted backups if you lose access to the server with Databasus or it is corrupted.
---
## 📝 License
This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENSE) file for details
---
## 🤝 Contributing
Contributions are welcome! Read the <a href="https://databasus.com/contribute">contributing guide</a> for more details, priorities and rules. If you want to contribute but don't know where to start, message me on Telegram [@rostislav_dugin](https://t.me/rostislav_dugin)
Also you can join our large community of developers, DBAs and DevOps engineers on Telegram [@databasus_community](https://t.me/databasus_community).
---
## 📖 Migration guide
Databasus is the new name for Postgresus. You can stay with latest version of Postgresus if you wish. If you want to migrate - follow installation steps for Databasus itself.
Just renaming an image is not enough as Postgresus and Databasus use different data folders and internal database naming.
You can put a new Databasus image with updated volume near the old Postgresus and run it (stop Postgresus before):
```
services:
databasus:
container_name: databasus
image: databasus/databasus:latest
ports:
- "4005:4005"
volumes:
- ./databasus-data:/databasus-data
restart: unless-stopped
```
Then manually move databases from Postgresus to Databasus.
### Why was Postgresus renamed to Databasus?
Databasus has been developed since 2023. It started as an internal tool to back up production and home project databases. At the start of 2025 it was released as an open source project on GitHub. By the end of 2025 it became popular, and the time for renaming came in December 2025.
It was an important step for the project to grow. Actually, there are a couple of reasons:
1. Postgresus is no longer a little tool that just adds UI for pg_dump for little projects. It became a tool both for individual users, DevOps, DBAs, teams, companies and even large enterprises. Tens of thousands of users use Postgresus every day. Postgresus grew into a reliable backup management tool. Initial positioning is no longer suitable: the project is not just a UI wrapper, it's a solid backup management system now (despite it's still easy to use).
2. New databases are supported: although the primary focus is PostgreSQL (with 100% support in the most efficient way) and always will be, Databasus added support for MySQL, MariaDB and MongoDB. Later more databases will be supported.
3. Trademark issue: "postgres" is a trademark of PostgreSQL Inc. and cannot be used in the project name. So for safety and legal reasons, we had to rename the project.
## AI disclaimer
There have been questions about AI usage in project development in issues and discussions. As the project focuses on security, reliability and production usage, it's important to explain how AI is used in the development process.
First of all, we are proud to say that Databasus has been accepted into both [Claude for Open Source](https://claude.com/contact-sales/claude-for-oss) by Anthropic and [Codex for Open Source](https://developers.openai.com/codex/community/codex-for-oss/) by OpenAI in March 2026. For us it is one more signal that the project was recognized as important open-source software and as critical infrastructure worth supporting, independently, by two of the world's leading AI companies. Read more at [databasus.com/faq](https://databasus.com/faq#oss-programs).
Despite this, we have the following rules for how AI is used in the development process:
AI is used as a helper for:
- verification of code quality and searching for vulnerabilities
- cleaning up and improving documentation, comments and code
- assistance during development
- double-checking PRs and commits after human review
- additional security analysis of PRs via Codex Security
AI is not used for:

1
agent/.env.example Normal file
View File

@@ -0,0 +1 @@
ENV_MODE=development

24
agent/.gitignore vendored Normal file
View File

@@ -0,0 +1,24 @@
main
.env
docker-compose.yml
!e2e/docker-compose.yml
pgdata
pgdata_test/
mysqldata/
mariadbdata/
main.exe
swagger/
swagger/*
swagger/docs.go
swagger/swagger.json
swagger/swagger.yaml
postgresus-backend.exe
databasus-backend.exe
ui/build/*
pgdata-for-restore/
temp/
cmd.exe
temp/
valkey-data/
victoria-logs-data/
databasus.json

41
agent/.golangci.yml Normal file
View File

@@ -0,0 +1,41 @@
version: "2"
run:
timeout: 5m
tests: false
concurrency: 4
linters:
default: standard
enable:
- funcorder
- bodyclose
- errorlint
- gocritic
- unconvert
- misspell
- errname
- noctx
- modernize
settings:
errcheck:
check-type-assertions: true
formatters:
enable:
- gofumpt
- golines
- gci
settings:
golines:
max-len: 120
gofumpt:
module-path: databasus-agent
extra-rules: true
gci:
sections:
- standard
- default
- localmodule

26
agent/Makefile Normal file
View File

@@ -0,0 +1,26 @@
# Developer entry points for the Databasus agent.
.PHONY: run build test lint e2e e2e-clean

# Usage: make run ARGS="start --pg-host localhost"
run:
	go run cmd/main.go $(ARGS)

# Static binary; VERSION is baked in via -ldflags (see Version in cmd/main.go).
build:
	CGO_ENABLED=0 go build -ldflags "-X main.Version=$(VERSION)" -o databasus-agent ./cmd/main.go

# -count=1 disables test result caching; -failfast stops on the first failure.
test:
	go test -count=1 -failfast ./internal/...

# Format first, then lint the same package sets.
lint:
	golangci-lint fmt ./cmd/... ./internal/... ./e2e/... && golangci-lint run ./cmd/... ./internal/... ./e2e/...

# Full e2e flow: build images, build agent binaries, start dependencies,
# run both runner variants, then tear everything down (including volumes).
e2e:
	cd e2e && docker compose build
	cd e2e && docker compose run --rm e2e-agent-builder
	cd e2e && docker compose up -d e2e-postgres e2e-mock-server
	cd e2e && docker compose run --rm e2e-agent-runner
	cd e2e && docker compose run --rm e2e-agent-docker
	cd e2e && docker compose down -v

# Remove e2e containers, volumes, locally-built images and build artifacts.
e2e-clean:
	cd e2e && docker compose down -v --rmi local
	rm -rf e2e/artifacts

170
agent/cmd/main.go Normal file
View File

@@ -0,0 +1,170 @@
package main
import (
"flag"
"fmt"
"log/slog"
"os"
"path/filepath"
"strings"
"databasus-agent/internal/config"
"databasus-agent/internal/features/start"
"databasus-agent/internal/features/upgrade"
"databasus-agent/internal/logger"
)
var Version = "dev"
// main dispatches the CLI subcommand given as the first argument and
// exits non-zero when the command is missing or unknown.
func main() {
	if len(os.Args) < 2 {
		printUsage()
		os.Exit(1)
	}

	command := os.Args[1]
	commandArgs := os.Args[2:]

	switch command {
	case "start":
		runStart(commandArgs)
	case "stop":
		runStop()
	case "status":
		runStatus()
	case "restore":
		runRestore(commandArgs)
	case "version":
		fmt.Println(Version)
	default:
		fmt.Fprintf(os.Stderr, "unknown command: %s\n", command)
		printUsage()
		os.Exit(1)
	}
}
// runStart parses the "start" flags, persists the merged config and hands
// control to the start feature after an optional auto-update check.
func runStart(args []string) {
	fs := flag.NewFlagSet("start", flag.ExitOnError)
	isDebug := fs.Bool("debug", false, "Enable debug logging")
	isSkipUpdate := fs.Bool("skip-update", false, "Skip auto-update check")

	cfg := &config.Config{}
	cfg.LoadFromJSONAndArgs(fs, args)

	// Persisting the config is best-effort: a failed save is reported but
	// does not prevent the agent from starting.
	if saveErr := cfg.SaveToJSON(); saveErr != nil {
		fmt.Fprintf(os.Stderr, "Failed to save config: %v\n", saveErr)
	}

	logger.Init(*isDebug)
	log := logger.GetLogger()

	runUpdateCheck(cfg.DatabasusHost, *isSkipUpdate, checkIsDevelopment(), log)

	if err := start.Run(cfg, log); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}
// runStop is a placeholder for the future "stop" command.
func runStop() {
	logger.Init(false)
	log := logger.GetLogger()
	log.Info("stop: stub — not yet implemented")
}
// runStatus is a placeholder for the future "status" command.
func runStatus() {
	logger.Init(false)
	log := logger.GetLogger()
	log.Info("status: stub — not yet implemented")
}
// runRestore parses the "restore" flags and currently only logs the parsed
// values; the actual restore flow is not implemented yet.
func runRestore(args []string) {
	fs := flag.NewFlagSet("restore", flag.ExitOnError)

	targetDir := fs.String("target-dir", "", "Target pgdata directory")
	backupID := fs.String("backup-id", "", "Full backup UUID (optional)")
	targetTime := fs.String("target-time", "", "PITR target time in RFC3339 (optional)")
	isYes := fs.Bool("yes", false, "Skip confirmation prompt")
	isDebug := fs.Bool("debug", false, "Enable debug logging")
	isSkipUpdate := fs.Bool("skip-update", false, "Skip auto-update check")

	cfg := &config.Config{}
	cfg.LoadFromJSONAndArgs(fs, args)

	// Best-effort persistence of the merged configuration.
	if saveErr := cfg.SaveToJSON(); saveErr != nil {
		fmt.Fprintf(os.Stderr, "Failed to save config: %v\n", saveErr)
	}

	logger.Init(*isDebug)
	log := logger.GetLogger()

	runUpdateCheck(cfg.DatabasusHost, *isSkipUpdate, checkIsDevelopment(), log)

	log.Info("restore: stub — not yet implemented",
		"targetDir", *targetDir,
		"backupId", *backupID,
		"targetTime", *targetTime,
		"yes", *isYes,
	)
}
// printUsage writes the list of supported commands to stderr. main calls
// it both when the command is missing and when it is unknown.
func printUsage() {
	fmt.Fprintln(os.Stderr, "Usage: databasus-agent <command> [flags]")
	fmt.Fprintln(os.Stderr, "")
	// One line per subcommand, mirroring the cases handled in main's switch.
	fmt.Fprintln(os.Stderr, "Commands:")
	fmt.Fprintln(os.Stderr, " start Start the agent (WAL archiving + basebackups)")
	fmt.Fprintln(os.Stderr, " stop Stop a running agent")
	fmt.Fprintln(os.Stderr, " status Show agent status")
	fmt.Fprintln(os.Stderr, " restore Restore a database from backup")
	fmt.Fprintln(os.Stderr, " version Print agent version")
// runUpdateCheck runs the auto-update flow against the given Databasus
// host. It is a no-op when updates are explicitly skipped or no host is
// configured; a failed update check terminates the process.
func runUpdateCheck(host string, isSkipUpdate, isDev bool, log *slog.Logger) {
	if isSkipUpdate || host == "" {
		return
	}

	if err := upgrade.CheckAndUpdate(host, Version, isDev, log); err != nil {
		log.Error("Auto-update failed", "error", err)
		os.Exit(1)
	}
}
func checkIsDevelopment() bool {
dir, err := os.Getwd()
if err != nil {
return false
}
for range 3 {
if data, err := os.ReadFile(filepath.Join(dir, ".env")); err == nil {
return parseEnvMode(data)
}
if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
return false
}
dir = filepath.Dir(dir)
}
return false
}
func parseEnvMode(data []byte) bool {
for line := range strings.SplitSeq(string(data), "\n") {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
parts := strings.SplitN(line, "=", 2)
if len(parts) == 2 && strings.TrimSpace(parts[0]) == "ENV_MODE" {
return strings.TrimSpace(parts[1]) == "development"
}
}
return false
}

1
agent/e2e/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
artifacts/

View File

@@ -0,0 +1,13 @@
# Builds agent binaries with different versions so
# we can test upgrade behavior (v1 -> v2)
FROM golang:1.26.1-alpine AS build
WORKDIR /src
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 go build -ldflags "-X main.Version=v1.0.0" -o /out/agent-v1 ./cmd/main.go
RUN CGO_ENABLED=0 go build -ldflags "-X main.Version=v2.0.0" -o /out/agent-v2 ./cmd/main.go
FROM alpine:3.21
COPY --from=build /out/ /out/
CMD ["cp", "-v", "/out/agent-v1", "/out/agent-v2", "/artifacts/"]

View File

@@ -0,0 +1,8 @@
# Runs pg_basebackup-via-docker-exec test (test 5) which tests
# that the agent can connect to Postgres inside Docker container
FROM docker:27-cli
RUN apk add --no-cache bash curl
WORKDIR /tmp
ENTRYPOINT []

View File

@@ -0,0 +1,14 @@
# Runs upgrade and host-mode pg_basebackup tests (tests 1-4). Needs
# Postgres client tools to be installed inside the system
FROM debian:bookworm-slim
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl gnupg2 postgresql-common && \
/usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y && \
apt-get install -y --no-install-recommends \
postgresql-client-17 && \
rm -rf /var/lib/apt/lists/*
WORKDIR /tmp
ENTRYPOINT []

View File

@@ -0,0 +1,10 @@
# Mock databasus API server for version checks and binary downloads. Just
# serves static responses and files from the `artifacts` directory.
FROM golang:1.26.1-alpine AS build
WORKDIR /app
COPY mock-server/main.go .
RUN CGO_ENABLED=0 go build -o mock-server main.go
FROM alpine:3.21
COPY --from=build /app/mock-server /usr/local/bin/mock-server
ENTRYPOINT ["mock-server"]

View File

@@ -0,0 +1,64 @@
services:
e2e-agent-builder:
build:
context: ..
dockerfile: e2e/Dockerfile.agent-builder
volumes:
- ./artifacts:/artifacts
container_name: e2e-agent-builder
e2e-postgres:
image: postgres:17
environment:
POSTGRES_DB: testdb
POSTGRES_USER: testuser
POSTGRES_PASSWORD: testpassword
container_name: e2e-agent-postgres
command: postgres -c wal_level=replica -c max_wal_senders=3
healthcheck:
test: ["CMD-SHELL", "pg_isready -U testuser -d testdb"]
interval: 2s
timeout: 5s
retries: 30
e2e-mock-server:
build:
context: .
dockerfile: Dockerfile.mock-server
volumes:
- ./artifacts:/artifacts:ro
container_name: e2e-mock-server
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:4050/health"]
interval: 2s
timeout: 5s
retries: 10
e2e-agent-runner:
build:
context: .
dockerfile: Dockerfile.agent-runner
volumes:
- ./artifacts:/opt/agent/artifacts:ro
- ./scripts:/opt/agent/scripts:ro
depends_on:
e2e-postgres:
condition: service_healthy
e2e-mock-server:
condition: service_healthy
container_name: e2e-agent-runner
command: ["bash", "/opt/agent/scripts/run-all.sh", "host"]
e2e-agent-docker:
build:
context: .
dockerfile: Dockerfile.agent-docker
volumes:
- ./artifacts:/opt/agent/artifacts:ro
- ./scripts:/opt/agent/scripts:ro
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
e2e-postgres:
condition: service_healthy
container_name: e2e-agent-docker
command: ["bash", "/opt/agent/scripts/run-all.sh", "docker"]

View File

@@ -0,0 +1,84 @@
package main
import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"sync"
	"time"
)
// server holds the mutable state of the mock Databasus API used by the
// agent e2e tests: the version reported to agents and the path of the
// binary served for downloads. mu guards both fields, since handlers may
// run concurrently.
type server struct {
	mu         sync.RWMutex
	version    string
	binaryPath string
}
// main starts the mock Databasus API server used by the agent e2e tests.
// It serves a configurable version, the agent binary download, a test-only
// endpoint to change the reported version, and a health probe.
func main() {
	version := "v2.0.0"
	binaryPath := "/artifacts/agent-v2"
	port := "4050"

	s := &server{version: version, binaryPath: binaryPath}

	mux := http.NewServeMux()
	mux.HandleFunc("/api/v1/system/version", s.handleVersion)
	mux.HandleFunc("/api/v1/system/agent", s.handleAgentDownload)
	mux.HandleFunc("/mock/set-version", s.handleSetVersion)
	mux.HandleFunc("/health", s.handleHealth)

	addr := ":" + port
	log.Printf("Mock server starting on %s (version=%s, binary=%s)", addr, version, binaryPath)

	// Use an explicit http.Server so we can set ReadHeaderTimeout: the bare
	// http.ListenAndServe has no timeouts at all. WriteTimeout is deliberately
	// left unset so serving the agent binary is not cut off mid-download.
	srv := &http.Server{
		Addr:              addr,
		Handler:           mux,
		ReadHeaderTimeout: 10 * time.Second,
	}
	if err := srv.ListenAndServe(); err != nil {
		log.Fatalf("Server failed: %v", err)
	}
}
// handleVersion returns the currently configured agent version as JSON:
// {"version": "<v>"}. The request itself carries no parameters, so it is
// ignored (matching handleHealth's signature style).
func (s *server) handleVersion(w http.ResponseWriter, _ *http.Request) {
	s.mu.RLock()
	currentVersion := s.version
	s.mu.RUnlock()

	log.Printf("GET /api/v1/system/version -> %s", currentVersion)

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(map[string]string{"version": currentVersion})
}
// handleAgentDownload serves the configured agent binary file. The arch
// query parameter is only logged here — the mock always returns the same
// binary regardless of the requested architecture.
func (s *server) handleAgentDownload(w http.ResponseWriter, r *http.Request) {
	s.mu.RLock()
	servedPath := s.binaryPath
	s.mu.RUnlock()

	requestedArch := r.URL.Query().Get("arch")
	log.Printf("GET /api/v1/system/agent (arch=%s) -> serving %s", requestedArch, servedPath)

	http.ServeFile(w, r, servedPath)
}
// handleSetVersion is a test-only endpoint (POST) that changes the version
// the mock reports, letting e2e scripts switch between upgrade and skip
// scenarios. Expected body: {"version": "vX.Y.Z"}.
func (s *server) handleSetVersion(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "POST only", http.StatusMethodNotAllowed)
		return
	}

	var body struct {
		Version string `json:"version"`
	}
	// Cap the request body: this endpoint only ever receives a tiny JSON
	// payload, so 1 MiB bounds memory usage against oversized requests.
	if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 1<<20)).Decode(&body); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	s.mu.Lock()
	s.version = body.Version
	s.mu.Unlock()

	log.Printf("POST /mock/set-version -> %s", body.Version)
	_, _ = fmt.Fprintf(w, "version set to %s", body.Version)
}
// handleHealth answers the container healthcheck with 200 and body "ok".
func (s *server) handleHealth(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
	_, _ = fmt.Fprint(w, "ok")
}

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Orchestrates the agent e2e test scripts. Mode "host" runs the tests that
# need Postgres client tools on the host; mode "docker" runs the test that
# reaches pg_basebackup through `docker exec`.
set -euo pipefail

MODE="${1:-host}"
SCRIPT_DIR="$(dirname "$0")"

PASSED=0
FAILED=0

# run_test <display name> <script path> — runs one test script and tallies
# the result. Running it inside `if` keeps `set -e` from aborting the whole
# suite on a failing test.
run_test() {
    local name="$1"
    local script="$2"
    echo ""
    echo "========================================"
    echo " $name"
    echo "========================================"
    if bash "$script"; then
        echo " PASSED: $name"
        PASSED=$((PASSED + 1))
    else
        echo " FAILED: $name"
        FAILED=$((FAILED + 1))
    fi
}

if [ "$MODE" = "host" ]; then
    run_test "Test 1: Upgrade success (v1 -> v2)" "$SCRIPT_DIR/test-upgrade-success.sh"
    run_test "Test 2: Upgrade skip (version matches)" "$SCRIPT_DIR/test-upgrade-skip.sh"
    run_test "Test 3: pg_basebackup in PATH" "$SCRIPT_DIR/test-pg-host-path.sh"
    run_test "Test 4: pg_basebackup via bindir" "$SCRIPT_DIR/test-pg-host-bindir.sh"
elif [ "$MODE" = "docker" ]; then
    run_test "Test 5: pg_basebackup via docker exec" "$SCRIPT_DIR/test-pg-docker-exec.sh"
else
    echo "Unknown mode: $MODE (expected 'host' or 'docker')"
    exit 1
fi

echo ""
echo "========================================"
echo " Results: $PASSED passed, $FAILED failed"
echo "========================================"

# Propagate failure to the process exit code so CI fails the job.
if [ "$FAILED" -gt 0 ]; then
    exit 1
fi

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Test 5: the agent must find pg_basebackup by exec-ing into the Postgres
# Docker container (--pg-type docker) and verify the DB connection.
set -euo pipefail

ARTIFACTS="/opt/agent/artifacts"
AGENT="/tmp/test-agent"
PG_CONTAINER="e2e-agent-postgres"

# Copy agent binary
cp "$ARTIFACTS/agent-v1" "$AGENT"
chmod +x "$AGENT"

# Verify docker CLI works and PG container is accessible
if ! docker exec "$PG_CONTAINER" pg_basebackup --version > /dev/null 2>&1; then
    echo "FAIL: Cannot reach pg_basebackup inside container $PG_CONTAINER (test setup issue)"
    exit 1
fi

# Run start with --skip-update and pg-type=docker.
# Capture the exit code with `|| EXIT_CODE=$?`: under `set -e` a bare
# `EXIT_CODE=$?` after a failing command substitution is never reached —
# the script would abort before printing the agent output.
echo "Running agent start (pg_basebackup via docker exec)..."
EXIT_CODE=0
OUTPUT=$("$AGENT" start \
    --skip-update \
    --databasus-host http://e2e-mock-server:4050 \
    --db-id test-db-id \
    --token test-token \
    --pg-host e2e-postgres \
    --pg-port 5432 \
    --pg-user testuser \
    --pg-password testpassword \
    --wal-dir /tmp/wal \
    --pg-type docker \
    --pg-docker-container-name "$PG_CONTAINER" 2>&1) || EXIT_CODE=$?

echo "$OUTPUT"

if [ "$EXIT_CODE" -ne 0 ]; then
    echo "FAIL: Agent exited with code $EXIT_CODE"
    exit 1
fi

if ! echo "$OUTPUT" | grep -q "pg_basebackup verified (docker)"; then
    echo "FAIL: Expected output to contain 'pg_basebackup verified (docker)'"
    exit 1
fi

if ! echo "$OUTPUT" | grep -q "PostgreSQL connection verified"; then
    echo "FAIL: Expected output to contain 'PostgreSQL connection verified'"
    exit 1
fi

echo "pg_basebackup found via docker exec and DB connection verified"

View File

@@ -0,0 +1,57 @@
#!/bin/bash
# Test 4: the agent must find pg_basebackup in a custom directory passed
# via --pg-host-bin-dir (rather than via PATH) and verify the DB connection.
set -euo pipefail

ARTIFACTS="/opt/agent/artifacts"
AGENT="/tmp/test-agent"
CUSTOM_BIN_DIR="/opt/pg/bin"

# Copy agent binary
cp "$ARTIFACTS/agent-v1" "$AGENT"
chmod +x "$AGENT"

# Move pg_basebackup out of PATH into custom directory
mkdir -p "$CUSTOM_BIN_DIR"
cp "$(which pg_basebackup)" "$CUSTOM_BIN_DIR/pg_basebackup"

# Hide the system one by prepending an empty dir to PATH
# (create the directory before putting it on PATH)
mkdir -p /opt/empty-path
export PATH="/opt/empty-path:$PATH"

# Verify pg_basebackup is NOT directly callable from default location
# (we copied it, but the original is still there in debian — so we test
# that the agent uses the custom dir, not PATH, by checking the output)

# Run start with --skip-update and custom bin dir.
# Capture the exit code with `|| EXIT_CODE=$?`: under `set -e` a bare
# `EXIT_CODE=$?` after a failing command substitution is never reached —
# the script would abort before printing the agent output.
echo "Running agent start (pg_basebackup via --pg-host-bin-dir)..."
EXIT_CODE=0
OUTPUT=$("$AGENT" start \
    --skip-update \
    --databasus-host http://e2e-mock-server:4050 \
    --db-id test-db-id \
    --token test-token \
    --pg-host e2e-postgres \
    --pg-port 5432 \
    --pg-user testuser \
    --pg-password testpassword \
    --wal-dir /tmp/wal \
    --pg-type host \
    --pg-host-bin-dir "$CUSTOM_BIN_DIR" 2>&1) || EXIT_CODE=$?

echo "$OUTPUT"

if [ "$EXIT_CODE" -ne 0 ]; then
    echo "FAIL: Agent exited with code $EXIT_CODE"
    exit 1
fi

if ! echo "$OUTPUT" | grep -q "pg_basebackup verified"; then
    echo "FAIL: Expected output to contain 'pg_basebackup verified'"
    exit 1
fi

if ! echo "$OUTPUT" | grep -q "PostgreSQL connection verified"; then
    echo "FAIL: Expected output to contain 'PostgreSQL connection verified'"
    exit 1
fi

echo "pg_basebackup found via custom bin dir and DB connection verified"

View File

@@ -0,0 +1,49 @@
#!/bin/bash
# Test 3: the agent must find pg_basebackup in PATH (--pg-type host with no
# custom bin dir) and verify the DB connection.
set -euo pipefail

ARTIFACTS="/opt/agent/artifacts"
AGENT="/tmp/test-agent"

# Copy agent binary
cp "$ARTIFACTS/agent-v1" "$AGENT"
chmod +x "$AGENT"

# Verify pg_basebackup is in PATH
if ! which pg_basebackup > /dev/null 2>&1; then
    echo "FAIL: pg_basebackup not found in PATH (test setup issue)"
    exit 1
fi

# Run start with --skip-update and pg-type=host.
# Capture the exit code with `|| EXIT_CODE=$?`: under `set -e` a bare
# `EXIT_CODE=$?` after a failing command substitution is never reached —
# the script would abort before printing the agent output.
echo "Running agent start (pg_basebackup in PATH)..."
EXIT_CODE=0
OUTPUT=$("$AGENT" start \
    --skip-update \
    --databasus-host http://e2e-mock-server:4050 \
    --db-id test-db-id \
    --token test-token \
    --pg-host e2e-postgres \
    --pg-port 5432 \
    --pg-user testuser \
    --pg-password testpassword \
    --wal-dir /tmp/wal \
    --pg-type host 2>&1) || EXIT_CODE=$?

echo "$OUTPUT"

if [ "$EXIT_CODE" -ne 0 ]; then
    echo "FAIL: Agent exited with code $EXIT_CODE"
    exit 1
fi

if ! echo "$OUTPUT" | grep -q "pg_basebackup verified"; then
    echo "FAIL: Expected output to contain 'pg_basebackup verified'"
    exit 1
fi

if ! echo "$OUTPUT" | grep -q "PostgreSQL connection verified"; then
    echo "FAIL: Expected output to contain 'PostgreSQL connection verified'"
    exit 1
fi

echo "pg_basebackup found in PATH and DB connection verified"

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# E2E: when the server reports the same version as the agent, `start` must
# skip the self-upgrade and leave the binary on disk untouched.
set -euo pipefail

ARTIFACTS="/opt/agent/artifacts"
AGENT="/tmp/test-agent"

# Set mock server to return v1.0.0 (same as agent)
curl -sf -X POST http://e2e-mock-server:4050/mock/set-version \
  -H "Content-Type: application/json" \
  -d '{"version":"v1.0.0"}'

# Copy v1 binary to writable location
cp "$ARTIFACTS/agent-v1" "$AGENT"
chmod +x "$AGENT"

# Verify initial version
VERSION=$("$AGENT" version)
if [ "$VERSION" != "v1.0.0" ]; then
  echo "FAIL: Expected initial version v1.0.0, got $VERSION"
  exit 1
fi

# Run start — agent should see version matches and skip upgrade.
# `|| true` deliberately tolerates a non-zero exit (the stub may fail later);
# the real assertions are on the output text and the on-disk version below.
echo "Running agent start (expecting upgrade skip)..."
OUTPUT=$("$AGENT" start \
  --databasus-host http://e2e-mock-server:4050 \
  --db-id test-db-id \
  --token test-token \
  --pg-host e2e-postgres \
  --pg-port 5432 \
  --pg-user testuser \
  --pg-password testpassword \
  --wal-dir /tmp/wal \
  --pg-type host 2>&1) || true
echo "$OUTPUT"

# Verify output contains "up to date"
if ! echo "$OUTPUT" | grep -qi "up to date"; then
  echo "FAIL: Expected output to contain 'up to date'"
  exit 1
fi

# Verify binary is still v1 (an accidental upgrade would have replaced it)
VERSION=$("$AGENT" version)
if [ "$VERSION" != "v1.0.0" ]; then
  echo "FAIL: Expected version v1.0.0 (unchanged), got $VERSION"
  exit 1
fi
echo "Upgrade correctly skipped, version still $VERSION"

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# E2E: when the server reports a newer version, `start` must download the new
# binary, replace itself on disk, and re-exec into the upgraded version.
set -euo pipefail

ARTIFACTS="/opt/agent/artifacts"
AGENT="/tmp/test-agent"

# Ensure mock server returns v2.0.0
curl -sf -X POST http://e2e-mock-server:4050/mock/set-version \
  -H "Content-Type: application/json" \
  -d '{"version":"v2.0.0"}'

# Copy v1 binary to writable location
cp "$ARTIFACTS/agent-v1" "$AGENT"
chmod +x "$AGENT"

# Verify initial version
VERSION=$("$AGENT" version)
if [ "$VERSION" != "v1.0.0" ]; then
  echo "FAIL: Expected initial version v1.0.0, got $VERSION"
  exit 1
fi
echo "Initial version: $VERSION"

# Run start — agent will:
# 1. Fetch version from mock (v2.0.0 != v1.0.0)
# 2. Download v2 binary from mock
# 3. Replace itself on disk
# 4. Re-exec with same args
# 5. Re-exec'd v2 fetches version (v2.0.0 == v2.0.0) → skips update
# 6. Proceeds to start → verifies pg_basebackup + DB → exits 0 (stub)
# `|| true` tolerates a non-zero exit; the assertion is on the on-disk version.
echo "Running agent start (expecting upgrade v1 -> v2)..."
OUTPUT=$("$AGENT" start \
  --databasus-host http://e2e-mock-server:4050 \
  --db-id test-db-id \
  --token test-token \
  --pg-host e2e-postgres \
  --pg-port 5432 \
  --pg-user testuser \
  --pg-password testpassword \
  --wal-dir /tmp/wal \
  --pg-type host 2>&1) || true
echo "$OUTPUT"

# Verify binary on disk is now v2
VERSION=$("$AGENT" version)
if [ "$VERSION" != "v2.0.0" ]; then
  echo "FAIL: Expected upgraded version v2.0.0, got $VERSION"
  exit 1
fi
echo "Binary upgraded successfully to $VERSION"

19
agent/go.mod Normal file
View File

@@ -0,0 +1,19 @@
module databasus-agent
go 1.26.1
require (
github.com/jackc/pgx/v5 v5.8.0
github.com/stretchr/testify v1.11.1
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
golang.org/x/text v0.29.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

35
agent/go.sum Normal file
View File

@@ -0,0 +1,35 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -0,0 +1,267 @@
package config
import (
"encoding/json"
"flag"
"fmt"
"os"
"databasus-agent/internal/logger"
)
var log = logger.GetLogger()

// configFileName is the JSON configuration file read from, and written to,
// the agent's current working directory.
const configFileName = "databasus.json"

// Config holds all agent settings. Values come from databasus.json and may
// be overridden by explicitly provided CLI flags (see LoadFromJSONAndArgs).
type Config struct {
	DatabasusHost string `json:"databasusHost"` // Databasus server URL
	DbID          string `json:"dbId"`          // database ID on the server
	Token         string `json:"token"`         // agent auth token
	PgHost        string `json:"pgHost"`
	PgPort        int    `json:"pgPort"` // defaults to 5432 via applyDefaults
	PgUser        string `json:"pgUser"`
	PgPassword    string `json:"pgPassword"`
	PgType        string `json:"pgType"` // "host" or "docker"; defaults to "host"
	PgHostBinDir  string `json:"pgHostBinDir"`
	PgDockerContainerName string `json:"pgDockerContainerName"`
	WalDir        string `json:"walDir"`
	// IsDeleteWalAfterUpload is a pointer so that "absent from JSON" can be
	// distinguished from an explicit false; applyDefaults sets it to true
	// when nil.
	IsDeleteWalAfterUpload *bool `json:"deleteWalAfterUpload"`
	// flags holds parsed CLI flag pointers plus the per-key source map used
	// by logConfigSources.
	flags parsedFlags
}
// LoadFromJSONAndArgs reads databasus.json into the struct
// and overrides JSON values with any explicitly provided CLI flags.
//
// The steps are order-sensitive: JSON first, then defaults, then the source
// map is seeded (so JSON/default values are attributed to the file), then
// flags are parsed and applied on top (re-attributing overridden keys to
// "command line args"). On a flag-parsing error the process exits with
// status 1; the FlagSet itself has already printed the usage message.
func (c *Config) LoadFromJSONAndArgs(fs *flag.FlagSet, args []string) {
	c.loadFromJSON()
	c.applyDefaults()
	c.initSources()

	c.flags.databasusHost = fs.String(
		"databasus-host",
		"",
		"Databasus server URL (e.g. http://your-server:4005)",
	)
	c.flags.dbID = fs.String("db-id", "", "Database ID")
	c.flags.token = fs.String("token", "", "Agent token")
	c.flags.pgHost = fs.String("pg-host", "", "PostgreSQL host")
	c.flags.pgPort = fs.Int("pg-port", 0, "PostgreSQL port")
	c.flags.pgUser = fs.String("pg-user", "", "PostgreSQL user")
	c.flags.pgPassword = fs.String("pg-password", "", "PostgreSQL password")
	c.flags.pgType = fs.String("pg-type", "", "PostgreSQL type: host or docker")
	c.flags.pgHostBinDir = fs.String("pg-host-bin-dir", "", "Path to PG bin directory (host mode)")
	c.flags.pgDockerContainerName = fs.String("pg-docker-container-name", "", "Docker container name (docker mode)")
	c.flags.walDir = fs.String("wal-dir", "", "Path to WAL queue directory")

	if err := fs.Parse(args); err != nil {
		os.Exit(1)
	}
	c.applyFlags()

	log.Info("========= Loading config ============")
	c.logConfigSources()
	log.Info("========= Config has been loaded ====")
}
// SaveToJSON writes the current struct to databasus.json in the working
// directory, pretty-printed.
//
// The file holds the agent token and the PostgreSQL password, so it is
// written with 0600 permissions: the previous 0644 left secrets readable
// by every local user.
func (c *Config) SaveToJSON() error {
	data, err := json.MarshalIndent(c, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(configFileName, data, 0o600)
}
// loadFromJSON populates c from databasus.json in the working directory.
// Every failure mode is tolerated: a missing file is informational (it will
// be created on save), and read/parse failures are logged as warnings and
// leave c unchanged.
func (c *Config) loadFromJSON() {
	data, readErr := os.ReadFile(configFileName)
	switch {
	case os.IsNotExist(readErr):
		log.Info("No databasus.json found, will create on save")
		return
	case readErr != nil:
		log.Warn("Failed to read databasus.json", "error", readErr)
		return
	}

	if parseErr := json.Unmarshal(data, c); parseErr != nil {
		log.Warn("Failed to parse databasus.json", "error", parseErr)
		return
	}
	log.Info("Configuration loaded from " + configFileName)
}
// applyDefaults fills in fallback values for fields the JSON file left
// unset: port 5432, pg-type "host", and delete-WAL-after-upload enabled.
func (c *Config) applyDefaults() {
	const (
		defaultPgPort = 5432
		defaultPgType = "host"
	)

	if c.PgPort == 0 {
		c.PgPort = defaultPgPort
	}
	if c.PgType == "" {
		c.PgType = defaultPgType
	}
	if c.IsDeleteWalAfterUpload == nil {
		deleteByDefault := true
		c.IsDeleteWalAfterUpload = &deleteByDefault
	}
}
// initSources seeds the per-key source map used by logConfigSources.
// Every key starts at "not configured"; keys whose field already holds a
// value (from JSON or applyDefaults) are attributed to the config file.
// applyFlags later re-attributes overridden keys to "command line args".
func (c *Config) initSources() {
	keys := []string{
		"databasus-host", "db-id", "token", "pg-host", "pg-port",
		"pg-user", "pg-password", "pg-type", "pg-host-bin-dir",
		"pg-docker-container-name", "wal-dir", "delete-wal-after-upload",
	}
	c.flags.sources = make(map[string]string, len(keys))
	for _, key := range keys {
		c.flags.sources[key] = "not configured"
	}

	markFromFile := func(key string, isSet bool) {
		if isSet {
			c.flags.sources[key] = configFileName
		}
	}

	markFromFile("databasus-host", c.DatabasusHost != "")
	markFromFile("db-id", c.DbID != "")
	markFromFile("token", c.Token != "")
	markFromFile("pg-host", c.PgHost != "")
	// pg-port, pg-type and delete-wal-after-upload always hold a value after
	// applyDefaults, so they are unconditionally attributed to the file.
	markFromFile("pg-port", true)
	markFromFile("pg-user", c.PgUser != "")
	markFromFile("pg-password", c.PgPassword != "")
	markFromFile("pg-type", true)
	markFromFile("pg-host-bin-dir", c.PgHostBinDir != "")
	markFromFile("pg-docker-container-name", c.PgDockerContainerName != "")
	markFromFile("wal-dir", c.WalDir != "")
	markFromFile("delete-wal-after-upload", true)
}
// applyFlags copies every explicitly provided CLI flag over the value loaded
// from JSON and re-attributes that key's source to the command line. A flag
// left at its zero value ("" for strings, 0 for pg-port) is treated as "not
// provided" and does not override.
func (c *Config) applyFlags() {
	const cliSource = "command line args"

	applyString := func(key string, flagValue *string, target *string) {
		if flagValue != nil && *flagValue != "" {
			*target = *flagValue
			c.flags.sources[key] = cliSource
		}
	}

	applyString("databasus-host", c.flags.databasusHost, &c.DatabasusHost)
	applyString("db-id", c.flags.dbID, &c.DbID)
	applyString("token", c.flags.token, &c.Token)
	applyString("pg-host", c.flags.pgHost, &c.PgHost)
	// pg-port is the lone int flag; handled inline.
	if c.flags.pgPort != nil && *c.flags.pgPort != 0 {
		c.PgPort = *c.flags.pgPort
		c.flags.sources["pg-port"] = cliSource
	}
	applyString("pg-user", c.flags.pgUser, &c.PgUser)
	applyString("pg-password", c.flags.pgPassword, &c.PgPassword)
	applyString("pg-type", c.flags.pgType, &c.PgType)
	applyString("pg-host-bin-dir", c.flags.pgHostBinDir, &c.PgHostBinDir)
	applyString("pg-docker-container-name", c.flags.pgDockerContainerName, &c.PgDockerContainerName)
	applyString("wal-dir", c.flags.walDir, &c.WalDir)
}
// logConfigSources logs every config key with its effective value and where
// it came from ("not configured", the config file name, or "command line
// args"). Token and password values are redacted via maskSensitive.
func (c *Config) logConfigSources() {
	log.Info("databasus-host", "value", c.DatabasusHost, "source", c.flags.sources["databasus-host"])
	log.Info("db-id", "value", c.DbID, "source", c.flags.sources["db-id"])
	log.Info("token", "value", maskSensitive(c.Token), "source", c.flags.sources["token"])
	log.Info("pg-host", "value", c.PgHost, "source", c.flags.sources["pg-host"])
	log.Info("pg-port", "value", c.PgPort, "source", c.flags.sources["pg-port"])
	log.Info("pg-user", "value", c.PgUser, "source", c.flags.sources["pg-user"])
	log.Info("pg-password", "value", maskSensitive(c.PgPassword), "source", c.flags.sources["pg-password"])
	log.Info("pg-type", "value", c.PgType, "source", c.flags.sources["pg-type"])
	log.Info("pg-host-bin-dir", "value", c.PgHostBinDir, "source", c.flags.sources["pg-host-bin-dir"])
	log.Info(
		"pg-docker-container-name",
		"value",
		c.PgDockerContainerName,
		"source",
		c.flags.sources["pg-docker-container-name"],
	)
	log.Info("wal-dir", "value", c.WalDir, "source", c.flags.sources["wal-dir"])
	log.Info(
		"delete-wal-after-upload",
		"value",
		// IsDeleteWalAfterUpload is non-nil here: applyDefaults runs first.
		fmt.Sprintf("%v", *c.IsDeleteWalAfterUpload),
		"source",
		c.flags.sources["delete-wal-after-upload"],
	)
}
// maskSensitive returns a redacted form of a secret suitable for logging:
// the first quarter of the value (at least one character) followed by
// "***", or "(not set)" when the value is empty.
func maskSensitive(value string) string {
	if len(value) == 0 {
		return "(not set)"
	}
	keep := len(value) / 4
	if keep < 1 {
		keep = 1
	}
	return value[:keep] + "***"
}

View File

@@ -0,0 +1,301 @@
package config
import (
"encoding/json"
"flag"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Verifies that values present in databasus.json are loaded into Config when
// no CLI flags are given.
func Test_LoadFromJSONAndArgs_ValuesLoadedFromJSON(t *testing.T) {
	dir := setupTempDir(t)
	writeConfigJSON(t, dir, Config{
		DatabasusHost: "http://json-host:4005",
		DbID:          "json-db-id",
		Token:         "json-token",
	})

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{})

	assert.Equal(t, "http://json-host:4005", cfg.DatabasusHost)
	assert.Equal(t, "json-db-id", cfg.DbID)
	assert.Equal(t, "json-token", cfg.Token)
}

// Verifies that CLI flags alone populate Config when no JSON file exists.
func Test_LoadFromJSONAndArgs_ValuesLoadedFromArgs_WhenNoJSON(t *testing.T) {
	setupTempDir(t)

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{
		"--databasus-host", "http://arg-host:4005",
		"--db-id", "arg-db-id",
		"--token", "arg-token",
	})

	assert.Equal(t, "http://arg-host:4005", cfg.DatabasusHost)
	assert.Equal(t, "arg-db-id", cfg.DbID)
	assert.Equal(t, "arg-token", cfg.Token)
}

// Verifies that explicitly provided CLI flags win over JSON values.
func Test_LoadFromJSONAndArgs_ArgsOverrideJSON(t *testing.T) {
	dir := setupTempDir(t)
	writeConfigJSON(t, dir, Config{
		DatabasusHost: "http://json-host:4005",
		DbID:          "json-db-id",
		Token:         "json-token",
	})

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{
		"--databasus-host", "http://arg-host:9999",
		"--db-id", "arg-db-id-override",
		"--token", "arg-token-override",
	})

	assert.Equal(t, "http://arg-host:9999", cfg.DatabasusHost)
	assert.Equal(t, "arg-db-id-override", cfg.DbID)
	assert.Equal(t, "arg-token-override", cfg.Token)
}

// Verifies that flags and JSON merge: only the flagged key is overridden,
// the rest keep their JSON values.
func Test_LoadFromJSONAndArgs_PartialArgsOverrideJSON(t *testing.T) {
	dir := setupTempDir(t)
	writeConfigJSON(t, dir, Config{
		DatabasusHost: "http://json-host:4005",
		DbID:          "json-db-id",
		Token:         "json-token",
	})

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{
		"--databasus-host", "http://arg-host-only:4005",
	})

	assert.Equal(t, "http://arg-host-only:4005", cfg.DatabasusHost)
	assert.Equal(t, "json-db-id", cfg.DbID)
	assert.Equal(t, "json-token", cfg.Token)
}

// Verifies that SaveToJSON round-trips the basic fields to disk.
func Test_SaveToJSON_ConfigSavedCorrectly(t *testing.T) {
	setupTempDir(t)
	deleteWal := true
	cfg := &Config{
		DatabasusHost:          "http://save-host:4005",
		DbID:                   "save-db-id",
		Token:                  "save-token",
		IsDeleteWalAfterUpload: &deleteWal,
	}

	err := cfg.SaveToJSON()
	require.NoError(t, err)

	saved := readConfigJSON(t)
	assert.Equal(t, "http://save-host:4005", saved.DatabasusHost)
	assert.Equal(t, "save-db-id", saved.DbID)
	assert.Equal(t, "save-token", saved.Token)
}
// Verifies that saving after a flag override persists the merged state:
// the overridden host plus the untouched JSON fields.
func Test_SaveToJSON_AfterArgsOverrideJSON_SavedFileContainsMergedValues(t *testing.T) {
	dir := setupTempDir(t)
	writeConfigJSON(t, dir, Config{
		DatabasusHost: "http://json-host:4005",
		DbID:          "json-db-id",
		Token:         "json-token",
	})

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{
		"--databasus-host", "http://override-host:9999",
	})

	err := cfg.SaveToJSON()
	require.NoError(t, err)

	saved := readConfigJSON(t)
	assert.Equal(t, "http://override-host:9999", saved.DatabasusHost)
	assert.Equal(t, "json-db-id", saved.DbID)
	assert.Equal(t, "json-token", saved.Token)
}

// Verifies that all PostgreSQL-related fields load from JSON, including the
// pointer-typed IsDeleteWalAfterUpload holding an explicit false.
func Test_LoadFromJSONAndArgs_PgFieldsLoadedFromJSON(t *testing.T) {
	dir := setupTempDir(t)
	deleteWal := false
	writeConfigJSON(t, dir, Config{
		DatabasusHost:          "http://json-host:4005",
		DbID:                   "json-db-id",
		Token:                  "json-token",
		PgHost:                 "pg-json-host",
		PgPort:                 5433,
		PgUser:                 "pg-json-user",
		PgPassword:             "pg-json-pass",
		PgType:                 "docker",
		PgHostBinDir:           "/usr/bin",
		PgDockerContainerName:  "pg-container",
		WalDir:                 "/opt/wal",
		IsDeleteWalAfterUpload: &deleteWal,
	})

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{})

	assert.Equal(t, "pg-json-host", cfg.PgHost)
	assert.Equal(t, 5433, cfg.PgPort)
	assert.Equal(t, "pg-json-user", cfg.PgUser)
	assert.Equal(t, "pg-json-pass", cfg.PgPassword)
	assert.Equal(t, "docker", cfg.PgType)
	assert.Equal(t, "/usr/bin", cfg.PgHostBinDir)
	assert.Equal(t, "pg-container", cfg.PgDockerContainerName)
	assert.Equal(t, "/opt/wal", cfg.WalDir)
	assert.Equal(t, false, *cfg.IsDeleteWalAfterUpload)
}

// Verifies that all PostgreSQL-related fields can be supplied via flags.
func Test_LoadFromJSONAndArgs_PgFieldsLoadedFromArgs(t *testing.T) {
	setupTempDir(t)

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{
		"--pg-host", "arg-pg-host",
		"--pg-port", "5433",
		"--pg-user", "arg-pg-user",
		"--pg-password", "arg-pg-pass",
		"--pg-type", "docker",
		"--pg-host-bin-dir", "/custom/bin",
		"--pg-docker-container-name", "my-pg",
		"--wal-dir", "/var/wal",
	})

	assert.Equal(t, "arg-pg-host", cfg.PgHost)
	assert.Equal(t, 5433, cfg.PgPort)
	assert.Equal(t, "arg-pg-user", cfg.PgUser)
	assert.Equal(t, "arg-pg-pass", cfg.PgPassword)
	assert.Equal(t, "docker", cfg.PgType)
	assert.Equal(t, "/custom/bin", cfg.PgHostBinDir)
	assert.Equal(t, "my-pg", cfg.PgDockerContainerName)
	assert.Equal(t, "/var/wal", cfg.WalDir)
}

// Verifies that PostgreSQL flags override the corresponding JSON values.
func Test_LoadFromJSONAndArgs_PgArgsOverrideJSON(t *testing.T) {
	dir := setupTempDir(t)
	writeConfigJSON(t, dir, Config{
		PgHost: "json-host",
		PgPort: 5432,
		PgUser: "json-user",
		PgType: "host",
		WalDir: "/json/wal",
	})

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{
		"--pg-host", "arg-host",
		"--pg-port", "5433",
		"--pg-user", "arg-user",
		"--pg-type", "docker",
		"--pg-docker-container-name", "my-container",
		"--wal-dir", "/arg/wal",
	})

	assert.Equal(t, "arg-host", cfg.PgHost)
	assert.Equal(t, 5433, cfg.PgPort)
	assert.Equal(t, "arg-user", cfg.PgUser)
	assert.Equal(t, "docker", cfg.PgType)
	assert.Equal(t, "my-container", cfg.PgDockerContainerName)
	assert.Equal(t, "/arg/wal", cfg.WalDir)
}

// Verifies the applyDefaults fallbacks: port 5432, pg-type "host", and
// delete-WAL-after-upload enabled when nothing configures them.
func Test_LoadFromJSONAndArgs_DefaultsApplied_WhenNoJSONAndNoArgs(t *testing.T) {
	setupTempDir(t)

	cfg := &Config{}
	fs := flag.NewFlagSet("test", flag.ContinueOnError)
	cfg.LoadFromJSONAndArgs(fs, []string{})

	assert.Equal(t, 5432, cfg.PgPort)
	assert.Equal(t, "host", cfg.PgType)
	require.NotNil(t, cfg.IsDeleteWalAfterUpload)
	assert.Equal(t, true, *cfg.IsDeleteWalAfterUpload)
}
// Verifies that SaveToJSON round-trips every PostgreSQL-related field,
// including an explicit false for IsDeleteWalAfterUpload.
func Test_SaveToJSON_PgFieldsSavedCorrectly(t *testing.T) {
	setupTempDir(t)
	deleteWal := false
	cfg := &Config{
		DatabasusHost:          "http://host:4005",
		DbID:                   "db-id",
		Token:                  "token",
		PgHost:                 "pg-host",
		PgPort:                 5433,
		PgUser:                 "pg-user",
		PgPassword:             "pg-pass",
		PgType:                 "docker",
		PgHostBinDir:           "/usr/bin",
		PgDockerContainerName:  "pg-container",
		WalDir:                 "/opt/wal",
		IsDeleteWalAfterUpload: &deleteWal,
	}

	err := cfg.SaveToJSON()
	require.NoError(t, err)

	saved := readConfigJSON(t)
	assert.Equal(t, "pg-host", saved.PgHost)
	assert.Equal(t, 5433, saved.PgPort)
	assert.Equal(t, "pg-user", saved.PgUser)
	assert.Equal(t, "pg-pass", saved.PgPassword)
	assert.Equal(t, "docker", saved.PgType)
	assert.Equal(t, "/usr/bin", saved.PgHostBinDir)
	assert.Equal(t, "pg-container", saved.PgDockerContainerName)
	assert.Equal(t, "/opt/wal", saved.WalDir)
	require.NotNil(t, saved.IsDeleteWalAfterUpload)
	assert.Equal(t, false, *saved.IsDeleteWalAfterUpload)
}
func setupTempDir(t *testing.T) string {
t.Helper()
origDir, err := os.Getwd()
require.NoError(t, err)
dir := t.TempDir()
require.NoError(t, os.Chdir(dir))
t.Cleanup(func() { os.Chdir(origDir) })
return dir
}
// writeConfigJSON marshals cfg and writes it as databasus.json into dir,
// failing the test on any error.
func writeConfigJSON(t *testing.T, dir string, cfg Config) {
	t.Helper()
	data, err := json.MarshalIndent(cfg, "", " ")
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(dir+"/"+configFileName, data, 0o644))
}

// readConfigJSON reads databasus.json from the current working directory
// (the temp dir set up by setupTempDir) and unmarshals it into a Config.
func readConfigJSON(t *testing.T) Config {
	t.Helper()
	data, err := os.ReadFile(configFileName)
	require.NoError(t, err)
	var cfg Config
	require.NoError(t, json.Unmarshal(data, &cfg))
	return cfg
}

View File

@@ -0,0 +1,17 @@
package config
// parsedFlags holds the pointers returned by flag.FlagSet registration in
// LoadFromJSONAndArgs, plus the per-key source map consumed by
// logConfigSources. A nil pointer means the flag was never registered; a
// zero value ("" / 0) means it was registered but not provided.
type parsedFlags struct {
	databasusHost         *string
	dbID                  *string
	token                 *string
	pgHost                *string
	pgPort                *int
	pgUser                *string
	pgPassword            *string
	pgType                *string
	pgHostBinDir          *string
	pgDockerContainerName *string
	walDir                *string
	// sources maps flag name → "not configured" | config file name |
	// "command line args".
	sources map[string]string
}

View File

@@ -0,0 +1,179 @@
package start
import (
"context"
"errors"
"fmt"
"log/slog"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/jackc/pgx/v5"
"databasus-agent/internal/config"
)
const (
	// Upper bound for running `pg_basebackup --version` (host or docker).
	pgBasebackupVerifyTimeout = 10 * time.Second
	// Upper bound for connecting to and pinging PostgreSQL.
	dbVerifyTimeout = 10 * time.Second
)

// Run is the entry point of the `start` command: it validates the config,
// verifies pg_basebackup is runnable, verifies the PostgreSQL connection,
// and then stops — the actual backup loop is not implemented yet (stub).
func Run(cfg *config.Config, log *slog.Logger) error {
	if err := validateConfig(cfg); err != nil {
		return err
	}
	if err := verifyPgBasebackup(cfg, log); err != nil {
		return err
	}
	if err := verifyDatabase(cfg, log); err != nil {
		return err
	}

	log.Info("start: stub — not yet implemented",
		"dbId", cfg.DbID,
		"hasToken", cfg.Token != "",
	)
	return nil
}
// validateConfig checks that every argument required by `start` is present
// and well-formed, returning the first violation found (check order is
// stable so error messages are deterministic).
func validateConfig(cfg *config.Config) error {
	switch {
	case cfg.DatabasusHost == "":
		return errors.New("argument databasus-host is required")
	case cfg.DbID == "":
		return errors.New("argument db-id is required")
	case cfg.Token == "":
		return errors.New("argument token is required")
	case cfg.PgHost == "":
		return errors.New("argument pg-host is required")
	case cfg.PgPort <= 0:
		return errors.New("argument pg-port must be a positive number")
	case cfg.PgUser == "":
		return errors.New("argument pg-user is required")
	case cfg.PgType != "host" && cfg.PgType != "docker":
		return fmt.Errorf("argument pg-type must be 'host' or 'docker', got '%s'", cfg.PgType)
	case cfg.WalDir == "":
		return errors.New("argument wal-dir is required")
	case cfg.PgType == "docker" && cfg.PgDockerContainerName == "":
		return errors.New("argument pg-docker-container-name is required when pg-type is 'docker'")
	}
	return nil
}
// verifyPgBasebackup dispatches the pg_basebackup availability check to the
// host-binary or docker-exec variant according to cfg.PgType. The default
// branch is unreachable after validateConfig but kept as a safety net.
func verifyPgBasebackup(cfg *config.Config, log *slog.Logger) error {
	if cfg.PgType == "host" {
		return verifyPgBasebackupHost(cfg, log)
	}
	if cfg.PgType == "docker" {
		return verifyPgBasebackupDocker(cfg, log)
	}
	return fmt.Errorf("unexpected pg-type: %s", cfg.PgType)
}
// verifyPgBasebackupHost runs `pg_basebackup --version` on the host —
// resolved via PATH, or from cfg.PgHostBinDir when set — to confirm the
// client tools are installed before the agent proceeds.
func verifyPgBasebackupHost(cfg *config.Config, log *slog.Logger) error {
	binary := "pg_basebackup"
	if cfg.PgHostBinDir != "" {
		binary = filepath.Join(cfg.PgHostBinDir, "pg_basebackup")
	}

	ctx, cancel := context.WithTimeout(context.Background(), pgBasebackupVerifyTimeout)
	defer cancel()

	output, err := exec.CommandContext(ctx, binary, "--version").CombinedOutput()
	if err != nil {
		// Tailor the hint to how the binary was resolved.
		if cfg.PgHostBinDir != "" {
			return fmt.Errorf(
				"pg_basebackup not found at '%s': %w. Verify pg-host-bin-dir is correct",
				binary, err,
			)
		}
		return fmt.Errorf(
			"pg_basebackup not found in PATH: %w. Install PostgreSQL client tools or set pg-host-bin-dir",
			err,
		)
	}

	log.Info("pg_basebackup verified", "version", strings.TrimSpace(string(output)))
	return nil
}
// verifyPgBasebackupDocker runs `pg_basebackup --version` inside the
// configured container via `docker exec`, confirming both that the
// container is running and that the client tools exist inside it.
func verifyPgBasebackupDocker(cfg *config.Config, log *slog.Logger) error {
	ctx, cancel := context.WithTimeout(context.Background(), pgBasebackupVerifyTimeout)
	defer cancel()

	output, err := exec.CommandContext(ctx,
		"docker", "exec", cfg.PgDockerContainerName,
		"pg_basebackup", "--version",
	).CombinedOutput()
	if err != nil {
		return fmt.Errorf(
			"pg_basebackup not available in container '%s': %w. "+
				"Check that the container is running and pg_basebackup is installed inside it",
			cfg.PgDockerContainerName, err,
		)
	}

	log.Info("pg_basebackup verified (docker)",
		"container", cfg.PgDockerContainerName,
		"version", strings.TrimSpace(string(output)),
	)
	return nil
}
// verifyDatabase opens a short-lived connection to the `postgres` database
// and pings it, proving host/port/credentials are usable before the agent
// proceeds. sslmode=disable is hard-coded (matches current deployments).
//
// Values are quoted per libpq keyword/value conninfo rules: the previous
// raw %s interpolation broke — or allowed injection of extra conninfo
// parameters — whenever the password, user, or host contained a space,
// quote, or backslash.
func verifyDatabase(cfg *config.Config, log *slog.Logger) error {
	connStr := fmt.Sprintf(
		"host=%s port=%d user=%s password=%s dbname=postgres sslmode=disable",
		quoteConnValue(cfg.PgHost), cfg.PgPort, quoteConnValue(cfg.PgUser), quoteConnValue(cfg.PgPassword),
	)

	ctx, cancel := context.WithTimeout(context.Background(), dbVerifyTimeout)
	defer cancel()

	conn, err := pgx.Connect(ctx, connStr)
	if err != nil {
		return fmt.Errorf(
			"failed to connect to PostgreSQL at %s:%d as user '%s': %w",
			cfg.PgHost, cfg.PgPort, cfg.PgUser, err,
		)
	}
	defer func() { _ = conn.Close(ctx) }()

	if err := conn.Ping(ctx); err != nil {
		return fmt.Errorf("PostgreSQL ping failed at %s:%d: %w",
			cfg.PgHost, cfg.PgPort, err,
		)
	}

	log.Info("PostgreSQL connection verified",
		"host", cfg.PgHost,
		"port", cfg.PgPort,
		"user", cfg.PgUser,
	)
	return nil
}

// quoteConnValue wraps a libpq conninfo value in single quotes, escaping
// backslashes and embedded single quotes, so arbitrary passwords parse as a
// single value.
func quoteConnValue(value string) string {
	escaped := strings.ReplaceAll(value, `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, `'`, `\'`)
	return "'" + escaped + "'"
}

View File

@@ -0,0 +1,5 @@
package upgrade
// versionResponse is the JSON body of GET /api/v1/system/version.
type versionResponse struct {
	Version string `json:"version"`
}

View File

@@ -0,0 +1,154 @@
package upgrade
import (
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"syscall"
"time"
)
// CheckAndUpdate compares currentVersion against the server's reported
// version and, if they differ, performs an in-place self-upgrade:
// download to <self>.update → chmod 0755 → verify the new binary reports
// the expected version → atomically rename over the running executable →
// re-exec with the original args and environment.
//
// On success this function never returns: syscall.Exec replaces the process
// image (Unix-only — this code path presumably targets Linux agents).
// In dev mode the check is skipped entirely.
func CheckAndUpdate(databasusHost, currentVersion string, isDev bool, log *slog.Logger) error {
	if isDev {
		log.Info("Skipping update check (development mode)")
		return nil
	}

	serverVersion, err := fetchServerVersion(databasusHost, log)
	if err != nil {
		return fmt.Errorf(
			"unable to check version, please verify Databasus server is available at %s: %w",
			databasusHost,
			err,
		)
	}
	if serverVersion == currentVersion {
		log.Info("Agent version is up to date", "version", currentVersion)
		return nil
	}

	log.Info("Updating agent...", "current", currentVersion, "target", serverVersion)

	selfPath, err := os.Executable()
	if err != nil {
		return fmt.Errorf("failed to determine executable path: %w", err)
	}
	tempPath := selfPath + ".update"
	// Best-effort cleanup of the staging file; after a successful rename it
	// no longer exists and the Remove error is deliberately ignored.
	defer func() {
		_ = os.Remove(tempPath)
	}()

	if err := downloadBinary(databasusHost, tempPath); err != nil {
		return fmt.Errorf("failed to download update: %w", err)
	}
	if err := os.Chmod(tempPath, 0o755); err != nil {
		return fmt.Errorf("failed to set permissions on update: %w", err)
	}
	// Run the downloaded binary's `version` command before trusting it.
	if err := verifyBinary(tempPath, serverVersion); err != nil {
		return fmt.Errorf("update verification failed: %w", err)
	}
	// Rename is atomic within the same directory, so the executable is never
	// left in a half-written state.
	if err := os.Rename(tempPath, selfPath); err != nil {
		return fmt.Errorf("failed to replace binary (try --skip-update if this persists): %w", err)
	}

	log.Info("Update complete, re-executing...")
	return syscall.Exec(selfPath, os.Args, os.Environ())
}
// fetchServerVersion asks GET <host>/api/v1/system/version for the version
// the server wants agents to run. Failures are logged as warnings here AND
// returned; the caller wraps them into its own error message.
//
// NOTE(review): the 10s context deadline and the client's 10s Timeout are
// redundant — either alone would bound the request.
func fetchServerVersion(host string, log *slog.Logger) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, host+"/api/v1/system/version", nil)
	if err != nil {
		return "", err
	}

	resp, err := client.Do(req)
	if err != nil {
		log.Warn("Could not reach server for update check, continuing", "error", err)
		return "", err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		log.Warn(
			"Server returned non-OK status for version check, continuing",
			"status",
			resp.StatusCode,
		)
		return "", fmt.Errorf("status %d", resp.StatusCode)
	}

	var ver versionResponse
	if err := json.NewDecoder(resp.Body).Decode(&ver); err != nil {
		log.Warn("Failed to parse server version response, continuing", "error", err)
		return "", err
	}
	return ver.Version, nil
}
// downloadBinary streams the agent binary for this machine's architecture
// from <host>/api/v1/system/agent into destPath, bounded by a 5-minute
// context deadline.
//
// The file Close error is now returned: the previous deferred-and-ignored
// Close could silently discard a failed flush, leaving a truncated binary
// that was reported as a successful download.
func downloadBinary(host, destPath string) error {
	url := fmt.Sprintf("%s/api/v1/system/agent?arch=%s", host, runtime.GOARCH)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned %d for agent download", resp.StatusCode)
	}

	f, err := os.Create(destPath)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, resp.Body); err != nil {
		// Copy failed: close best-effort, the copy error is the root cause.
		_ = f.Close()
		return err
	}
	// Surface Close errors — they can report write-back failures.
	return f.Close()
}
// verifyBinary runs `<binaryPath> version` and checks the trimmed output
// matches expectedVersion exactly, proving the downloaded binary is intact
// before it replaces the running executable.
//
// A 30-second deadline replaces the previous context.Background(): a
// corrupt or wedged download would otherwise block the upgrade forever.
func verifyBinary(binaryPath, expectedVersion string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, binaryPath, "version")
	output, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("binary failed to execute: %w", err)
	}
	got := strings.TrimSpace(string(output))
	if got != expectedVersion {
		return fmt.Errorf("version mismatch: expected %q, got %q", expectedVersion, got)
	}
	return nil
}

View File

@@ -0,0 +1,45 @@
package logger
import (
"log/slog"
"os"
"sync"
"time"
)
var (
loggerInstance *slog.Logger
once sync.Once
)
// Init builds the singleton console logger. Only the FIRST call takes
// effect (sync.Once); a later call with a different isDebug is silently
// ignored. The handler rewrites the timestamp to "2006/01/02 15:04:05"
// local-time format and removes the level attribute from the output
// entirely (empty Attr returned for LevelKey).
func Init(isDebug bool) {
	level := slog.LevelInfo
	if isDebug {
		level = slog.LevelDebug
	}

	once.Do(func() {
		loggerInstance = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
			Level: level,
			ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
				if a.Key == slog.TimeKey {
					a.Value = slog.StringValue(time.Now().Format("2006/01/02 15:04:05"))
				}
				if a.Key == slog.LevelKey {
					// Drop the level attribute from every record.
					return slog.Attr{}
				}
				return a
			},
		}))
	})
}
// GetLogger returns a singleton slog.Logger that logs to the console
func GetLogger() *slog.Logger {
if loggerInstance == nil {
Init(false)
}
return loggerInstance
}

View File

@@ -11,6 +11,9 @@ VICTORIA_LOGS_PASSWORD=devpassword
# tests
TEST_LOCALHOST=localhost
IS_SKIP_EXTERNAL_RESOURCES_TESTS=false
# cloudflare turnstile
CLOUDFLARE_TURNSTILE_SITE_KEY=
CLOUDFLARE_TURNSTILE_SECRET_KEY=
# db
DATABASE_DSN=host=dev-db user=postgres password=Q1234567 dbname=databasus port=5437 sslmode=disable
DATABASE_URL=postgres://postgres:Q1234567@dev-db:5437/databasus?sslmode=disable

View File

@@ -7,6 +7,16 @@ run:
linters:
default: standard
enable:
- funcorder
- bodyclose
- errorlint
- gocritic
- unconvert
- misspell
- errname
- noctx
- modernize
settings:
errcheck:
@@ -14,6 +24,18 @@ linters:
formatters:
enable:
- gofmt
- gofumpt
- golines
- goimports
- gci
settings:
golines:
max-len: 120
gofumpt:
module-path: databasus-backend
extra-rules: true
gci:
sections:
- standard
- default
- localmodule

View File

@@ -12,11 +12,18 @@ import (
"syscall"
"time"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
"databasus-backend/internal/config"
"databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/backups/backups"
"databasus-backend/internal/features/backups/backups/backuping"
backups_controllers "databasus-backend/internal/features/backups/backups/controllers"
backups_download "databasus-backend/internal/features/backups/backups/download"
backups_services "databasus-backend/internal/features/backups/backups/services"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/disk"
@@ -27,7 +34,9 @@ import (
"databasus-backend/internal/features/restores"
"databasus-backend/internal/features/restores/restoring"
"databasus-backend/internal/features/storages"
system_agent "databasus-backend/internal/features/system/agent"
system_healthcheck "databasus-backend/internal/features/system/healthcheck"
system_version "databasus-backend/internal/features/system/version"
task_cancellation "databasus-backend/internal/features/tasks/cancellation"
users_controllers "databasus-backend/internal/features/users/controllers"
users_middleware "databasus-backend/internal/features/users/middleware"
@@ -38,12 +47,6 @@ import (
files_utils "databasus-backend/internal/util/files"
"databasus-backend/internal/util/logger"
_ "databasus-backend/swagger" // swagger docs
"github.com/gin-contrib/cors"
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
)
// @title Databasus Backend API
@@ -80,7 +83,6 @@ func main() {
config.GetEnv().TempFolder,
config.GetEnv().DataFolder,
})
if err != nil {
log.Error("Failed to ensure directories", "error", err)
os.Exit(1)
@@ -147,7 +149,7 @@ func handlePasswordReset(log *slog.Logger) {
resetPassword(*email, *newPassword, log)
}
func resetPassword(email string, newPassword string, log *slog.Logger) {
func resetPassword(email, newPassword string, log *slog.Logger) {
log.Info("Resetting password...")
userService := users_services.GetUserService()
@@ -209,7 +211,11 @@ func setUpRoutes(r *gin.Engine) {
userController := users_controllers.GetUserController()
userController.RegisterRoutes(v1)
system_healthcheck.GetHealthcheckController().RegisterRoutes(v1)
backups.GetBackupController().RegisterPublicRoutes(v1)
system_version.GetVersionController().RegisterRoutes(v1)
system_agent.GetAgentController().RegisterRoutes(v1)
backups_controllers.GetBackupController().RegisterPublicRoutes(v1)
backups_controllers.GetPostgresWalBackupController().RegisterRoutes(v1)
databases.GetDatabaseController().RegisterPublicRoutes(v1)
// Setup auth middleware
userService := users_services.GetUserService()
@@ -226,7 +232,7 @@ func setUpRoutes(r *gin.Engine) {
notifiers.GetNotifierController().RegisterRoutes(protected)
storages.GetStorageController().RegisterRoutes(protected)
databases.GetDatabaseController().RegisterRoutes(protected)
backups.GetBackupController().RegisterRoutes(protected)
backups_controllers.GetBackupController().RegisterRoutes(protected)
restores.GetRestoreController().RegisterRoutes(protected)
healthcheck_config.GetHealthcheckConfigController().RegisterRoutes(protected)
healthcheck_attempt.GetHealthcheckAttemptController().RegisterRoutes(protected)
@@ -238,7 +244,7 @@ func setUpRoutes(r *gin.Engine) {
func setUpDependencies() {
databases.SetupDependencies()
backups.SetupDependencies()
backups_services.SetupDependencies()
restores.SetupDependencies()
healthcheck_config.SetupDependencies()
audit_logs.SetupDependencies()
@@ -347,7 +353,9 @@ func generateSwaggerDocs(log *slog.Logger) {
return
}
cmd := exec.Command("swag", "init", "-d", currentDir, "-g", "cmd/main.go", "-o", "swagger")
cmd := exec.CommandContext(
context.Background(), "swag", "init", "-d", currentDir, "-g", "cmd/main.go", "-o", "swagger",
)
output, err := cmd.CombinedOutput()
if err != nil {
@@ -361,7 +369,7 @@ func generateSwaggerDocs(log *slog.Logger) {
func runMigrations(log *slog.Logger) {
log.Info("Running database migrations...")
cmd := exec.Command("goose", "-dir", "./migrations", "up")
cmd := exec.CommandContext(context.Background(), "goose", "-dir", "./migrations", "up")
cmd.Env = append(
os.Environ(),
"GOOSE_DRIVER=postgres",

View File

@@ -1,6 +1,6 @@
module databasus-backend
go 1.24.9
go 1.26.1
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0

View File

@@ -1,9 +1,6 @@
package config
import (
env_utils "databasus-backend/internal/util/env"
"databasus-backend/internal/util/logger"
"databasus-backend/internal/util/tools"
"os"
"path/filepath"
"strings"
@@ -11,6 +8,10 @@ import (
"github.com/ilyakaznacheev/cleanenv"
"github.com/joho/godotenv"
env_utils "databasus-backend/internal/util/env"
"databasus-backend/internal/util/logger"
"databasus-backend/internal/util/tools"
)
var log = logger.GetLogger()
@@ -29,7 +30,7 @@ type EnvVariables struct {
MongodbInstallDir string `env:"MONGODB_INSTALL_DIR"`
// Internal database
DatabaseDsn string `env:"DATABASE_DSN" required:"true"`
DatabaseDsn string `env:"DATABASE_DSN" required:"true"`
// Internal Valkey
ValkeyHost string `env:"VALKEY_HOST" required:"true"`
ValkeyPort string `env:"VALKEY_PORT" required:"true"`
@@ -104,6 +105,10 @@ type EnvVariables struct {
GoogleClientID string `env:"GOOGLE_CLIENT_ID"`
GoogleClientSecret string `env:"GOOGLE_CLIENT_SECRET"`
// Cloudflare Turnstile
CloudflareTurnstileSecretKey string `env:"CLOUDFLARE_TURNSTILE_SECRET_KEY"`
CloudflareTurnstileSiteKey string `env:"CLOUDFLARE_TURNSTILE_SITE_KEY"`
// testing Telegram
TestTelegramBotToken string `env:"TEST_TELEGRAM_BOT_TOKEN"`
TestTelegramChatID string `env:"TEST_TELEGRAM_CHAT_ID"`
@@ -120,6 +125,7 @@ type EnvVariables struct {
SMTPPort int `env:"SMTP_PORT"`
SMTPUser string `env:"SMTP_USER"`
SMTPPassword string `env:"SMTP_PASSWORD"`
SMTPFrom string `env:"SMTP_FROM"`
// Application URL (optional) - used for email links
DatabasusURL string `env:"DATABASUS_URL"`

View File

@@ -1,17 +1,17 @@
package audit_logs
import (
"databasus-backend/internal/storage"
"fmt"
"testing"
"time"
user_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
user_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
"databasus-backend/internal/storage"
)
func Test_CleanOldAuditLogs_DeletesLogsOlderThanOneYear(t *testing.T) {

View File

@@ -4,10 +4,10 @@ import (
"errors"
"net/http"
user_models "databasus-backend/internal/features/users/models"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
user_models "databasus-backend/internal/features/users/models"
)
type AuditLogController struct {

View File

@@ -6,15 +6,15 @@ import (
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
user_enums "databasus-backend/internal/features/users/enums"
users_middleware "databasus-backend/internal/features/users/middleware"
users_services "databasus-backend/internal/features/users/services"
users_testing "databasus-backend/internal/features/users/testing"
test_utils "databasus-backend/internal/util/testing"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func Test_GetGlobalAuditLogs_WithDifferentUserRoles_EnforcesPermissionsCorrectly(t *testing.T) {

View File

@@ -8,14 +8,18 @@ import (
"databasus-backend/internal/util/logger"
)
var auditLogRepository = &AuditLogRepository{}
var auditLogService = &AuditLogService{
auditLogRepository,
logger.GetLogger(),
}
var (
auditLogRepository = &AuditLogRepository{}
auditLogService = &AuditLogService{
auditLogRepository,
logger.GetLogger(),
}
)
var auditLogController = &AuditLogController{
auditLogService,
}
var auditLogBackgroundService = &AuditLogBackgroundService{
auditLogService: auditLogService,
logger: logger.GetLogger(),

View File

@@ -1,10 +1,11 @@
package audit_logs
import (
"databasus-backend/internal/storage"
"time"
"github.com/google/uuid"
"databasus-backend/internal/storage"
)
type AuditLogRepository struct{}
@@ -21,7 +22,7 @@ func (r *AuditLogRepository) GetGlobal(
limit, offset int,
beforeDate *time.Time,
) ([]*AuditLogDTO, error) {
var auditLogs = make([]*AuditLogDTO, 0)
auditLogs := make([]*AuditLogDTO, 0)
sql := `
SELECT
@@ -37,7 +38,7 @@ func (r *AuditLogRepository) GetGlobal(
LEFT JOIN users u ON al.user_id = u.id
LEFT JOIN workspaces w ON al.workspace_id = w.id`
args := []interface{}{}
args := []any{}
if beforeDate != nil {
sql += " WHERE al.created_at < ?"
@@ -57,7 +58,7 @@ func (r *AuditLogRepository) GetByUser(
limit, offset int,
beforeDate *time.Time,
) ([]*AuditLogDTO, error) {
var auditLogs = make([]*AuditLogDTO, 0)
auditLogs := make([]*AuditLogDTO, 0)
sql := `
SELECT
@@ -74,7 +75,7 @@ func (r *AuditLogRepository) GetByUser(
LEFT JOIN workspaces w ON al.workspace_id = w.id
WHERE al.user_id = ?`
args := []interface{}{userID}
args := []any{userID}
if beforeDate != nil {
sql += " AND al.created_at < ?"
@@ -94,7 +95,7 @@ func (r *AuditLogRepository) GetByWorkspace(
limit, offset int,
beforeDate *time.Time,
) ([]*AuditLogDTO, error) {
var auditLogs = make([]*AuditLogDTO, 0)
auditLogs := make([]*AuditLogDTO, 0)
sql := `
SELECT
@@ -111,7 +112,7 @@ func (r *AuditLogRepository) GetByWorkspace(
LEFT JOIN workspaces w ON al.workspace_id = w.id
WHERE al.workspace_id = ?`
args := []interface{}{workspaceID}
args := []any{workspaceID}
if beforeDate != nil {
sql += " AND al.created_at < ?"

View File

@@ -4,10 +4,10 @@ import (
"log/slog"
"time"
"github.com/google/uuid"
user_enums "databasus-backend/internal/features/users/enums"
user_models "databasus-backend/internal/features/users/models"
"github.com/google/uuid"
)
type AuditLogService struct {

View File

@@ -4,11 +4,11 @@ import (
"testing"
"time"
user_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
user_enums "databasus-backend/internal/features/users/enums"
users_testing "databasus-backend/internal/features/users/testing"
)
func Test_AuditLogs_WorkspaceSpecificLogs(t *testing.T) {

View File

@@ -1,7 +1,9 @@
package backuping
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
@@ -196,7 +198,7 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
backupMetadata, err := n.createBackupUseCase.Execute(
ctx,
backup.ID,
backup,
backupConfig,
database,
storage,
@@ -263,7 +265,7 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
// Delete partial backup from storage
storage, storageErr := n.storageService.GetStorageByID(backup.StorageID)
if storageErr == nil {
if deleteErr := storage.DeleteFile(n.fieldEncryptor, backup.ID); deleteErr != nil {
if deleteErr := storage.DeleteFile(n.fieldEncryptor, backup.FileName); deleteErr != nil {
n.logger.Error(
"Failed to delete partial backup file",
"backupId",
@@ -311,6 +313,13 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
// Update backup with encryption metadata if provided
if backupMetadata != nil {
backupMetadata.BackupID = backup.ID
if err := backupMetadata.Validate(); err != nil {
n.logger.Error("Failed to validate backup metadata", "error", err)
return
}
backup.EncryptionSalt = backupMetadata.EncryptionSalt
backup.EncryptionIV = backupMetadata.EncryptionIV
backup.Encryption = backupMetadata.Encryption
@@ -321,6 +330,39 @@ func (n *BackuperNode) MakeBackup(backupID uuid.UUID, isCallNotifier bool) {
return
}
// Save metadata file to storage
if backupMetadata != nil {
metadataJSON, err := json.Marshal(backupMetadata)
if err != nil {
n.logger.Error("Failed to marshal backup metadata to JSON",
"backupId", backup.ID,
"error", err,
)
} else {
metadataReader := bytes.NewReader(metadataJSON)
metadataFileName := backup.FileName + ".metadata"
if err := storage.SaveFile(
context.Background(),
n.fieldEncryptor,
n.logger,
metadataFileName,
metadataReader,
); err != nil {
n.logger.Error("Failed to save backup metadata file to storage",
"backupId", backup.ID,
"fileName", metadataFileName,
"error", err,
)
} else {
n.logger.Info("Backup metadata file saved successfully",
"backupId", backup.ID,
"fileName", metadataFileName,
)
}
}
}
// Update database last backup time
now := time.Now().UTC()
if updateErr := n.databaseService.SetLastBackupTime(databaseID, now); updateErr != nil {

View File

@@ -5,6 +5,9 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -14,9 +17,6 @@ import (
users_testing "databasus-backend/internal/features/users/testing"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
cache_utils "databasus-backend/internal/util/cache"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
func Test_BackupExecuted_NotificationSent(t *testing.T) {

View File

@@ -18,7 +18,8 @@ import (
)
const (
cleanerTickerInterval = 1 * time.Minute
cleanerTickerInterval = 1 * time.Minute
recentBackupGracePeriod = 60 * time.Minute
)
type BackupCleaner struct {
@@ -51,8 +52,8 @@ func (c *BackupCleaner) Run(ctx context.Context) {
case <-ctx.Done():
return
case <-ticker.C:
if err := c.cleanOldBackups(); err != nil {
c.logger.Error("Failed to clean old backups", "error", err)
if err := c.cleanByRetentionPolicy(); err != nil {
c.logger.Error("Failed to clean backups by retention policy", "error", err)
}
if err := c.cleanExceededBackups(); err != nil {
@@ -79,8 +80,7 @@ func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
return err
}
err = storage.DeleteFile(c.fieldEncryptor, backup.ID)
if err != nil {
if err := storage.DeleteFile(c.fieldEncryptor, backup.FileName); err != nil {
// we do not return error here, because sometimes clean up performed
// before unavailable storage removal or change - therefore we should
// proceed even in case of error. It's possible that some S3 or
@@ -88,6 +88,11 @@ func (c *BackupCleaner) DeleteBackup(backup *backups_core.Backup) error {
c.logger.Error("Failed to delete backup file", "error", err)
}
metadataFileName := backup.FileName + ".metadata"
if err := storage.DeleteFile(c.fieldEncryptor, metadataFileName); err != nil {
c.logger.Error("Failed to delete backup metadata file", "error", err)
}
return c.backupRepository.DeleteByID(backup.ID)
}
@@ -95,49 +100,30 @@ func (c *BackupCleaner) AddBackupRemoveListener(listener backups_core.BackupRemo
c.backupRemoveListeners = append(c.backupRemoveListeners, listener)
}
func (c *BackupCleaner) cleanOldBackups() error {
func (c *BackupCleaner) cleanByRetentionPolicy() error {
enabledBackupConfigs, err := c.backupConfigService.GetBackupConfigsWithEnabledBackups()
if err != nil {
return err
}
for _, backupConfig := range enabledBackupConfigs {
backupStorePeriod := backupConfig.StorePeriod
var cleanErr error
if backupStorePeriod == period.PeriodForever {
continue
switch backupConfig.RetentionPolicyType {
case backups_config.RetentionPolicyTypeCount:
cleanErr = c.cleanByCount(backupConfig)
case backups_config.RetentionPolicyTypeGFS:
cleanErr = c.cleanByGFS(backupConfig)
default:
cleanErr = c.cleanByTimePeriod(backupConfig)
}
storeDuration := backupStorePeriod.ToDuration()
dateBeforeBackupsShouldBeDeleted := time.Now().UTC().Add(-storeDuration)
oldBackups, err := c.backupRepository.FindBackupsBeforeDate(
backupConfig.DatabaseID,
dateBeforeBackupsShouldBeDeleted,
)
if err != nil {
if cleanErr != nil {
c.logger.Error(
"Failed to find old backups for database",
"databaseId",
backupConfig.DatabaseID,
"error",
err,
)
continue
}
for _, backup := range oldBackups {
if err := c.DeleteBackup(backup); err != nil {
c.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", err)
continue
}
c.logger.Info(
"Deleted old backup",
"backupId",
backup.ID,
"databaseId",
backupConfig.DatabaseID,
"Failed to clean backups by retention policy",
"databaseId", backupConfig.DatabaseID,
"policy", backupConfig.RetentionPolicyType,
"error", cleanErr,
)
}
}
@@ -174,6 +160,158 @@ func (c *BackupCleaner) cleanExceededBackups() error {
return nil
}
// cleanByTimePeriod deletes backups older than the configured retention
// time period. An empty or "forever" period disables time-based cleanup.
// Backups younger than the recent-backup grace period are always spared.
func (c *BackupCleaner) cleanByTimePeriod(backupConfig *backups_config.BackupConfig) error {
	retention := backupConfig.RetentionTimePeriod
	if retention == "" || retention == period.PeriodForever {
		return nil
	}

	// Anything created before this cutoff has outlived its retention window.
	cutoff := time.Now().UTC().Add(-retention.ToDuration())

	expired, err := c.backupRepository.FindBackupsBeforeDate(backupConfig.DatabaseID, cutoff)
	if err != nil {
		return fmt.Errorf(
			"failed to find old backups for database %s: %w",
			backupConfig.DatabaseID,
			err,
		)
	}

	for _, backup := range expired {
		if isRecentBackup(backup) {
			continue
		}
		if deleteErr := c.DeleteBackup(backup); deleteErr != nil {
			// Log and move on so one bad backup does not block the rest.
			c.logger.Error("Failed to delete old backup", "backupId", backup.ID, "error", deleteErr)
			continue
		}
		c.logger.Info(
			"Deleted old backup",
			"backupId", backup.ID,
			"databaseId", backupConfig.DatabaseID,
		)
	}

	return nil
}
// cleanByCount enforces a keep-at-most-N retention policy: the newest
// RetentionCount completed backups survive, older ones are deleted.
// A non-positive count disables this policy. Backups younger than the
// recent-backup grace period are always spared.
func (c *BackupCleaner) cleanByCount(backupConfig *backups_config.BackupConfig) error {
	keepCount := backupConfig.RetentionCount
	if keepCount <= 0 {
		return nil
	}

	backups, err := c.backupRepository.FindByDatabaseIdAndStatus(
		backupConfig.DatabaseID,
		backups_core.BackupStatusCompleted,
	)
	if err != nil {
		return fmt.Errorf(
			"failed to find completed backups for database %s: %w",
			backupConfig.DatabaseID,
			err,
		)
	}

	// backups are ordered newest first; everything past index keepCount is excess
	if len(backups) <= keepCount {
		return nil
	}

	for _, backup := range backups[keepCount:] {
		if isRecentBackup(backup) {
			continue
		}
		if deleteErr := c.DeleteBackup(backup); deleteErr != nil {
			// Log and continue so one failure does not abort the sweep.
			c.logger.Error(
				"Failed to delete backup by count policy",
				"backupId",
				backup.ID,
				"error",
				deleteErr,
			)
			continue
		}
		c.logger.Info(
			"Deleted backup by count policy",
			"backupId", backup.ID,
			"databaseId", backupConfig.DatabaseID,
			"retentionCount", keepCount,
		)
	}

	return nil
}
// cleanByGFS applies Grandfather-Father-Son retention: the newest backup in
// each configured hourly/daily/weekly/monthly/yearly slot is kept (as decided
// by buildGFSKeepSet) and every other completed backup is deleted. All-zero
// GFS settings disable the policy. Backups younger than the recent-backup
// grace period are always spared.
func (c *BackupCleaner) cleanByGFS(backupConfig *backups_config.BackupConfig) error {
	gfsDisabled := backupConfig.RetentionGfsHours <= 0 &&
		backupConfig.RetentionGfsDays <= 0 &&
		backupConfig.RetentionGfsWeeks <= 0 &&
		backupConfig.RetentionGfsMonths <= 0 &&
		backupConfig.RetentionGfsYears <= 0
	if gfsDisabled {
		return nil
	}

	backups, err := c.backupRepository.FindByDatabaseIdAndStatus(
		backupConfig.DatabaseID,
		backups_core.BackupStatusCompleted,
	)
	if err != nil {
		return fmt.Errorf(
			"failed to find completed backups for database %s: %w",
			backupConfig.DatabaseID,
			err,
		)
	}

	retained := buildGFSKeepSet(
		backups,
		backupConfig.RetentionGfsHours,
		backupConfig.RetentionGfsDays,
		backupConfig.RetentionGfsWeeks,
		backupConfig.RetentionGfsMonths,
		backupConfig.RetentionGfsYears,
	)

	for _, backup := range backups {
		if retained[backup.ID] || isRecentBackup(backup) {
			continue
		}
		if deleteErr := c.DeleteBackup(backup); deleteErr != nil {
			// Log and continue so one failure does not abort the sweep.
			c.logger.Error(
				"Failed to delete backup by GFS policy",
				"backupId",
				backup.ID,
				"error",
				deleteErr,
			)
			continue
		}
		c.logger.Info(
			"Deleted backup by GFS policy",
			"backupId", backup.ID,
			"databaseId", backupConfig.DatabaseID,
		)
	}

	return nil
}
func (c *BackupCleaner) cleanExceededBackupsForDatabase(
databaseID uuid.UUID,
limitperDbMB int64,
@@ -210,6 +348,21 @@ func (c *BackupCleaner) cleanExceededBackupsForDatabase(
}
backup := oldestBackups[0]
if isRecentBackup(backup) {
c.logger.Warn(
"Oldest backup is too recent to delete, stopping size cleanup",
"databaseId",
databaseID,
"backupId",
backup.ID,
"totalSizeMB",
backupsTotalSizeMB,
"limitMB",
limitperDbMB,
)
break
}
if err := c.DeleteBackup(backup); err != nil {
c.logger.Error(
"Failed to delete exceeded backup",
@@ -240,3 +393,128 @@ func (c *BackupCleaner) cleanExceededBackupsForDatabase(
return nil
}
// isRecentBackup reports whether the backup was created within the
// recentBackupGracePeriod and should therefore be spared from cleanup.
func isRecentBackup(backup *backups_core.Backup) bool {
	age := time.Since(backup.CreatedAt)
	return age < recentBackupGracePeriod
}
// buildGFSKeepSet determines which backups to retain under the GFS rotation scheme.
// Backups must be sorted newest-first. A backup can fill multiple slots simultaneously
// (e.g. the newest backup of a year also fills the monthly, weekly, daily, and hourly slot).
//
// Each retention level keeps at most `hours`/`days`/... backups, one per
// calendar bucket (hour, day, ISO week, month, year), and only within a
// per-level time window measured back from the newest backup. Levels with a
// non-positive count are disabled. Returns the set of backup IDs to keep.
func buildGFSKeepSet(
	backups []*backups_core.Backup,
	hours, days, weeks, months, years int,
) map[uuid.UUID]bool {
	keep := make(map[uuid.UUID]bool)
	if len(backups) == 0 {
		return keep
	}

	// One entry per calendar bucket already claimed at each level, so only
	// the newest backup of a given hour/day/week/month/year fills that slot.
	hoursSeen := make(map[string]bool)
	daysSeen := make(map[string]bool)
	weeksSeen := make(map[string]bool)
	monthsSeen := make(map[string]bool)
	yearsSeen := make(map[string]bool)

	// Per-level counters capping how many slots each level may fill.
	hoursKept, daysKept, weeksKept, monthsKept, yearsKept := 0, 0, 0, 0, 0

	// Compute per-level time-window cutoffs so higher-frequency slots
	// cannot absorb backups that belong to lower-frequency levels.
	// `ref` is the newest backup's creation time (backups are newest-first).
	ref := backups[0].CreatedAt
	rawHourlyCutoff := ref.Add(-time.Duration(hours) * time.Hour)
	rawDailyCutoff := ref.Add(-time.Duration(days) * 24 * time.Hour)
	rawWeeklyCutoff := ref.Add(-time.Duration(weeks) * 7 * 24 * time.Hour)
	rawMonthlyCutoff := ref.AddDate(0, -months, 0)
	rawYearlyCutoff := ref.AddDate(-years, 0, 0)

	// Hierarchical capping: each level's window cannot extend further back
	// than the nearest active lower-frequency level's window.
	yearlyCutoff := rawYearlyCutoff

	monthlyCutoff := rawMonthlyCutoff
	if years > 0 {
		monthlyCutoff = laterOf(monthlyCutoff, yearlyCutoff)
	}

	weeklyCutoff := rawWeeklyCutoff
	if months > 0 {
		weeklyCutoff = laterOf(weeklyCutoff, monthlyCutoff)
	} else if years > 0 {
		weeklyCutoff = laterOf(weeklyCutoff, yearlyCutoff)
	}

	dailyCutoff := rawDailyCutoff
	// Cap against the nearest enabled coarser level (weekly > monthly > yearly).
	switch {
	case weeks > 0:
		dailyCutoff = laterOf(dailyCutoff, weeklyCutoff)
	case months > 0:
		dailyCutoff = laterOf(dailyCutoff, monthlyCutoff)
	case years > 0:
		dailyCutoff = laterOf(dailyCutoff, yearlyCutoff)
	}

	hourlyCutoff := rawHourlyCutoff
	// Same capping for the hourly window (daily > weekly > monthly > yearly).
	switch {
	case days > 0:
		hourlyCutoff = laterOf(hourlyCutoff, dailyCutoff)
	case weeks > 0:
		hourlyCutoff = laterOf(hourlyCutoff, weeklyCutoff)
	case months > 0:
		hourlyCutoff = laterOf(hourlyCutoff, monthlyCutoff)
	case years > 0:
		hourlyCutoff = laterOf(hourlyCutoff, yearlyCutoff)
	}

	// Walk newest-first: the first backup seen in a bucket claims the slot,
	// provided the level still has capacity and the backup is inside the
	// level's (strictly after the cutoff) time window.
	for _, backup := range backups {
		t := backup.CreatedAt
		hourKey := t.Format("2006-01-02-15")
		dayKey := t.Format("2006-01-02")
		weekYear, week := t.ISOWeek()
		weekKey := fmt.Sprintf("%d-%02d", weekYear, week)
		monthKey := t.Format("2006-01")
		yearKey := t.Format("2006")

		if hours > 0 && hoursKept < hours && !hoursSeen[hourKey] && t.After(hourlyCutoff) {
			keep[backup.ID] = true
			hoursSeen[hourKey] = true
			hoursKept++
		}

		if days > 0 && daysKept < days && !daysSeen[dayKey] && t.After(dailyCutoff) {
			keep[backup.ID] = true
			daysSeen[dayKey] = true
			daysKept++
		}

		if weeks > 0 && weeksKept < weeks && !weeksSeen[weekKey] && t.After(weeklyCutoff) {
			keep[backup.ID] = true
			weeksSeen[weekKey] = true
			weeksKept++
		}

		if months > 0 && monthsKept < months && !monthsSeen[monthKey] && t.After(monthlyCutoff) {
			keep[backup.ID] = true
			monthsSeen[monthKey] = true
			monthsKept++
		}

		if years > 0 && yearsKept < years && !yearsSeen[yearKey] && t.After(yearlyCutoff) {
			keep[backup.ID] = true
			yearsSeen[yearKey] = true
			yearsKept++
		}
	}

	return keep
}
func laterOf(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}

File diff suppressed because it is too large Load Diff

View File

@@ -4,6 +4,9 @@ import (
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -15,12 +18,9 @@ import (
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"databasus-backend/internal/storage"
"databasus-backend/internal/util/period"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
func Test_CleanOldBackups_DeletesBackupsOlderThanRetentionTimePeriod(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
@@ -41,21 +41,20 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create backup interval
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
// Create backups with different ages
now := time.Now().UTC()
oldBackup1 := &backups_core.Backup{
ID: uuid.New(),
@@ -63,7 +62,7 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-10 * 24 * time.Hour), // 10 days old
CreatedAt: now.Add(-10 * 24 * time.Hour),
}
oldBackup2 := &backups_core.Backup{
ID: uuid.New(),
@@ -71,7 +70,7 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-8 * 24 * time.Hour), // 8 days old
CreatedAt: now.Add(-8 * 24 * time.Hour),
}
recentBackup := &backups_core.Backup{
ID: uuid.New(),
@@ -79,7 +78,7 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-3 * 24 * time.Hour), // 3 days old
CreatedAt: now.Add(-3 * 24 * time.Hour),
}
err = backupRepository.Save(oldBackup1)
@@ -89,19 +88,17 @@ func Test_CleanOldBackups_DeletesBackupsOlderThanStorePeriod(t *testing.T) {
err = backupRepository.Save(recentBackup)
assert.NoError(t, err)
// Run cleanup
cleaner := GetBackupCleaner()
err = cleaner.cleanOldBackups()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
// Verify old backups deleted, recent backup remains
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 1, len(remainingBackups))
assert.Equal(t, recentBackup.ID, remainingBackups[0].ID)
}
func Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod(t *testing.T) {
func Test_CleanOldBackups_SkipsDatabaseWithForeverRetentionPeriod(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
@@ -122,38 +119,35 @@ func Test_CleanOldBackups_SkipsDatabaseWithForeverStorePeriod(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create backup interval
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodForever,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodForever,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
// Create very old backup
oldBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: time.Now().UTC().Add(-365 * 24 * time.Hour), // 1 year old
CreatedAt: time.Now().UTC().Add(-365 * 24 * time.Hour),
}
err = backupRepository.Save(oldBackup)
assert.NoError(t, err)
// Run cleanup
cleaner := GetBackupCleaner()
err = cleaner.cleanOldBackups()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
// Verify backup still exists
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 1, len(remainingBackups))
@@ -181,22 +175,21 @@ func Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create backup interval
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodForever,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodForever,
StorageID: &storage.ID,
MaxBackupsTotalSizeMB: 100, // 100 MB limit
MaxBackupsTotalSizeMB: 100,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
// Create 3 backups totaling 50MB (under limit)
for i := 0; i < 3; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
@@ -210,12 +203,10 @@ func Test_CleanExceededBackups_WhenUnderLimit_NoBackupsDeleted(t *testing.T) {
assert.NoError(t, err)
}
// Run cleanup
cleaner := GetBackupCleaner()
err = cleaner.cleanExceededBackups()
assert.NoError(t, err)
// Verify all backups remain
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
@@ -242,22 +233,21 @@ func Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups(t *testing.T)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create backup interval
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodForever,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodForever,
StorageID: &storage.ID,
MaxBackupsTotalSizeMB: 30, // 30 MB limit
MaxBackupsTotalSizeMB: 30,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
// Create 5 backups of 10MB each (total 50MB, over 30MB limit)
now := time.Now().UTC()
var backupIDs []uuid.UUID
for i := 0; i < 5; i++ {
@@ -267,33 +257,30 @@ func Test_CleanExceededBackups_WhenOverLimit_DeletesOldestBackups(t *testing.T)
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-time.Duration(4-i) * time.Hour), // Oldest first
CreatedAt: now.Add(-time.Duration(4-i) * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backupIDs = append(backupIDs, backup.ID)
}
// Run cleanup
cleaner := GetBackupCleaner()
err = cleaner.cleanExceededBackups()
assert.NoError(t, err)
// Verify 2 oldest backups deleted, 3 newest remain
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
// Check that the newest 3 backups remain
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[backupIDs[0]]) // Oldest deleted
assert.False(t, remainingIDs[backupIDs[1]]) // 2nd oldest deleted
assert.True(t, remainingIDs[backupIDs[2]]) // 3rd remains
assert.True(t, remainingIDs[backupIDs[3]]) // 4th remains
assert.True(t, remainingIDs[backupIDs[4]]) // Newest remains
assert.False(t, remainingIDs[backupIDs[0]])
assert.False(t, remainingIDs[backupIDs[1]])
assert.True(t, remainingIDs[backupIDs[2]])
assert.True(t, remainingIDs[backupIDs[3]])
assert.True(t, remainingIDs[backupIDs[4]])
}
func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
@@ -317,15 +304,15 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create backup interval
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodForever,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodForever,
StorageID: &storage.ID,
MaxBackupsTotalSizeMB: 50, // 50 MB limit
MaxBackupsTotalSizeMB: 50,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
@@ -334,7 +321,6 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
now := time.Now().UTC()
// Create 3 completed backups of 30MB each
completedBackups := make([]*backups_core.Backup, 3)
for i := 0; i < 3; i++ {
backup := &backups_core.Backup{
@@ -350,7 +336,6 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
completedBackups[i] = backup
}
// Create 1 in-progress backup (should be excluded from size calculation and deletion)
inProgressBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
@@ -362,19 +347,14 @@ func Test_CleanExceededBackups_SkipsInProgressBackups(t *testing.T) {
err = backupRepository.Save(inProgressBackup)
assert.NoError(t, err)
// Run cleanup
cleaner := GetBackupCleaner()
err = cleaner.cleanExceededBackups()
assert.NoError(t, err)
// Verify: only completed backups deleted, in-progress remains
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
// Should have in-progress + 1 completed (total 40MB completed + 10MB in-progress)
assert.GreaterOrEqual(t, len(remainingBackups), 2)
// Verify in-progress backup still exists
var inProgressFound bool
for _, backup := range remainingBackups {
if backup.ID == inProgressBackup.ID {
@@ -406,22 +386,21 @@ func Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create backup interval
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodForever,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodForever,
StorageID: &storage.ID,
MaxBackupsTotalSizeMB: 0, // No size limit
MaxBackupsTotalSizeMB: 0,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
// Create large backups
for i := 0; i < 10; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
@@ -435,12 +414,10 @@ func Test_CleanExceededBackups_WithZeroLimit_SkipsDatabase(t *testing.T) {
assert.NoError(t, err)
}
// Run cleanup
cleaner := GetBackupCleaner()
err = cleaner.cleanExceededBackups()
assert.NoError(t, err)
// Verify all backups remain
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 10, len(remainingBackups))
@@ -467,7 +444,6 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// Create completed backups
completedBackup1 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
@@ -484,7 +460,6 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) {
BackupSizeMb: 20.3,
CreatedAt: time.Now().UTC(),
}
// Create failed backup (should be included)
failedBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
@@ -493,7 +468,6 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) {
BackupSizeMb: 5.2,
CreatedAt: time.Now().UTC(),
}
// Create in-progress backup (should be excluded)
inProgressBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
@@ -512,25 +486,215 @@ func Test_GetTotalSizeByDatabase_CalculatesCorrectly(t *testing.T) {
err = backupRepository.Save(inProgressBackup)
assert.NoError(t, err)
// Calculate total size
totalSize, err := backupRepository.GetTotalSizeByDatabase(database.ID)
assert.NoError(t, err)
// Should be 10.5 + 20.3 + 5.2 = 36.0 (excluding in-progress 100)
assert.InDelta(t, 36.0, totalSize, 0.1)
}
// Mock listener for testing
type mockBackupRemoveListener struct {
onBeforeBackupRemove func(*backups_core.Backup) error
}
func Test_CleanByCount_KeepsNewestNBackups_DeletesOlder(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
func (m *mockBackupRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error {
if m.onBeforeBackupRemove != nil {
return m.onBeforeBackupRemove(backup)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeCount,
RetentionCount: 3,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
var backupIDs []uuid.UUID
for i := 0; i < 5; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(
-time.Duration(4-i) * time.Hour,
), // oldest first in loop, newest = i=4
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
backupIDs = append(backupIDs, backup.ID)
}
return nil
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 3, len(remainingBackups))
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[backupIDs[0]], "Oldest backup should be deleted")
assert.False(t, remainingIDs[backupIDs[1]], "2nd oldest backup should be deleted")
assert.True(t, remainingIDs[backupIDs[2]], "3rd backup should remain")
assert.True(t, remainingIDs[backupIDs[3]], "4th backup should remain")
assert.True(t, remainingIDs[backupIDs[4]], "Newest backup should remain")
}
func Test_CleanByCount_WhenUnderLimit_NoBackupsDeleted(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeCount,
RetentionCount: 10,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
for i := 0; i < 5; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: time.Now().UTC().Add(-time.Duration(i) * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 5, len(remainingBackups))
}
func Test_CleanByCount_DoesNotDeleteInProgressBackups(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeCount,
RetentionCount: 2,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
for i := 0; i < 3; i++ {
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-time.Duration(3-i) * time.Hour),
}
err = backupRepository.Save(backup)
assert.NoError(t, err)
}
inProgressBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusInProgress,
BackupSizeMb: 5,
CreatedAt: now,
}
err = backupRepository.Save(inProgressBackup)
assert.NoError(t, err)
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
var inProgressFound bool
for _, backup := range remainingBackups {
if backup.ID == inProgressBackup.ID {
inProgressFound = true
}
}
assert.True(t, inProgressFound, "In-progress backup should not be deleted by count policy")
}
// Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase verifies resilience
@@ -579,6 +743,267 @@ func Test_DeleteBackup_WhenStorageDeleteFails_BackupStillRemovedFromDatabase(t *
assert.Nil(t, deletedBackup)
}
func Test_CleanByTimePeriod_SkipsRecentBackup_EvenIfOlderThanRetention(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// Retention period is 1 day — any backup older than 1 day should be deleted.
// But the recent backup was created only 30 minutes ago and must be preserved.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodDay,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
oldBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-2 * 24 * time.Hour),
}
recentBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-30 * time.Minute),
}
err = backupRepository.Save(oldBackup)
assert.NoError(t, err)
err = backupRepository.Save(recentBackup)
assert.NoError(t, err)
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(t, 1, len(remainingBackups))
assert.Equal(t, recentBackup.ID, remainingBackups[0].ID)
}
func Test_CleanByCount_SkipsRecentBackup_EvenIfOverLimit(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// Retention count is 2 — 4 backups exist so 2 should be deleted.
// The oldest backup in the "excess" tail was made 30 min ago — it must be preserved.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeCount,
RetentionCount: 2,
StorageID: &storage.ID,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
oldBackup1 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-5 * time.Hour),
}
oldBackup2 := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-3 * time.Hour),
}
// This backup is 3rd newest and would normally be deleted — but it is recent.
recentExcessBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-30 * time.Minute),
}
newestBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 10,
CreatedAt: now.Add(-10 * time.Minute),
}
for _, b := range []*backups_core.Backup{oldBackup1, oldBackup2, recentExcessBackup, newestBackup} {
err = backupRepository.Save(b)
assert.NoError(t, err)
}
cleaner := GetBackupCleaner()
err = cleaner.cleanByRetentionPolicy()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
remainingIDs := make(map[uuid.UUID]bool)
for _, backup := range remainingBackups {
remainingIDs[backup.ID] = true
}
assert.False(t, remainingIDs[oldBackup1.ID], "Oldest non-recent backup should be deleted")
assert.False(t, remainingIDs[oldBackup2.ID], "2nd oldest non-recent backup should be deleted")
assert.True(
t,
remainingIDs[recentExcessBackup.ID],
"Recent backup must be preserved despite being over limit",
)
assert.True(t, remainingIDs[newestBackup.ID], "Newest backup should be preserved")
}
func Test_CleanExceededBackups_SkipsRecentBackup_WhenOverTotalSizeLimit(t *testing.T) {
router := CreateTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
storage := storages.CreateTestStorage(workspace.ID)
notifier := notifiers.CreateTestNotifier(workspace.ID)
database := databases.CreateTestDatabase(workspace.ID, storage, notifier)
defer func() {
backups, _ := backupRepository.FindByDatabaseID(database.ID)
for _, backup := range backups {
backupRepository.DeleteByID(backup.ID)
}
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
notifiers.RemoveTestNotifier(notifier)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
interval := createTestInterval()
// Total size limit is 10 MB. We have two backups of 8 MB each (16 MB total).
// The oldest backup was created 30 minutes ago — within the grace period.
// The cleaner must stop and leave both backups intact.
backupConfig := &backups_config.BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: backups_config.RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodForever,
StorageID: &storage.ID,
MaxBackupsTotalSizeMB: 10,
BackupIntervalID: interval.ID,
BackupInterval: interval,
}
_, err := backups_config.GetBackupConfigService().SaveBackupConfig(backupConfig)
assert.NoError(t, err)
now := time.Now().UTC()
olderRecentBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 8,
CreatedAt: now.Add(-30 * time.Minute),
}
newerRecentBackup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
StorageID: storage.ID,
Status: backups_core.BackupStatusCompleted,
BackupSizeMb: 8,
CreatedAt: now.Add(-10 * time.Minute),
}
err = backupRepository.Save(olderRecentBackup)
assert.NoError(t, err)
err = backupRepository.Save(newerRecentBackup)
assert.NoError(t, err)
cleaner := GetBackupCleaner()
err = cleaner.cleanExceededBackups()
assert.NoError(t, err)
remainingBackups, err := backupRepository.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Equal(
t,
2,
len(remainingBackups),
"Both recent backups must be preserved even though total size exceeds limit",
)
}
// Mock listener for testing
type mockBackupRemoveListener struct {
onBeforeBackupRemove func(*backups_core.Backup) error
}
func (m *mockBackupRemoveListener) OnBeforeBackupRemove(backup *backups_core.Backup) error {
if m.onBeforeBackupRemove != nil {
return m.onBeforeBackupRemove(backup)
}
return nil
}
func createTestInterval() *intervals.Interval {
timeOfDay := "04:00"
interval := &intervals.Interval{

View File

@@ -25,24 +25,24 @@ var backupRepository = &backups_core.BackupRepository{}
var taskCancelManager = tasks_cancellation.GetTaskCancelManager()
var backupCleaner = &BackupCleaner{
backupRepository: backupRepository,
storageService: storages.GetStorageService(),
backupConfigService: backups_config.GetBackupConfigService(),
fieldEncryptor: encryption.GetFieldEncryptor(),
logger: logger.GetLogger(),
backupRemoveListeners: []backups_core.BackupRemoveListener{},
runOnce: sync.Once{},
hasRun: atomic.Bool{},
backupRepository,
storages.GetStorageService(),
backups_config.GetBackupConfigService(),
encryption.GetFieldEncryptor(),
logger.GetLogger(),
[]backups_core.BackupRemoveListener{},
sync.Once{},
atomic.Bool{},
}
var backupNodesRegistry = &BackupNodesRegistry{
client: cache_utils.GetValkeyClient(),
logger: logger.GetLogger(),
timeout: cache_utils.DefaultCacheTimeout,
pubsubBackups: cache_utils.NewPubSubManager(),
pubsubCompletions: cache_utils.NewPubSubManager(),
runOnce: sync.Once{},
hasRun: atomic.Bool{},
cache_utils.GetValkeyClient(),
logger.GetLogger(),
cache_utils.DefaultCacheTimeout,
cache_utils.NewPubSubManager(),
cache_utils.NewPubSubManager(),
sync.Once{},
atomic.Bool{},
}
func getNodeID() uuid.UUID {
@@ -50,34 +50,35 @@ func getNodeID() uuid.UUID {
}
var backuperNode = &BackuperNode{
databaseService: databases.GetDatabaseService(),
fieldEncryptor: encryption.GetFieldEncryptor(),
workspaceService: workspaces_services.GetWorkspaceService(),
backupRepository: backupRepository,
backupConfigService: backups_config.GetBackupConfigService(),
storageService: storages.GetStorageService(),
notificationSender: notifiers.GetNotifierService(),
backupCancelManager: taskCancelManager,
backupNodesRegistry: backupNodesRegistry,
logger: logger.GetLogger(),
createBackupUseCase: usecases.GetCreateBackupUsecase(),
nodeID: getNodeID(),
lastHeartbeat: time.Time{},
runOnce: sync.Once{},
hasRun: atomic.Bool{},
databases.GetDatabaseService(),
encryption.GetFieldEncryptor(),
workspaces_services.GetWorkspaceService(),
backupRepository,
backups_config.GetBackupConfigService(),
storages.GetStorageService(),
notifiers.GetNotifierService(),
taskCancelManager,
backupNodesRegistry,
logger.GetLogger(),
usecases.GetCreateBackupUsecase(),
getNodeID(),
time.Time{},
sync.Once{},
atomic.Bool{},
}
var backupsScheduler = &BackupsScheduler{
backupRepository: backupRepository,
backupConfigService: backups_config.GetBackupConfigService(),
taskCancelManager: taskCancelManager,
backupNodesRegistry: backupNodesRegistry,
lastBackupTime: time.Now().UTC(),
logger: logger.GetLogger(),
backupToNodeRelations: make(map[uuid.UUID]BackupToNodeRelation),
backuperNode: backuperNode,
runOnce: sync.Once{},
hasRun: atomic.Bool{},
backupRepository,
backups_config.GetBackupConfigService(),
taskCancelManager,
backupNodesRegistry,
databases.GetDatabaseService(),
time.Now().UTC(),
logger.GetLogger(),
make(map[uuid.UUID]BackupToNodeRelation),
backuperNode,
sync.Once{},
atomic.Bool{},
}
func GetBackupsScheduler() *BackupsScheduler {

View File

@@ -6,14 +6,15 @@ import (
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/mock"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"
"databasus-backend/internal/features/storages"
"github.com/google/uuid"
"github.com/stretchr/testify/mock"
)
type MockNotificationSender struct {
@@ -32,7 +33,7 @@ type CreateFailedBackupUsecase struct{}
func (uc *CreateFailedBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -46,7 +47,7 @@ type CreateSuccessBackupUsecase struct{}
func (uc *CreateSuccessBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -65,7 +66,7 @@ type CreateLargeBackupUsecase struct{}
func (uc *CreateLargeBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -84,7 +85,7 @@ type CreateProgressiveBackupUsecase struct{}
func (uc *CreateProgressiveBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -124,7 +125,7 @@ type CreateMediumBackupUsecase struct{}
func (uc *CreateMediumBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -152,7 +153,7 @@ func NewMockTrackingBackupUsecase() *MockTrackingBackupUsecase {
func (m *MockTrackingBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -162,7 +163,7 @@ func (m *MockTrackingBackupUsecase) Execute(
// Send backup ID to channel (non-blocking)
select {
case m.calledBackupIDs <- backupID:
case m.calledBackupIDs <- backup.ID:
default:
}

View File

@@ -10,10 +10,10 @@ import (
"sync/atomic"
"time"
cache_utils "databasus-backend/internal/util/cache"
"github.com/google/uuid"
"github.com/valkey-io/valkey-go"
cache_utils "databasus-backend/internal/util/cache"
)
const (
@@ -415,7 +415,7 @@ func (r *BackupNodesRegistry) UnsubscribeNodeForBackupsAssignments() error {
return nil
}
func (r *BackupNodesRegistry) PublishBackupCompletion(nodeID uuid.UUID, backupID uuid.UUID) error {
func (r *BackupNodesRegistry) PublishBackupCompletion(nodeID, backupID uuid.UUID) error {
ctx := context.Background()
message := BackupCompletionMessage{
@@ -437,7 +437,7 @@ func (r *BackupNodesRegistry) PublishBackupCompletion(nodeID uuid.UUID, backupID
}
func (r *BackupNodesRegistry) SubscribeForBackupsCompletions(
handler func(nodeID uuid.UUID, backupID uuid.UUID),
handler func(nodeID, backupID uuid.UUID),
) error {
ctx := context.Background()

View File

@@ -9,11 +9,11 @@ import (
"testing"
"time"
cache_utils "databasus-backend/internal/util/cache"
"databasus-backend/internal/util/logger"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
cache_utils "databasus-backend/internal/util/cache"
"databasus-backend/internal/util/logger"
)
func Test_HearthbeatNodeInRegistry_RegistersNodeWithTTL(t *testing.T) {
@@ -903,7 +903,7 @@ func Test_SubscribeForBackupsCompletions_ReceivesCompletedBackups(t *testing.T)
receivedBackupID := make(chan uuid.UUID, 1)
receivedNodeID := make(chan uuid.UUID, 1)
handler := func(nodeID uuid.UUID, backupID uuid.UUID) {
handler := func(nodeID, backupID uuid.UUID) {
receivedNodeID <- nodeID
receivedBackupID <- backupID
}
@@ -940,7 +940,7 @@ func Test_SubscribeForBackupsCompletions_ParsesJsonCorrectly(t *testing.T) {
defer registry.UnsubscribeForBackupsCompletions()
receivedBackups := make(chan uuid.UUID, 2)
handler := func(nodeID uuid.UUID, backupID uuid.UUID) {
handler := func(nodeID, backupID uuid.UUID) {
receivedBackups <- backupID
}
@@ -969,7 +969,7 @@ func Test_SubscribeForBackupsCompletions_HandlesInvalidJson(t *testing.T) {
defer registry.UnsubscribeForBackupsCompletions()
receivedBackupID := make(chan uuid.UUID, 1)
handler := func(nodeID uuid.UUID, backupID uuid.UUID) {
handler := func(nodeID, backupID uuid.UUID) {
receivedBackupID <- backupID
}
@@ -997,7 +997,7 @@ func Test_UnsubscribeForBackupsCompletions_StopsReceivingMessages(t *testing.T)
backupID2 := uuid.New()
receivedBackupID := make(chan uuid.UUID, 2)
handler := func(nodeID uuid.UUID, backupID uuid.UUID) {
handler := func(nodeID, backupID uuid.UUID) {
receivedBackupID <- backupID
}
@@ -1032,7 +1032,7 @@ func Test_SubscribeForBackupsCompletions_WhenAlreadySubscribed_ReturnsError(t *t
registry := createTestRegistry()
defer registry.UnsubscribeForBackupsCompletions()
handler := func(nodeID uuid.UUID, backupID uuid.UUID) {}
handler := func(nodeID, backupID uuid.UUID) {}
err := registry.SubscribeForBackupsCompletions(handler)
assert.NoError(t, err)
@@ -1064,9 +1064,9 @@ func Test_MultipleSubscribers_EachReceivesCompletionMessages(t *testing.T) {
receivedBackups2 := make(chan uuid.UUID, 3)
receivedBackups3 := make(chan uuid.UUID, 3)
handler1 := func(nodeID uuid.UUID, backupID uuid.UUID) { receivedBackups1 <- backupID }
handler2 := func(nodeID uuid.UUID, backupID uuid.UUID) { receivedBackups2 <- backupID }
handler3 := func(nodeID uuid.UUID, backupID uuid.UUID) { receivedBackups3 <- backupID }
handler1 := func(nodeID, backupID uuid.UUID) { receivedBackups1 <- backupID }
handler2 := func(nodeID, backupID uuid.UUID) { receivedBackups2 <- backupID }
handler3 := func(nodeID, backupID uuid.UUID) { receivedBackups3 <- backupID }
err := registry1.SubscribeForBackupsCompletions(handler1)
assert.NoError(t, err)

View File

@@ -13,6 +13,7 @@ import (
"databasus-backend/internal/config"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
task_cancellation "databasus-backend/internal/features/tasks/cancellation"
)
@@ -27,6 +28,7 @@ type BackupsScheduler struct {
backupConfigService *backups_config.BackupConfigService
taskCancelManager *task_cancellation.TaskCancelManager
backupNodesRegistry *BackupNodesRegistry
databaseService *databases.DatabaseService
lastBackupTime time.Time
logger *slog.Logger
@@ -103,28 +105,38 @@ func (s *BackupsScheduler) IsSchedulerRunning() bool {
return s.lastBackupTime.After(time.Now().UTC().Add(-schedulerHealthcheckThreshold))
}
func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool) {
backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(databaseID)
func (s *BackupsScheduler) IsBackupNodesAvailable() bool {
nodes, err := s.backupNodesRegistry.GetAvailableNodes()
if err != nil {
s.logger.Error("Failed to get available nodes for health check", "error", err)
return false
}
return len(nodes) > 0
}
func (s *BackupsScheduler) StartBackup(database *databases.Database, isCallNotifier bool) {
backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(database.ID)
if err != nil {
s.logger.Error("Failed to get backup config by database ID", "error", err)
return
}
if backupConfig.StorageID == nil {
s.logger.Error("Backup config storage ID is nil", "databaseId", databaseID)
s.logger.Error("Backup config storage ID is nil", "databaseId", database.ID)
return
}
// Check for existing in-progress backups
inProgressBackups, err := s.backupRepository.FindByDatabaseIdAndStatus(
databaseID,
database.ID,
backups_core.BackupStatusInProgress,
)
if err != nil {
s.logger.Error(
"Failed to check for in-progress backups",
"databaseId",
databaseID,
database.ID,
"error",
err,
)
@@ -135,7 +147,7 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
s.logger.Warn(
"Backup already in progress for database, skipping new backup",
"databaseId",
databaseID,
database.ID,
"existingBackupId",
inProgressBackups[0].ID,
)
@@ -154,15 +166,20 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
return
}
fmt.Println("make backup")
backupID := uuid.New()
timestamp := time.Now().UTC()
backup := &backups_core.Backup{
ID: backupID,
DatabaseID: backupConfig.DatabaseID,
StorageID: *backupConfig.StorageID,
Status: backups_core.BackupStatusInProgress,
BackupSizeMb: 0,
CreatedAt: time.Now().UTC(),
CreatedAt: timestamp,
}
backup.GenerateFilename(database.Name)
if err := s.backupRepository.Save(backup); err != nil {
s.logger.Error(
"Failed to save backup",
@@ -214,8 +231,8 @@ func (s *BackupsScheduler) StartBackup(databaseID uuid.UUID, isCallNotifier bool
s.backupToNodeRelations[*leastBusyNodeID] = relation
} else {
s.backupToNodeRelations[*leastBusyNodeID] = BackupToNodeRelation{
NodeID: *leastBusyNodeID,
BackupsIDs: []uuid.UUID{backup.ID},
*leastBusyNodeID,
[]uuid.UUID{backup.ID},
}
}
@@ -319,7 +336,13 @@ func (s *BackupsScheduler) runPendingBackups() error {
backupConfig.BackupInterval.Interval,
)
s.StartBackup(backupConfig.DatabaseID, remainedBackupTryCount == 1)
database, err := s.databaseService.GetDatabaseByID(backupConfig.DatabaseID)
if err != nil {
s.logger.Error("Failed to get database by ID", "error", err)
continue
}
s.StartBackup(database, remainedBackupTryCount == 1)
continue
}
}
@@ -418,7 +441,7 @@ func (s *BackupsScheduler) calculateLeastBusyNode() (*uuid.UUID, error) {
return &bestNode.ID, nil
}
func (s *BackupsScheduler) onBackupCompleted(nodeID uuid.UUID, backupID uuid.UUID) {
func (s *BackupsScheduler) onBackupCompleted(nodeID, backupID uuid.UUID) {
// Verify this task is actually a backup (registry contains multiple task types)
_, err := s.backupRepository.FindByID(backupID)
if err != nil {

View File

@@ -1,6 +1,12 @@
package backuping
import (
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -12,11 +18,6 @@ import (
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
cache_utils "databasus-backend/internal/util/cache"
"databasus-backend/internal/util/period"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func Test_RunPendingBackups_WhenLastBackupWasYesterday_CreatesNewBackup(t *testing.T) {
@@ -57,7 +58,8 @@ func Test_RunPendingBackups_WhenLastBackupWasYesterday_CreatesNewBackup(t *testi
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -126,7 +128,8 @@ func Test_RunPendingBackups_WhenLastBackupWasRecentlyCompleted_SkipsBackup(t *te
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -194,7 +197,8 @@ func Test_RunPendingBackups_WhenLastBackupFailedAndRetriesDisabled_SkipsBackup(t
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
backupConfig.IsRetryIfFailed = false
@@ -266,7 +270,8 @@ func Test_RunPendingBackups_WhenLastBackupFailedAndRetriesEnabled_CreatesNewBack
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
backupConfig.IsRetryIfFailed = true
@@ -339,7 +344,8 @@ func Test_RunPendingBackups_WhenFailedBackupsExceedMaxRetries_SkipsBackup(t *tes
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
backupConfig.IsRetryIfFailed = true
@@ -410,7 +416,8 @@ func Test_RunPendingBackups_WhenBackupsDisabled_SkipsBackup(t *testing.T) {
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = false
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -479,7 +486,8 @@ func Test_CheckDeadNodesAndFailBackups_WhenNodeDies_FailsBackupAndCleansUpRegist
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -492,7 +500,7 @@ func Test_CheckDeadNodesAndFailBackups_WhenNodeDies_FailsBackupAndCleansUpRegist
assert.NoError(t, err)
// Scheduler assigns backup to mock node
GetBackupsScheduler().StartBackup(database.ID, false)
GetBackupsScheduler().StartBackup(database, false)
time.Sleep(100 * time.Millisecond)
backups, err := backupRepository.FindByDatabaseID(database.ID)
@@ -582,7 +590,8 @@ func Test_OnBackupCompleted_WhenTaskIsNotBackup_SkipsProcessing(t *testing.T) {
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -595,7 +604,7 @@ func Test_OnBackupCompleted_WhenTaskIsNotBackup_SkipsProcessing(t *testing.T) {
assert.NoError(t, err)
// Start a backup and assign it to the node
GetBackupsScheduler().StartBackup(database.ID, false)
GetBackupsScheduler().StartBackup(database, false)
time.Sleep(100 * time.Millisecond)
backups, err := backupRepository.FindByDatabaseID(database.ID)
@@ -759,7 +768,8 @@ func Test_FailBackupsInProgress_WhenSchedulerStarts_CancelsBackupsAndUpdatesStat
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -872,7 +882,8 @@ func Test_StartBackup_WhenBackupCompletes_DecrementsActiveTaskCount(t *testing.T
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -892,7 +903,7 @@ func Test_StartBackup_WhenBackupCompletes_DecrementsActiveTaskCount(t *testing.T
t.Logf("Initial active tasks: %d", initialActiveTasks)
// Start backup
scheduler.StartBackup(database.ID, false)
scheduler.StartBackup(database, false)
// Wait for backup to complete
WaitForBackupCompletion(t, database.ID, 0, 10*time.Second)
@@ -975,7 +986,8 @@ func Test_StartBackup_WhenBackupFails_DecrementsActiveTaskCount(t *testing.T) {
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -995,7 +1007,7 @@ func Test_StartBackup_WhenBackupFails_DecrementsActiveTaskCount(t *testing.T) {
t.Logf("Initial active tasks: %d", initialActiveTasks)
// Start backup
scheduler.StartBackup(database.ID, false)
scheduler.StartBackup(database, false)
// Wait for backup to fail
WaitForBackupCompletion(t, database.ID, 0, 10*time.Second)
@@ -1069,7 +1081,8 @@ func Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup(t *testing.T) {
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
@@ -1088,7 +1101,7 @@ func Test_StartBackup_WhenBackupAlreadyInProgress_SkipsNewBackup(t *testing.T) {
assert.NoError(t, err)
// Try to start a new backup - should be skipped
GetBackupsScheduler().StartBackup(database.ID, false)
GetBackupsScheduler().StartBackup(database, false)
time.Sleep(200 * time.Millisecond)
@@ -1140,7 +1153,8 @@ func Test_RunPendingBackups_WhenLastBackupFailedWithIsSkipRetry_SkipsBackupEvenW
TimeOfDay: &timeOfDay,
}
backupConfig.IsBackupsEnabled = true
backupConfig.StorePeriod = period.PeriodWeek
backupConfig.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig.RetentionTimePeriod = period.PeriodWeek
backupConfig.Storage = storage
backupConfig.StorageID = &storage.ID
backupConfig.IsRetryIfFailed = true
@@ -1242,7 +1256,8 @@ func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCa
TimeOfDay: &timeOfDay,
}
backupConfig1.IsBackupsEnabled = true
backupConfig1.StorePeriod = period.PeriodWeek
backupConfig1.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig1.RetentionTimePeriod = period.PeriodWeek
backupConfig1.Storage = storage
backupConfig1.StorageID = &storage.ID
@@ -1259,7 +1274,8 @@ func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCa
TimeOfDay: &timeOfDay,
}
backupConfig2.IsBackupsEnabled = true
backupConfig2.StorePeriod = period.PeriodWeek
backupConfig2.RetentionPolicyType = backups_config.RetentionPolicyTypeTimePeriod
backupConfig2.RetentionTimePeriod = period.PeriodWeek
backupConfig2.Storage = storage
backupConfig2.StorageID = &storage.ID
@@ -1268,10 +1284,10 @@ func Test_StartBackup_When2BackupsStartedForDifferentDatabases_BothUseCasesAreCa
// Start 2 backups simultaneously
t.Log("Starting backup for database1")
scheduler.StartBackup(database1.ID, false)
scheduler.StartBackup(database1, false)
t.Log("Starting backup for database2")
scheduler.StartBackup(database2.ID, false)
scheduler.StartBackup(database2, false)
// Wait up to 10 seconds for both backups to complete
t.Log("Waiting for both backups to complete...")

View File

@@ -8,6 +8,9 @@ import (
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
"databasus-backend/internal/features/backups/backups/usecases"
backups_config "databasus-backend/internal/features/backups/config"
@@ -19,9 +22,6 @@ import (
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"databasus-backend/internal/util/encryption"
"databasus-backend/internal/util/logger"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
func CreateTestRouter() *gin.Engine {

View File

@@ -1,17 +1,38 @@
package common
import backups_config "databasus-backend/internal/features/backups/config"
import (
"errors"
type BackupType string
"github.com/google/uuid"
const (
BackupTypeDefault BackupType = "DEFAULT" // For MySQL, MongoDB, PostgreSQL legacy (-Fc)
BackupTypeDirectory BackupType = "DIRECTORY" // PostgreSQL directory type (-Fd)
backups_config "databasus-backend/internal/features/backups/config"
)
type BackupMetadata struct {
EncryptionSalt *string
EncryptionIV *string
Encryption backups_config.BackupEncryption
Type BackupType
BackupID uuid.UUID `json:"backupId"`
EncryptionSalt *string `json:"encryptionSalt"`
EncryptionIV *string `json:"encryptionIV"`
Encryption backups_config.BackupEncryption `json:"encryption"`
}
// Validate checks that the metadata carries every field required to restore
// the backup: a non-nil backup ID, a non-empty encryption mode, and — when
// the backup is encrypted — both the salt and the IV.
// It returns a descriptive error for the first missing field, or nil when
// the metadata is complete.
func (m *BackupMetadata) Validate() error {
if m.BackupID == uuid.Nil {
return errors.New("backup ID is required")
}
if m.Encryption == "" {
return errors.New("encryption is required")
}
// Salt and IV are only mandatory for encrypted backups; unencrypted
// backups legitimately leave both pointers nil.
if m.Encryption == backups_config.BackupEncryptionEncrypted {
if m.EncryptionSalt == nil {
return errors.New("encryption salt is required when encryption is enabled")
}
if m.EncryptionIV == nil {
return errors.New("encryption IV is required when encryption is enabled")
}
}
return nil
}

View File

@@ -7,6 +7,10 @@ type CountingWriter struct {
BytesWritten int64
}
// NewCountingWriter returns a CountingWriter that delegates writes to the
// given writer while accumulating the total number of bytes written.
func NewCountingWriter(writer io.Writer) *CountingWriter {
	cw := &CountingWriter{Writer: writer}
	return cw
}
func (cw *CountingWriter) Write(p []byte) (n int, err error) {
n, err = cw.Writer.Write(p)
cw.BytesWritten += int64(n)
@@ -16,7 +20,3 @@ func (cw *CountingWriter) Write(p []byte) (n int, err error) {
// GetBytesWritten reports the total number of bytes successfully written
// through the wrapped writer so far.
// NOTE(review): Go convention would name this BytesWritten() (no Get prefix),
// but the exported name is kept unchanged for existing callers.
func (cw *CountingWriter) GetBytesWritten() int64 {
return cw.BytesWritten
}
func NewCountingWriter(writer io.Writer) *CountingWriter {
return &CountingWriter{Writer: writer}
}

View File

@@ -1,11 +1,8 @@
package backups
package backups_controllers
import (
"context"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_download "databasus-backend/internal/features/backups/backups/download"
"databasus-backend/internal/features/databases"
users_middleware "databasus-backend/internal/features/users/middleware"
"errors"
"fmt"
"io"
"net/http"
@@ -13,10 +10,18 @@ import (
"github.com/gin-gonic/gin"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_download "databasus-backend/internal/features/backups/backups/download"
backups_dto "databasus-backend/internal/features/backups/backups/dto"
backups_services "databasus-backend/internal/features/backups/backups/services"
"databasus-backend/internal/features/databases"
users_middleware "databasus-backend/internal/features/users/middleware"
files_utils "databasus-backend/internal/util/files"
)
type BackupController struct {
backupService *BackupService
backupService *backups_services.BackupService
}
func (c *BackupController) RegisterRoutes(router *gin.RouterGroup) {
@@ -41,7 +46,7 @@ func (c *BackupController) RegisterPublicRoutes(router *gin.RouterGroup) {
// @Param database_id query string true "Database ID"
// @Param limit query int false "Number of items per page" default(10)
// @Param offset query int false "Offset for pagination" default(0)
// @Success 200 {object} GetBackupsResponse
// @Success 200 {object} backups_dto.GetBackupsResponse
// @Failure 400
// @Failure 401
// @Failure 500
@@ -53,7 +58,7 @@ func (c *BackupController) GetBackups(ctx *gin.Context) {
return
}
var request GetBackupsRequest
var request backups_dto.GetBackupsRequest
if err := ctx.ShouldBindQuery(&request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -80,7 +85,7 @@ func (c *BackupController) GetBackups(ctx *gin.Context) {
// @Tags backups
// @Accept json
// @Produce json
// @Param request body MakeBackupRequest true "Backup creation data"
// @Param request body backups_dto.MakeBackupRequest true "Backup creation data"
// @Success 200 {object} map[string]string
// @Failure 400
// @Failure 401
@@ -93,7 +98,7 @@ func (c *BackupController) MakeBackup(ctx *gin.Context) {
return
}
var request MakeBackupRequest
var request backups_dto.MakeBackupRequest
if err := ctx.ShouldBindJSON(&request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -194,7 +199,7 @@ func (c *BackupController) GenerateDownloadToken(ctx *gin.Context) {
response, err := c.backupService.GenerateDownloadToken(user, id)
if err != nil {
if err == backups_download.ErrDownloadAlreadyInProgress {
if errors.Is(err, backups_download.ErrDownloadAlreadyInProgress) {
ctx.JSON(
http.StatusConflict,
gin.H{
@@ -245,7 +250,7 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
downloadToken, rateLimiter, err := c.backupService.ValidateDownloadToken(token)
if err != nil {
if err == backups_download.ErrDownloadAlreadyInProgress {
if errors.Is(err, backups_download.ErrDownloadAlreadyInProgress) {
ctx.JSON(
http.StatusConflict,
gin.H{
@@ -304,16 +309,11 @@ func (c *BackupController) GetFile(ctx *gin.Context) {
_, err = io.Copy(ctx.Writer, rateLimitedReader)
if err != nil {
fmt.Printf("Error streaming file: %v\n", err)
return
}
c.backupService.WriteAuditLogForDownload(downloadToken.UserID, backup, database)
}
type MakeBackupRequest struct {
DatabaseID uuid.UUID `json:"database_id" binding:"required"`
}
func (c *BackupController) generateBackupFilename(
backup *backups_core.Backup,
database *databases.Database,
@@ -322,7 +322,7 @@ func (c *BackupController) generateBackupFilename(
timestamp := backup.CreatedAt.Format("2006-01-02_15-04-05")
// Sanitize database name for filename (replace spaces and special chars)
safeName := sanitizeFilename(database.Name)
safeName := files_utils.SanitizeFilename(database.Name)
// Determine extension based on database type
extension := c.getBackupExtension(database.Type)
@@ -346,33 +346,6 @@ func (c *BackupController) getBackupExtension(
}
}
func sanitizeFilename(name string) string {
// Replace characters that are invalid in filenames
replacer := map[rune]rune{
' ': '_',
'/': '-',
'\\': '-',
':': '-',
'*': '-',
'?': '-',
'"': '-',
'<': '-',
'>': '-',
'|': '-',
}
result := make([]rune, 0, len(name))
for _, char := range name {
if replacement, exists := replacer[char]; exists {
result = append(result, replacement)
} else {
result = append(result, char)
}
}
return string(result)
}
func (c *BackupController) startDownloadHeartbeat(ctx context.Context, userID uuid.UUID) {
ticker := time.NewTicker(backups_download.GetDownloadHeartbeatInterval())
defer ticker.Stop()

View File

@@ -1,4 +1,4 @@
package backups
package backups_controllers
import (
"context"
@@ -7,6 +7,8 @@ import (
"io"
"log/slog"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
@@ -18,13 +20,18 @@ import (
"databasus-backend/internal/config"
audit_logs "databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/backups/backups/backuping"
backups_common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_download "databasus-backend/internal/features/backups/backups/download"
backups_dto "databasus-backend/internal/features/backups/backups/dto"
backups_services "databasus-backend/internal/features/backups/backups/services"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/databases/databases/postgresql"
"databasus-backend/internal/features/storages"
local_storage "databasus-backend/internal/features/storages/models/local"
task_cancellation "databasus-backend/internal/features/tasks/cancellation"
users_dto "databasus-backend/internal/features/users/dto"
users_enums "databasus-backend/internal/features/users/enums"
users_services "databasus-backend/internal/features/users/services"
@@ -32,6 +39,7 @@ import (
workspaces_models "databasus-backend/internal/features/workspaces/models"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"databasus-backend/internal/util/encryption"
files_utils "databasus-backend/internal/util/files"
test_utils "databasus-backend/internal/util/testing"
"databasus-backend/internal/util/tools"
)
@@ -114,7 +122,7 @@ func Test_GetBackups_PermissionsEnforced(t *testing.T) {
)
if tt.expectSuccess {
var response GetBackupsResponse
var response backups_dto.GetBackupsResponse
err := json.Unmarshal(testResp.Body, &response)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(response.Backups), 1)
@@ -209,7 +217,7 @@ func Test_CreateBackup_PermissionsEnforced(t *testing.T) {
testUserToken = nonMember.Token
}
request := MakeBackupRequest{DatabaseID: database.ID}
request := backups_dto.MakeBackupRequest{DatabaseID: database.ID}
testResp := test_utils.MakePostRequest(
t,
router,
@@ -240,7 +248,7 @@ func Test_CreateBackup_AuditLogWritten(t *testing.T) {
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
enableBackupForDatabase(database.ID)
request := MakeBackupRequest{DatabaseID: database.ID}
request := backups_dto.MakeBackupRequest{DatabaseID: database.ID}
test_utils.MakePostRequest(
t,
router,
@@ -368,7 +376,7 @@ func Test_DeleteBackup_PermissionsEnforced(t *testing.T) {
ownerUser, err := userService.GetUserFromToken(owner.Token)
assert.NoError(t, err)
response, err := GetBackupService().GetBackups(ownerUser, database.ID, 10, 0)
response, err := backups_services.GetBackupService().GetBackups(ownerUser, database.ID, 10, 0)
assert.NoError(t, err)
assert.Equal(t, 0, len(response.Backups))
}
@@ -956,7 +964,7 @@ func Test_SanitizeFilename(t *testing.T) {
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
result := sanitizeFilename(tt.input)
result := files_utils.SanitizeFilename(tt.input)
assert.Equal(t, tt.expected, result)
})
}
@@ -994,7 +1002,7 @@ func Test_CancelBackup_InProgressBackup_SuccessfullyCancelled(t *testing.T) {
assert.NoError(t, err)
// Register a cancellable context for the backup
GetBackupService().taskCancelManager.RegisterTask(backup.ID, func() {})
task_cancellation.GetTaskCancelManager().RegisterTask(backup.ID, func() {})
resp := test_utils.MakePostRequest(
t,
@@ -1086,7 +1094,7 @@ func Test_ConcurrentDownloadPrevention(t *testing.T) {
time.Sleep(50 * time.Millisecond)
service := GetBackupService()
service := backups_services.GetBackupService()
if !service.IsDownloadInProgress(owner.UserID) {
t.Log("Warning: First download completed before we could test concurrency")
<-downloadComplete
@@ -1187,7 +1195,7 @@ func Test_GenerateDownloadToken_BlockedWhenDownloadInProgress(t *testing.T) {
time.Sleep(50 * time.Millisecond)
service := GetBackupService()
service := backups_services.GetBackupService()
if !service.IsDownloadInProgress(owner.UserID) {
t.Log("Warning: First download completed before we could test token generation blocking")
<-downloadComplete
@@ -1244,6 +1252,86 @@ func Test_GenerateDownloadToken_BlockedWhenDownloadInProgress(t *testing.T) {
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// Test_MakeBackup_VerifyBackupAndMetadataFilesExistInStorage triggers a backup
// through the public API and then verifies that both the backup file and its
// sidecar "<FileName>.metadata" file were written to the configured storage,
// and that the metadata content matches the backup record in the repository.
func Test_MakeBackup_VerifyBackupAndMetadataFilesExistInStorage(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database, _, storage := createTestDatabaseWithBackups(workspace, owner, router)
// A live backuper node and scheduler are required for the backup to run.
backuperNode := backuping.CreateTestBackuperNode()
backuperCancel := backuping.StartBackuperNodeForTest(t, backuperNode)
defer backuping.StopBackuperNodeForTest(t, backuperCancel, backuperNode)
scheduler := backuping.CreateTestScheduler()
schedulerCancel := backuping.StartSchedulerForTest(t, scheduler)
defer schedulerCancel()
backupRepo := &backups_core.BackupRepository{}
// Snapshot existing backups so the newly created one can be detected.
initialBackups, err := backupRepo.FindByDatabaseID(database.ID)
assert.NoError(t, err)
request := backups_dto.MakeBackupRequest{DatabaseID: database.ID}
test_utils.MakePostRequest(
t,
router,
"/api/v1/backups",
"Bearer "+owner.Token,
request,
http.StatusOK,
)
backuping.WaitForBackupCompletion(t, database.ID, len(initialBackups), 30*time.Second)
backups, err := backupRepo.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Greater(t, len(backups), len(initialBackups))
backup := backups[0]
assert.Equal(t, backups_core.BackupStatusCompleted, backup.Status)
// The backup file itself must be retrievable from the storage it was saved to.
storageService := storages.GetStorageService()
backupStorage, err := storageService.GetStorageByID(backup.StorageID)
assert.NoError(t, err)
encryptor := encryption.GetFieldEncryptor()
backupFile, err := backupStorage.GetFile(encryptor, backup.FileName)
assert.NoError(t, err)
backupFile.Close()
// The metadata sidecar lives next to the backup under "<FileName>.metadata".
metadataFile, err := backupStorage.GetFile(encryptor, backup.FileName+".metadata")
assert.NoError(t, err)
metadataContent, err := io.ReadAll(metadataFile)
assert.NoError(t, err)
metadataFile.Close()
var storageMetadata backups_common.BackupMetadata
err = json.Unmarshal(metadataContent, &storageMetadata)
assert.NoError(t, err)
// Metadata written to storage must agree with the persisted backup record.
assert.Equal(t, backup.ID, storageMetadata.BackupID)
if backup.EncryptionSalt != nil && storageMetadata.EncryptionSalt != nil {
assert.Equal(t, *backup.EncryptionSalt, *storageMetadata.EncryptionSalt)
}
if backup.EncryptionIV != nil && storageMetadata.EncryptionIV != nil {
assert.Equal(t, *backup.EncryptionIV, *storageMetadata.EncryptionIV)
}
assert.Equal(t, backup.Encryption, storageMetadata.Encryption)
// Cleanup: remove the backup record and all test fixtures.
err = backupRepo.DeleteByID(backup.ID)
assert.NoError(t, err)
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
func createTestRouter() *gin.Engine {
return CreateTestRouter()
}
@@ -1366,11 +1454,24 @@ func createTestBackup(
panic(err)
}
storages, err := storages.GetStorageService().GetStorages(user, *database.WorkspaceID)
if err != nil || len(storages) == 0 {
loadedStorages, err := storages.GetStorageService().GetStorages(user, *database.WorkspaceID)
if err != nil || len(loadedStorages) == 0 {
panic("No storage found for workspace")
}
// Filter out system storages
var nonSystemStorages []*storages.Storage
for _, storage := range loadedStorages {
if !storage.IsSystem {
nonSystemStorages = append(nonSystemStorages, storage)
}
}
if len(nonSystemStorages) == 0 {
panic("No non-system storage found for workspace")
}
storages := nonSystemStorages
backup := &backups_core.Backup{
ID: uuid.New(),
DatabaseID: database.ID,
@@ -1394,7 +1495,7 @@ func createTestBackup(
context.Background(),
encryption.GetFieldEncryptor(),
logger,
backup.ID,
backup.ID.String(),
reader,
); err != nil {
panic(fmt.Sprintf("Failed to create test backup file: %v", err))
@@ -1404,7 +1505,7 @@ func createTestBackup(
}
func createExpiredDownloadToken(backupID, userID uuid.UUID) string {
tokenService := GetBackupService().downloadTokenService
tokenService := backups_download.GetDownloadTokenService()
token, err := tokenService.Generate(backupID, userID)
if err != nil {
panic(fmt.Sprintf("Failed to generate download token: %v", err))
@@ -1707,3 +1808,84 @@ func Test_BandwidthThrottling_DynamicAdjustment(t *testing.T) {
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}
// Test_DeleteBackup_RemovesBackupAndMetadataFilesFromDisk creates a backup via
// the API, confirms the backup file and its ".metadata" sidecar exist on disk
// under the data folder, then deletes the backup through the API and asserts
// both files are removed from disk.
func Test_DeleteBackup_RemovesBackupAndMetadataFilesFromDisk(t *testing.T) {
router := createTestRouter()
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", owner, router)
database := createTestDatabase("Test Database", workspace.ID, owner.Token, router)
storage := createTestStorage(workspace.ID)
// Enable backups for the test database and point them at the test storage.
configService := backups_config.GetBackupConfigService()
backupConfig, err := configService.GetBackupConfigByDbId(database.ID)
assert.NoError(t, err)
backupConfig.IsBackupsEnabled = true
backupConfig.StorageID = &storage.ID
backupConfig.Storage = storage
_, err = configService.SaveBackupConfig(backupConfig)
assert.NoError(t, err)
defer func() {
databases.RemoveTestDatabase(database)
time.Sleep(50 * time.Millisecond)
storages.RemoveTestStorage(storage.ID)
workspaces_testing.RemoveTestWorkspace(workspace, router)
}()
// A live backuper node and scheduler are required for the backup to run.
backuperNode := backuping.CreateTestBackuperNode()
backuperCancel := backuping.StartBackuperNodeForTest(t, backuperNode)
defer backuping.StopBackuperNodeForTest(t, backuperCancel, backuperNode)
scheduler := backuping.CreateTestScheduler()
schedulerCancel := backuping.StartSchedulerForTest(t, scheduler)
defer schedulerCancel()
backupRepo := &backups_core.BackupRepository{}
initialBackups, err := backupRepo.FindByDatabaseID(database.ID)
assert.NoError(t, err)
request := backups_dto.MakeBackupRequest{DatabaseID: database.ID}
test_utils.MakePostRequest(
t,
router,
"/api/v1/backups",
"Bearer "+owner.Token,
request,
http.StatusOK,
)
backuping.WaitForBackupCompletion(t, database.ID, len(initialBackups), 30*time.Second)
backups, err := backupRepo.FindByDatabaseID(database.ID)
assert.NoError(t, err)
assert.Greater(t, len(backups), len(initialBackups))
backup := backups[0]
assert.Equal(t, backups_core.BackupStatusCompleted, backup.Status)
// Both files must exist on disk before the delete call.
dataFolder := config.GetEnv().DataFolder
backupFilePath := filepath.Join(dataFolder, backup.FileName)
metadataFilePath := filepath.Join(dataFolder, backup.FileName+".metadata")
_, err = os.Stat(backupFilePath)
assert.NoError(t, err, "backup file should exist on disk before deletion")
_, err = os.Stat(metadataFilePath)
assert.NoError(t, err, "metadata file should exist on disk before deletion")
test_utils.MakeDeleteRequest(
t,
router,
fmt.Sprintf("/api/v1/backups/%s", backup.ID.String()),
"Bearer "+owner.Token,
http.StatusNoContent,
)
// After deletion both the backup and its metadata must be gone from disk.
_, err = os.Stat(backupFilePath)
assert.True(t, os.IsNotExist(err), "backup file should be removed from disk after deletion")
_, err = os.Stat(metadataFilePath)
assert.True(t, os.IsNotExist(err), "metadata file should be removed from disk after deletion")
}

View File

@@ -0,0 +1,23 @@
package backups_controllers
import (
backups_services "databasus-backend/internal/features/backups/backups/services"
"databasus-backend/internal/features/databases"
)
// backupController is the package-level singleton wired to the shared backup service.
var backupController = &BackupController{
backups_services.GetBackupService(),
}
// GetBackupController returns the shared BackupController instance.
func GetBackupController() *BackupController {
return backupController
}
// postgresWalBackupController is the package-level singleton serving the
// agent-facing PostgreSQL WAL backup endpoints.
var postgresWalBackupController = &PostgreWalBackupController{
databases.GetDatabaseService(),
backups_services.GetWalService(),
}
// GetPostgresWalBackupController returns the shared PostgreWalBackupController instance.
func GetPostgresWalBackupController() *PostgreWalBackupController {
return postgresWalBackupController
}

View File

@@ -0,0 +1,291 @@
package backups_controllers
import (
"io"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_dto "databasus-backend/internal/features/backups/backups/dto"
backups_services "databasus-backend/internal/features/backups/backups/services"
"databasus-backend/internal/features/databases"
)
// PostgreWalBackupController handles WAL backup endpoints used by the databasus-cli agent.
// Authentication is via a plain agent token in the Authorization header (no Bearer prefix).
type PostgreWalBackupController struct {
databaseService *databases.DatabaseService // database lookup service
walService *backups_services.PostgreWalBackupService // WAL upload / error-reporting / restore-plan logic
}
// RegisterRoutes mounts the agent-facing WAL backup endpoints under
// the /backups/postgres/wal route group.
func (c *PostgreWalBackupController) RegisterRoutes(router *gin.RouterGroup) {
walRoutes := router.Group("/backups/postgres/wal")
walRoutes.GET("/next-full-backup-time", c.GetNextFullBackupTime)
walRoutes.POST("/error", c.ReportError)
walRoutes.POST("/upload", c.Upload)
walRoutes.GET("/restore/plan", c.GetRestorePlan)
walRoutes.GET("/restore/download", c.DownloadBackupFile)
}
// GetNextFullBackupTime
// @Summary Get next full backup time
// @Description Returns the next scheduled full basebackup time for the authenticated database
// @Tags backups-wal
// @Produce json
// @Security AgentToken
// @Success 200 {object} backups_dto.GetNextFullBackupTimeResponse
// @Failure 401 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /backups/postgres/wal/next-full-backup-time [get]
func (c *PostgreWalBackupController) GetNextFullBackupTime(ctx *gin.Context) {
// Resolve the database from the agent token; any failure is treated as 401.
database, err := c.getDatabase(ctx)
if err != nil {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid agent token"})
return
}
response, err := c.walService.GetNextFullBackupTime(database)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusOK, response)
}
// ReportError
// @Summary Report agent error
// @Description Records a fatal error from the agent against the database record and marks it as errored
// @Tags backups-wal
// @Accept json
// @Security AgentToken
// @Param request body backups_dto.ReportErrorRequest true "Error details"
// @Success 200
// @Failure 400 {object} map[string]string
// @Failure 401 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /backups/postgres/wal/error [post]
func (c *PostgreWalBackupController) ReportError(ctx *gin.Context) {
// Resolve the database from the agent token; any failure is treated as 401.
database, err := c.getDatabase(ctx)
if err != nil {
ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid agent token"})
return
}
var request backups_dto.ReportErrorRequest
if err := ctx.ShouldBindJSON(&request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
// Delegate persisting the agent-reported error to the WAL service.
if err := c.walService.ReportError(database, request.Error); err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
ctx.Status(http.StatusOK)
}
// Upload
// @Summary Stream upload a basebackup or WAL segment
// @Description Accepts a zstd-compressed binary stream and stores it in the database's configured storage.
// The server generates the storage filename; agents do not control the destination path.
// For WAL segment uploads the server validates the WAL chain and returns 409 if a gap is detected
// or 400 if no full backup exists yet (agent should trigger a full basebackup in both cases).
// @Tags backups-wal
// @Accept application/octet-stream
// @Produce json
// @Security AgentToken
// @Param X-Upload-Type header string true "Upload type" Enums(basebackup, wal)
// @Param X-Wal-Segment-Name header string false "24-hex WAL segment identifier (required for wal uploads, e.g. 0000000100000001000000AB)"
// @Param X-Wal-Segment-Size header int false "WAL segment size in bytes reported by the PostgreSQL instance (default: 16777216)"
// @Param fullBackupWalStartSegment query string false "First WAL segment needed to make the basebackup consistent (required for basebackup uploads)"
// @Param fullBackupWalStopSegment query string false "Last WAL segment included in the basebackup (required for basebackup uploads)"
// @Success 204
// @Failure 400 {object} backups_dto.UploadGapResponse "No full backup exists (error: no_full_backup)"
// @Failure 401 {object} map[string]string
// @Failure 409 {object} backups_dto.UploadGapResponse "WAL chain gap detected (error: gap_detected)"
// @Failure 500 {object} map[string]string
// @Router /backups/postgres/wal/upload [post]
func (c *PostgreWalBackupController) Upload(ctx *gin.Context) {
	database, authErr := c.getDatabase(ctx)
	if authErr != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid agent token"})
		return
	}

	// The upload kind drives which extra metadata is mandatory.
	uploadType := backups_core.PgWalUploadType(ctx.GetHeader("X-Upload-Type"))
	switch uploadType {
	case backups_core.PgWalUploadTypeBasebackup, backups_core.PgWalUploadTypeWal:
		// valid
	default:
		ctx.JSON(
			http.StatusBadRequest,
			gin.H{"error": "X-Upload-Type must be 'basebackup' or 'wal'"},
		)
		return
	}

	var walSegmentName string
	switch uploadType {
	case backups_core.PgWalUploadTypeWal:
		// WAL uploads must name the segment so the chain can be validated.
		walSegmentName = ctx.GetHeader("X-Wal-Segment-Name")
		if walSegmentName == "" {
			ctx.JSON(
				http.StatusBadRequest,
				gin.H{"error": "X-Wal-Segment-Name is required for wal uploads"},
			)
			return
		}
	case backups_core.PgWalUploadTypeBasebackup:
		// Basebackups must carry the WAL range needed to make them consistent.
		if ctx.Query("fullBackupWalStartSegment") == "" ||
			ctx.Query("fullBackupWalStopSegment") == "" {
			ctx.JSON(
				http.StatusBadRequest,
				gin.H{
					"error": "fullBackupWalStartSegment and fullBackupWalStopSegment are required for basebackup uploads",
				},
			)
			return
		}
	}

	// Segment size is optional; zero tells the service to use its default.
	var segmentSizeBytes int64
	if raw := ctx.GetHeader("X-Wal-Segment-Size"); raw != "" {
		parsed, parseErr := strconv.ParseInt(raw, 10, 64)
		if parseErr != nil || parsed <= 0 {
			ctx.JSON(
				http.StatusBadRequest,
				gin.H{"error": "X-Wal-Segment-Size must be a positive integer"},
			)
			return
		}
		segmentSizeBytes = parsed
	}

	gapResp, uploadErr := c.walService.UploadWal(
		ctx.Request.Context(),
		database,
		uploadType,
		walSegmentName,
		ctx.Query("fullBackupWalStartSegment"),
		ctx.Query("fullBackupWalStopSegment"),
		segmentSizeBytes,
		ctx.Request.Body,
	)
	if uploadErr != nil {
		ctx.JSON(http.StatusInternalServerError, gin.H{"error": uploadErr.Error()})
		return
	}
	if gapResp == nil {
		ctx.Status(http.StatusNoContent)
		return
	}
	// Chain problems: 400 when no full backup anchors the chain, 409 for gaps.
	if gapResp.Error == "no_full_backup" {
		ctx.JSON(http.StatusBadRequest, gapResp)
		return
	}
	ctx.JSON(http.StatusConflict, gapResp)
}
// GetRestorePlan
// @Summary Get restore plan
// @Description Resolves the full backup and all required WAL segments needed for recovery. Validates the WAL chain is continuous.
// @Tags backups-wal
// @Produce json
// @Security AgentToken
// @Param backupId query string false "UUID of a specific full backup to restore from; defaults to the most recent"
// @Success 200 {object} backups_dto.GetRestorePlanResponse
// @Failure 400 {object} map[string]string "Broken WAL chain or no backups available"
// @Failure 401 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /backups/postgres/wal/restore/plan [get]
func (c *PostgreWalBackupController) GetRestorePlan(ctx *gin.Context) {
	database, authErr := c.getDatabase(ctx)
	if authErr != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid agent token"})
		return
	}
	// backupId is optional; when absent the service resolves the newest full backup.
	var requestedBackupID *uuid.UUID
	if rawID := ctx.Query("backupId"); rawID != "" {
		parsedID, parseErr := uuid.Parse(rawID)
		if parseErr != nil {
			ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid backupId format"})
			return
		}
		requestedBackupID = &parsedID
	}
	plan, planErr, svcErr := c.walService.GetRestorePlan(database, requestedBackupID)
	switch {
	case svcErr != nil:
		ctx.JSON(http.StatusInternalServerError, gin.H{"error": svcErr.Error()})
	case planErr != nil:
		// Agent-facing plan problems (no backups, broken chain) map to 400.
		ctx.JSON(http.StatusBadRequest, planErr)
	default:
		ctx.JSON(http.StatusOK, plan)
	}
}
// DownloadBackupFile
// @Summary Download a backup or WAL segment file for restore
// @Description Retrieves the backup file by ID (validated against the authenticated database), decrypts it server-side if encrypted, and streams the zstd-compressed result to the agent
// @Tags backups-wal
// @Produce application/octet-stream
// @Security AgentToken
// @Param backupId query string true "Backup ID from the restore plan response"
// @Success 200 {file} file
// @Failure 400 {object} map[string]string
// @Failure 401 {object} map[string]string
// @Router /backups/postgres/wal/restore/download [get]
func (c *PostgreWalBackupController) DownloadBackupFile(ctx *gin.Context) {
	database, authErr := c.getDatabase(ctx)
	if authErr != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid agent token"})
		return
	}
	rawID := ctx.Query("backupId")
	if rawID == "" {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "backupId is required"})
		return
	}
	backupID, parseErr := uuid.Parse(rawID)
	if parseErr != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid backupId format"})
		return
	}
	reader, downloadErr := c.walService.DownloadBackupFile(database, backupID)
	if downloadErr != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": downloadErr.Error()})
		return
	}
	defer func() { _ = reader.Close() }()
	ctx.Header("Content-Type", "application/octet-stream")
	ctx.Status(http.StatusOK)
	// Status is committed before streaming; a mid-stream failure cannot change
	// the response code anymore, so the copy error is intentionally dropped.
	_, _ = io.Copy(ctx.Writer, reader)
}
// getDatabase resolves the database record for the agent token carried in the
// Authorization header. A lookup error signals an unknown/invalid token, which
// the handlers in this file translate to 401.
func (c *PostgreWalBackupController) getDatabase(
	ctx *gin.Context,
) (*databases.Database, error) {
	token := ctx.GetHeader("Authorization")
	return c.databaseService.GetDatabaseByAgentToken(token)
}

View File

@@ -1,17 +1,17 @@
package backups
package backups_controllers
import (
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
workspaces_controllers "databasus-backend/internal/features/workspaces/controllers"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
func CreateTestRouter() *gin.Engine {
@@ -41,7 +41,7 @@ func WaitForBackupCompletion(
deadline := time.Now().UTC().Add(timeout)
for time.Now().UTC().Before(deadline) {
backups, err := backupRepository.FindByDatabaseID(databaseID)
backups, err := backups_core.GetBackupRepository().FindByDatabaseID(databaseID)
if err != nil {
t.Logf("WaitForBackupCompletion: error finding backups: %v", err)
time.Sleep(50 * time.Millisecond)

View File

@@ -0,0 +1,7 @@
package backups_core
// backupRepository is the package-level singleton shared by all callers.
var backupRepository = &BackupRepository{}

// GetBackupRepository returns the shared BackupRepository instance.
func GetBackupRepository() *BackupRepository {
	return backupRepository
}

View File

@@ -8,3 +8,10 @@ const (
BackupStatusFailed BackupStatus = "FAILED"
BackupStatusCanceled BackupStatus = "CANCELED"
)
// PgWalUploadType distinguishes the two payload kinds an agent can stream to
// the WAL upload endpoint.
type PgWalUploadType string

const (
	// PgWalUploadTypeBasebackup marks a full basebackup stream.
	PgWalUploadTypeBasebackup PgWalUploadType = "basebackup"
	// PgWalUploadTypeWal marks a single WAL segment stream.
	PgWalUploadTypeWal PgWalUploadType = "wal"
)

View File

@@ -8,8 +8,6 @@ import (
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/notifiers"
"databasus-backend/internal/features/storages"
"github.com/google/uuid"
)
type NotificationSender interface {
@@ -23,7 +21,7 @@ type NotificationSender interface {
type CreateBackupUsecase interface {
Execute(
ctx context.Context,
backupID uuid.UUID,
backup *Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,

View File

@@ -1,14 +1,25 @@
package backups_core
import (
backups_config "databasus-backend/internal/features/backups/config"
"fmt"
"time"
"github.com/google/uuid"
backups_config "databasus-backend/internal/features/backups/config"
files_utils "databasus-backend/internal/util/files"
)
type PgWalBackupType string
const (
PgWalBackupTypeFullBackup PgWalBackupType = "PG_FULL_BACKUP"
PgWalBackupTypeWalSegment PgWalBackupType = "PG_WAL_SEGMENT"
)
type Backup struct {
ID uuid.UUID `json:"id" gorm:"column:id;type:uuid;primaryKey"`
ID uuid.UUID `json:"id" gorm:"column:id;type:uuid;primaryKey"`
FileName string `json:"fileName" gorm:"column:file_name;type:text;not null"`
DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;type:uuid;not null"`
StorageID uuid.UUID `json:"storageId" gorm:"column:storage_id;type:uuid;not null"`
@@ -25,5 +36,23 @@ type Backup struct {
EncryptionIV *string `json:"-" gorm:"column:encryption_iv"`
Encryption backups_config.BackupEncryption `json:"encryption" gorm:"column:encryption;type:text;not null;default:'NONE'"`
// Postgres WAL backup specific fields
PgWalBackupType *PgWalBackupType `json:"pgWalBackupType" gorm:"column:pg_wal_backup_type;type:text"`
PgFullBackupWalStartSegmentName *string `json:"pgFullBackupWalStartSegmentName" gorm:"column:pg_wal_start_segment;type:text"`
PgFullBackupWalStopSegmentName *string `json:"pgFullBackupWalStopSegmentName" gorm:"column:pg_wal_stop_segment;type:text"`
PgVersion *string `json:"pgVersion" gorm:"column:pg_version;type:text"`
PgWalSegmentName *string `json:"pgWalSegmentName" gorm:"column:pg_wal_segment_name;type:text"`
CreatedAt time.Time `json:"createdAt" gorm:"column:created_at"`
}
// GenerateFilename sets b.FileName to "<sanitized-db-name>-<UTC timestamp>-<backup id>".
// The backup ID suffix keeps names unique even when two backups share the same
// second-resolution timestamp.
func (b *Backup) GenerateFilename(dbName string) {
	timestamp := time.Now().UTC()
	b.FileName = fmt.Sprintf(
		"%s-%s-%s",
		files_utils.SanitizeFilename(dbName),
		timestamp.Format("20060102-150405"),
		b.ID.String(),
	)
}

View File

@@ -1,13 +1,13 @@
package backups_core
import (
"databasus-backend/internal/storage"
"errors"
"time"
"github.com/google/uuid"
"gorm.io/gorm"
"databasus-backend/internal/storage"
)
type BackupRepository struct{}
@@ -88,7 +88,7 @@ func (r *BackupRepository) FindLastByDatabaseID(databaseID uuid.UUID) (*Backup,
Where("database_id = ?", databaseID).
Order("created_at DESC").
First(&backup).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
@@ -245,3 +245,134 @@ func (r *BackupRepository) FindOldestByDatabaseExcludingInProgress(
return backups, nil
}
// FindCompletedFullWalBackupByID fetches a specific completed full WAL backup
// belonging to the given database. Returns (nil, nil) when no matching row exists.
func (r *BackupRepository) FindCompletedFullWalBackupByID(
	databaseID uuid.UUID,
	backupID uuid.UUID,
) (*Backup, error) {
	result := &Backup{}
	queryErr := storage.GetDb().
		Where(
			"database_id = ? AND id = ? AND pg_wal_backup_type = ? AND status = ?",
			databaseID,
			backupID,
			PgWalBackupTypeFullBackup,
			BackupStatusCompleted,
		).
		First(result).Error
	switch {
	case queryErr == nil:
		return result, nil
	case errors.Is(queryErr, gorm.ErrRecordNotFound):
		// Absence is not an error for callers; they check for nil.
		return nil, nil
	default:
		return nil, queryErr
	}
}
// FindCompletedWalSegmentsAfter lists all completed WAL segment backups for a
// database whose segment name is >= afterSegmentName, ordered ascending.
//
// Note the comparison is inclusive despite the "After" in the name: the
// boundary segment itself is returned, and callers are expected to filter it
// out if they only want strictly-later segments.
func (r *BackupRepository) FindCompletedWalSegmentsAfter(
	databaseID uuid.UUID,
	afterSegmentName string,
) ([]*Backup, error) {
	var backups []*Backup
	err := storage.
		GetDb().
		Where(
			"database_id = ? AND pg_wal_backup_type = ? AND pg_wal_segment_name >= ? AND status = ?",
			databaseID,
			PgWalBackupTypeWalSegment,
			afterSegmentName,
			BackupStatusCompleted,
		).
		// Lexicographic order of WAL segment names doubles as chain order here.
		Order("pg_wal_segment_name ASC").
		Find(&backups).Error
	if err != nil {
		return nil, err
	}
	return backups, nil
}
}
// FindLastCompletedFullWalBackupByDatabaseID returns the newest completed full
// WAL backup for a database, or (nil, nil) when none exists yet.
func (r *BackupRepository) FindLastCompletedFullWalBackupByDatabaseID(
	databaseID uuid.UUID,
) (*Backup, error) {
	var latest Backup
	queryErr := storage.GetDb().
		Where(
			"database_id = ? AND pg_wal_backup_type = ? AND status = ?",
			databaseID,
			PgWalBackupTypeFullBackup,
			BackupStatusCompleted,
		).
		Order("created_at DESC").
		First(&latest).Error
	// "No rows" means the database simply has no full backup yet.
	if errors.Is(queryErr, gorm.ErrRecordNotFound) {
		return nil, nil
	}
	if queryErr != nil {
		return nil, queryErr
	}
	return &latest, nil
}
// FindWalSegmentByName returns the WAL segment backup row with the given
// segment name for a database, or (nil, nil) when no such row exists.
//
// NOTE(review): unlike the sibling WAL queries in this file, this one does NOT
// filter on status, so FAILED and IN_PROGRESS rows match too — confirm that
// callers using this for duplicate detection expect that.
func (r *BackupRepository) FindWalSegmentByName(
	databaseID uuid.UUID,
	segmentName string,
) (*Backup, error) {
	var backup Backup
	err := storage.
		GetDb().
		Where(
			"database_id = ? AND pg_wal_backup_type = ? AND pg_wal_segment_name = ?",
			databaseID,
			PgWalBackupTypeWalSegment,
			segmentName,
		).
		First(&backup).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			// Absence is reported as (nil, nil), not as an error.
			return nil, nil
		}
		return nil, err
	}
	return &backup, nil
}
}
// FindLastWalSegmentAfter returns the highest-named completed WAL segment that
// is strictly after afterSegmentName, or (nil, nil) when none exists.
func (r *BackupRepository) FindLastWalSegmentAfter(
	databaseID uuid.UUID,
	afterSegmentName string,
) (*Backup, error) {
	newest := &Backup{}
	queryErr := storage.GetDb().
		Where(
			"database_id = ? AND pg_wal_backup_type = ? AND pg_wal_segment_name > ? AND status = ?",
			databaseID,
			PgWalBackupTypeWalSegment,
			afterSegmentName,
			BackupStatusCompleted,
		).
		// Descending order + First yields the chain tail in one query.
		Order("pg_wal_segment_name DESC").
		First(newest).Error
	switch {
	case queryErr == nil:
		return newest, nil
	case errors.Is(queryErr, gorm.ErrRecordNotFound):
		return nil, nil
	default:
		return nil, queryErr
	}
}

View File

@@ -13,9 +13,11 @@ var downloadTokenRepository = &DownloadTokenRepository{}
var downloadTracker = NewDownloadTracker(cache_utils.GetValkeyClient())
var bandwidthManager *BandwidthManager
var downloadTokenService *DownloadTokenService
var downloadTokenBackgroundService *DownloadTokenBackgroundService
var (
bandwidthManager *BandwidthManager
downloadTokenService *DownloadTokenService
downloadTokenBackgroundService *DownloadTokenBackgroundService
)
func init() {
env := config.GetEnv()

View File

@@ -66,9 +66,7 @@ func (rl *RateLimiter) Wait(bytes int64) {
tokensNeeded := float64(bytes) - rl.availableTokens
waitTime := time.Duration(tokensNeeded/float64(rl.bytesPerSecond)*1000) * time.Millisecond
if waitTime < time.Millisecond {
waitTime = time.Millisecond
}
waitTime = max(waitTime, time.Millisecond)
rl.mu.Unlock()
time.Sleep(waitTime)

View File

@@ -2,12 +2,14 @@ package backups_download
import (
"crypto/rand"
"databasus-backend/internal/storage"
"encoding/base64"
"errors"
"time"
"github.com/google/uuid"
"gorm.io/gorm"
"databasus-backend/internal/storage"
)
type DownloadTokenRepository struct{}
@@ -28,9 +30,8 @@ func (r *DownloadTokenRepository) FindByToken(token string) (*DownloadToken, err
err := storage.GetDb().
Where("token = ?", token).
First(&downloadToken).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, err

View File

@@ -1,12 +1,13 @@
package backups_download
import (
cache_utils "databasus-backend/internal/util/cache"
"errors"
"time"
"github.com/google/uuid"
"github.com/valkey-io/valkey-go"
cache_utils "databasus-backend/internal/util/cache"
)
const (
@@ -16,9 +17,7 @@ const (
downloadHeartbeatDelay = 3 * time.Second
)
var (
ErrDownloadAlreadyInProgress = errors.New("download already in progress for this user")
)
var ErrDownloadAlreadyInProgress = errors.New("download already in progress for this user")
type DownloadTracker struct {
cache *cache_utils.CacheUtil[string]

View File

@@ -1,29 +0,0 @@
package backups
import (
backups_core "databasus-backend/internal/features/backups/backups/core"
"databasus-backend/internal/features/backups/backups/encryption"
"io"
)
type GetBackupsRequest struct {
DatabaseID string `form:"database_id" binding:"required"`
Limit int `form:"limit"`
Offset int `form:"offset"`
}
type GetBackupsResponse struct {
Backups []*backups_core.Backup `json:"backups"`
Total int64 `json:"total"`
Limit int `json:"limit"`
Offset int `json:"offset"`
}
type DecryptionReaderCloser struct {
*encryption.DecryptionReader
BaseReader io.ReadCloser
}
func (r *DecryptionReaderCloser) Close() error {
return r.BaseReader.Close()
}

View File

@@ -0,0 +1,79 @@
package backups_dto
import (
"io"
"time"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
"databasus-backend/internal/features/backups/backups/encryption"
)
// GetBackupsRequest is the query-string binding for listing backups of a database.
type GetBackupsRequest struct {
	DatabaseID string `form:"database_id" binding:"required"`
	Limit      int    `form:"limit"`
	Offset     int    `form:"offset"`
}

// GetBackupsResponse is a paginated page of backups plus the total row count.
type GetBackupsResponse struct {
	Backups []*backups_core.Backup `json:"backups"`
	Total   int64                  `json:"total"`
	Limit   int                    `json:"limit"`
	Offset  int                    `json:"offset"`
}

// DecryptionReaderCloser wraps a DecryptionReader so that closing it closes
// the underlying storage reader it decrypts from.
type DecryptionReaderCloser struct {
	*encryption.DecryptionReader
	BaseReader io.ReadCloser
}

// Close releases the underlying storage reader.
func (r *DecryptionReaderCloser) Close() error {
	return r.BaseReader.Close()
}

// MakeBackupRequest triggers a manual backup for the given database.
type MakeBackupRequest struct {
	DatabaseID uuid.UUID `json:"database_id" binding:"required"`
}

// GetNextFullBackupTimeResponse carries the next scheduled full-backup time;
// nil means no trigger time could be computed.
type GetNextFullBackupTimeResponse struct {
	NextFullBackupTime *time.Time `json:"nextFullBackupTime"`
}

// ReportErrorRequest is the agent's error report payload.
type ReportErrorRequest struct {
	Error string `json:"error" binding:"required"`
}

// UploadGapResponse tells the agent why a WAL upload was rejected
// (e.g. "no_full_backup" or "gap_detected") and which segment was expected.
type UploadGapResponse struct {
	Error               string `json:"error"`
	ExpectedSegmentName string `json:"expectedSegmentName"`
	ReceivedSegmentName string `json:"receivedSegmentName"`
}

// RestorePlanFullBackup describes the full backup a restore starts from.
type RestorePlanFullBackup struct {
	BackupID                  uuid.UUID `json:"id"`
	FullBackupWalStartSegment string    `json:"fullBackupWalStartSegment"`
	FullBackupWalStopSegment  string    `json:"fullBackupWalStopSegment"`
	PgVersion                 string    `json:"pgVersion"`
	CreatedAt                 time.Time `json:"createdAt"`
	SizeBytes                 int64     `json:"sizeBytes"`
}

// RestorePlanWalSegment is one WAL segment the agent must download for recovery.
type RestorePlanWalSegment struct {
	BackupID    uuid.UUID `json:"backupId"`
	SegmentName string    `json:"segmentName"`
	SizeBytes   int64     `json:"sizeBytes"`
}

// GetRestorePlanErrorResponse reports why a restore plan could not be built
// (no backups, broken WAL chain, ...).
type GetRestorePlanErrorResponse struct {
	Error                 string `json:"error"`
	Message               string `json:"message"`
	LastContiguousSegment string `json:"lastContiguousSegment,omitempty"`
}

// GetRestorePlanResponse is the complete recovery plan: the full backup, the
// ordered WAL segments after it, and aggregate size information.
type GetRestorePlanResponse struct {
	FullBackup             RestorePlanFullBackup   `json:"fullBackup"`
	WalSegments            []RestorePlanWalSegment `json:"walSegments"`
	TotalSizeBytes         int64                   `json:"totalSizeBytes"`
	LatestAvailableSegment string                  `json:"latestAvailableSegment"`
}

View File

@@ -4,6 +4,7 @@ import (
"crypto/aes"
"crypto/cipher"
"encoding/binary"
"errors"
"fmt"
"io"
@@ -69,7 +70,7 @@ func NewDecryptionReader(
func (r *DecryptionReader) Read(p []byte) (n int, err error) {
for len(r.buffer) < len(p) && !r.eof {
if err := r.readAndDecryptChunk(); err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
r.eof = true
break
}

View File

@@ -0,0 +1,45 @@
package encryption
import (
"encoding/base64"
"fmt"
"io"
"github.com/google/uuid"
)
// EncryptionSetup bundles an ready-to-use EncryptionWriter with the
// base64-encoded salt and nonce that must be persisted on the backup record
// so the stream can be decrypted later.
type EncryptionSetup struct {
	Writer      *EncryptionWriter
	SaltBase64  string
	NonceBase64 string
}

// SetupEncryptionWriter generates a fresh salt and nonce, wraps baseWriter in
// an EncryptionWriter keyed by masterKey and backupID, and returns the writer
// together with the encoded salt/nonce.
func SetupEncryptionWriter(
	baseWriter io.Writer,
	masterKey string,
	backupID uuid.UUID,
) (*EncryptionSetup, error) {
	salt, saltErr := GenerateSalt()
	if saltErr != nil {
		return nil, fmt.Errorf("failed to generate salt: %w", saltErr)
	}
	nonce, nonceErr := GenerateNonce()
	if nonceErr != nil {
		return nil, fmt.Errorf("failed to generate nonce: %w", nonceErr)
	}
	writer, writerErr := NewEncryptionWriter(baseWriter, masterKey, backupID, salt, nonce)
	if writerErr != nil {
		return nil, fmt.Errorf("failed to create encryption writer: %w", writerErr)
	}
	setup := &EncryptionSetup{
		Writer:      writer,
		SaltBase64:  base64.StdEncoding.EncodeToString(salt),
		NonceBase64: base64.StdEncoding.EncodeToString(nonce),
	}
	return setup, nil
}

View File

@@ -1,4 +1,4 @@
package backups
package backups_services
import (
"sync"
@@ -20,14 +20,12 @@ import (
"databasus-backend/internal/util/logger"
)
var backupRepository = &backups_core.BackupRepository{}
var taskCancelManager = task_cancellation.GetTaskCancelManager()
var backupService = &BackupService{
databases.GetDatabaseService(),
storages.GetStorageService(),
backupRepository,
backups_core.GetBackupRepository(),
notifiers.GetNotifierService(),
notifiers.GetNotifierService(),
backups_config.GetBackupConfigService(),
@@ -44,16 +42,21 @@ var backupService = &BackupService{
backuping.GetBackupCleaner(),
}
var backupController = &BackupController{
backupService: backupService,
}
func GetBackupService() *BackupService {
return backupService
}
func GetBackupController() *BackupController {
return backupController
var walService = &PostgreWalBackupService{
backups_config.GetBackupConfigService(),
backups_core.GetBackupRepository(),
encryption.GetFieldEncryptor(),
encryption_secrets.GetSecretKeyService(),
logger.GetLogger(),
backupService,
}
func GetWalService() *PostgreWalBackupService {
return walService
}
var (

View File

@@ -0,0 +1,613 @@
package backups_services
import (
"context"
"fmt"
"io"
"log/slog"
"time"
"github.com/google/uuid"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_dto "databasus-backend/internal/features/backups/backups/dto"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/databases/databases/postgresql"
encryption_secrets "databasus-backend/internal/features/encryption/secrets"
util_encryption "databasus-backend/internal/util/encryption"
util_wal "databasus-backend/internal/util/wal"
)
// PostgreWalBackupService handles WAL segment and basebackup uploads from the databasus-cli agent.
type PostgreWalBackupService struct {
	backupConfigService *backups_config.BackupConfigService  // per-database backup settings (storage, encryption, interval)
	backupRepository    *backups_core.BackupRepository       // persistence for backup rows
	fieldEncryptor      util_encryption.FieldEncryptor       // passed through to Storage.SaveFile — presumably for credential decryption; confirm
	secretKeyService    *encryption_secrets.SecretKeyService // source of the master key for stream encryption
	logger              *slog.Logger
	backupService       *BackupService // reused for reading stored backup files on download
}
// UploadWal accepts a streaming WAL segment or basebackup upload from the agent.
// For WAL segments it validates the WAL chain before accepting. Returns an UploadGapResponse
// (409) when the chain is broken so the agent knows to trigger a full basebackup.
//
// Return contract: (gapResponse, nil) for chain conditions the agent must react
// to, (nil, err) for server-side failures, (nil, nil) on success.
func (s *PostgreWalBackupService) UploadWal(
	ctx context.Context,
	database *databases.Database,
	uploadType backups_core.PgWalUploadType,
	walSegmentName string,
	fullBackupWalStartSegment string,
	fullBackupWalStopSegment string,
	walSegmentSizeBytes int64,
	body io.Reader,
) (*backups_dto.UploadGapResponse, error) {
	if err := s.validateWalBackupType(database); err != nil {
		return nil, err
	}
	// Basebackups must carry the WAL range needed to make them consistent.
	if uploadType == backups_core.PgWalUploadTypeBasebackup {
		if fullBackupWalStartSegment == "" || fullBackupWalStopSegment == "" {
			return nil, fmt.Errorf(
				"fullBackupWalStartSegment and fullBackupWalStopSegment are required for basebackup uploads",
			)
		}
	}
	backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(database.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get backup config: %w", err)
	}
	if backupConfig.Storage == nil {
		return nil, fmt.Errorf("no storage configured for database %s", database.ID)
	}
	if uploadType == backups_core.PgWalUploadTypeWal {
		// Idempotency: check before chain validation so a successful re-upload is
		// not misidentified as a gap.
		existing, err := s.backupRepository.FindWalSegmentByName(database.ID, walSegmentName)
		if err != nil {
			return nil, fmt.Errorf("failed to check for duplicate WAL segment: %w", err)
		}
		// Only a non-FAILED record counts as a duplicate. markFailed leaves a
		// FAILED row behind after a broken upload; treating it as "already
		// stored" would silently drop the agent's retry and lose the segment.
		if existing != nil && existing.Status != backups_core.BackupStatusFailed {
			return nil, nil
		}
		gapResp, err := s.validateWalChain(database.ID, walSegmentName, walSegmentSizeBytes)
		if err != nil {
			return nil, err
		}
		if gapResp != nil {
			return gapResp, nil
		}
	}
	backup := s.createBackupRecord(
		database.ID,
		backupConfig.Storage.ID,
		uploadType,
		database.Name,
		walSegmentName,
		fullBackupWalStartSegment,
		fullBackupWalStopSegment,
		backupConfig.Encryption,
	)
	if err := s.backupRepository.Save(backup); err != nil {
		return nil, fmt.Errorf("failed to create backup record: %w", err)
	}
	sizeBytes, streamErr := s.streamToStorage(ctx, backup, backupConfig, body)
	if streamErr != nil {
		// Record the failure on the backup row so it is visible in history.
		errMsg := streamErr.Error()
		s.markFailed(backup, errMsg)
		return nil, fmt.Errorf("upload failed: %w", streamErr)
	}
	s.markCompleted(backup, sizeBytes)
	return nil, nil
}
// GetRestorePlan resolves the full backup to restore from plus every completed
// WAL segment recorded after its start segment, and totals their sizes.
//
// Return contract: (plan, nil, nil) on success; (nil, planError, nil) for
// conditions the agent must handle (no backups, broken chain); (nil, nil, err)
// for server-side failures.
func (s *PostgreWalBackupService) GetRestorePlan(
	database *databases.Database,
	backupID *uuid.UUID,
) (*backups_dto.GetRestorePlanResponse, *backups_dto.GetRestorePlanErrorResponse, error) {
	if err := s.validateWalBackupType(database); err != nil {
		return nil, nil, err
	}
	fullBackup, err := s.resolveFullBackup(database.ID, backupID)
	if err != nil {
		return nil, nil, err
	}
	if fullBackup == nil {
		// Distinguish "nothing to restore at all" from "requested backup missing".
		msg := "no full backups available for this database"
		if backupID != nil {
			msg = fmt.Sprintf("full backup %s not found or not completed", backupID)
		}
		return nil, &backups_dto.GetRestorePlanErrorResponse{
			Error:   "no_backups",
			Message: msg,
		}, nil
	}
	startSegment := ""
	if fullBackup.PgFullBackupWalStartSegmentName != nil {
		startSegment = *fullBackup.PgFullBackupWalStartSegmentName
	}
	walSegments, err := s.backupRepository.FindCompletedWalSegmentsAfter(database.ID, startSegment)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to query WAL segments: %w", err)
	}
	// A broken chain makes the plan unusable; surface it as an agent-facing error.
	chainErr := s.validateRestoreWalChain(fullBackup, walSegments)
	if chainErr != nil {
		return nil, chainErr, nil
	}
	// Sizes are stored in MB on the record; the plan reports bytes.
	fullBackupSizeBytes := int64(fullBackup.BackupSizeMb * 1024 * 1024)
	pgVersion := ""
	if fullBackup.PgVersion != nil {
		pgVersion = *fullBackup.PgVersion
	}
	stopSegment := ""
	if fullBackup.PgFullBackupWalStopSegmentName != nil {
		stopSegment = *fullBackup.PgFullBackupWalStopSegmentName
	}
	response := &backups_dto.GetRestorePlanResponse{
		FullBackup: backups_dto.RestorePlanFullBackup{
			BackupID:                  fullBackup.ID,
			FullBackupWalStartSegment: startSegment,
			FullBackupWalStopSegment:  stopSegment,
			PgVersion:                 pgVersion,
			CreatedAt:                 fullBackup.CreatedAt,
			SizeBytes:                 fullBackupSizeBytes,
		},
		TotalSizeBytes: fullBackupSizeBytes,
	}
	for _, seg := range walSegments {
		segName := ""
		if seg.PgWalSegmentName != nil {
			segName = *seg.PgWalSegmentName
		}
		segSizeBytes := int64(seg.BackupSizeMb * 1024 * 1024)
		response.WalSegments = append(response.WalSegments, backups_dto.RestorePlanWalSegment{
			BackupID:    seg.ID,
			SegmentName: segName,
			SizeBytes:   segSizeBytes,
		})
		response.TotalSizeBytes += segSizeBytes
		// Segments are ordered ascending, so the last one seen is the latest.
		response.LatestAvailableSegment = segName
	}
	return response, nil, nil
}
// DownloadBackupFile returns a reader over the stored backup file identified by
// backupID, after verifying the backup belongs to the authenticated database
// and is completed. Decryption of encrypted backups is handled transparently
// by the underlying backup service.
func (s *PostgreWalBackupService) DownloadBackupFile(
	database *databases.Database,
	backupID uuid.UUID,
) (io.ReadCloser, error) {
	if err := s.validateWalBackupType(database); err != nil {
		return nil, err
	}
	backup, lookupErr := s.backupRepository.FindByID(backupID)
	if lookupErr != nil {
		return nil, fmt.Errorf("backup not found: %w", lookupErr)
	}
	// Guard against a valid token requesting another database's backup.
	switch {
	case backup.DatabaseID != database.ID:
		return nil, fmt.Errorf("backup does not belong to this database")
	case backup.Status != backups_core.BackupStatusCompleted:
		return nil, fmt.Errorf("backup is not completed")
	}
	return s.backupService.GetBackupReader(backupID)
}
// GetNextFullBackupTime computes when the agent should take its next full
// basebackup, from the configured interval and the most recent completed full
// WAL backup (nil last-backup time means one has never completed).
func (s *PostgreWalBackupService) GetNextFullBackupTime(
	database *databases.Database,
) (*backups_dto.GetNextFullBackupTimeResponse, error) {
	if err := s.validateWalBackupType(database); err != nil {
		return nil, err
	}
	backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(database.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get backup config: %w", err)
	}
	if backupConfig.BackupInterval == nil {
		return nil, fmt.Errorf("no backup interval configured for database %s", database.ID)
	}
	lastFull, err := s.backupRepository.FindLastCompletedFullWalBackupByDatabaseID(
		database.ID,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to query last full backup: %w", err)
	}
	var lastAt *time.Time
	if lastFull != nil {
		lastAt = &lastFull.CreatedAt
	}
	next := backupConfig.BackupInterval.NextTriggerTime(time.Now().UTC(), lastAt)
	return &backups_dto.GetNextFullBackupTimeResponse{NextFullBackupTime: next}, nil
}
// ReportError creates a FAILED backup record with the agent's error message so
// agent-side failures show up alongside regular backup history.
func (s *PostgreWalBackupService) ReportError(
	database *databases.Database,
	errorMsg string,
) error {
	if err := s.validateWalBackupType(database); err != nil {
		return err
	}
	backupConfig, err := s.backupConfigService.GetBackupConfigByDbId(database.ID)
	if err != nil {
		return fmt.Errorf("failed to get backup config: %w", err)
	}
	if backupConfig.Storage == nil {
		return fmt.Errorf("no storage configured for database %s", database.ID)
	}
	failed := &backups_core.Backup{
		ID:          uuid.New(),
		DatabaseID:  database.ID,
		StorageID:   backupConfig.Storage.ID,
		Status:      backups_core.BackupStatusFailed,
		FailMessage: &errorMsg,
		Encryption:  backupConfig.Encryption,
		CreatedAt:   time.Now().UTC(),
	}
	failed.GenerateFilename(database.Name)
	if err := s.backupRepository.Save(failed); err != nil {
		return fmt.Errorf("failed to save error backup record: %w", err)
	}
	return nil
}
// validateWalChain verifies that incomingSegment is exactly the next segment
// expected after the current chain tail (the newest stored WAL segment, or the
// full backup's stop segment when no WAL segment has been stored yet).
//
// Return contract: (gapResponse, nil) when the segment cannot be accepted —
// "no_full_backup" when there is no anchor to chain from, "gap_detected" when
// the segment is out of order; (nil, nil) when the segment is the expected
// next link; (nil, err) on query/arithmetic failures.
func (s *PostgreWalBackupService) validateWalChain(
	databaseID uuid.UUID,
	incomingSegment string,
	walSegmentSizeBytes int64,
) (*backups_dto.UploadGapResponse, error) {
	fullBackup, err := s.backupRepository.FindLastCompletedFullWalBackupByDatabaseID(databaseID)
	if err != nil {
		return nil, fmt.Errorf("failed to query full backup: %w", err)
	}
	// No full backup exists yet: cannot accept WAL segments without a chain anchor.
	if fullBackup == nil || fullBackup.PgFullBackupWalStopSegmentName == nil {
		return &backups_dto.UploadGapResponse{
			Error:               "no_full_backup",
			ExpectedSegmentName: "",
			ReceivedSegmentName: incomingSegment,
		}, nil
	}
	stopSegment := *fullBackup.PgFullBackupWalStopSegmentName
	lastWal, err := s.backupRepository.FindLastWalSegmentAfter(databaseID, stopSegment)
	if err != nil {
		return nil, fmt.Errorf("failed to query last WAL segment: %w", err)
	}
	// Segment-name arithmetic depends on the instance's WAL segment size.
	walCalculator := util_wal.NewWalCalculator(walSegmentSizeBytes)
	// Chain tail: the newest stored segment, falling back to the basebackup's
	// stop segment when no WAL segments follow it yet.
	var chainTail string
	if lastWal != nil && lastWal.PgWalSegmentName != nil {
		chainTail = *lastWal.PgWalSegmentName
	} else {
		chainTail = stopSegment
	}
	expectedNext, err := walCalculator.NextSegment(chainTail)
	if err != nil {
		return nil, fmt.Errorf("WAL arithmetic failed for %q: %w", chainTail, err)
	}
	if incomingSegment != expectedNext {
		return &backups_dto.UploadGapResponse{
			Error:               "gap_detected",
			ExpectedSegmentName: expectedNext,
			ReceivedSegmentName: incomingSegment,
		}, nil
	}
	return nil, nil
}
// createBackupRecord builds an IN_PROGRESS backup row for an incoming upload,
// tagged as either a full basebackup (with its WAL range) or a WAL segment.
func (s *PostgreWalBackupService) createBackupRecord(
	databaseID uuid.UUID,
	storageID uuid.UUID,
	uploadType backups_core.PgWalUploadType,
	dbName string,
	walSegmentName string,
	fullBackupWalStartSegment string,
	fullBackupWalStopSegment string,
	encryption backups_config.BackupEncryption,
) *backups_core.Backup {
	record := &backups_core.Backup{
		ID:         uuid.New(),
		DatabaseID: databaseID,
		StorageID:  storageID,
		Status:     backups_core.BackupStatusInProgress,
		Encryption: encryption,
		CreatedAt:  time.Now().UTC(),
	}
	record.GenerateFilename(dbName)
	switch uploadType {
	case backups_core.PgWalUploadTypeBasebackup:
		kind := backups_core.PgWalBackupTypeFullBackup
		record.PgWalBackupType = &kind
		if fullBackupWalStartSegment != "" {
			record.PgFullBackupWalStartSegmentName = &fullBackupWalStartSegment
		}
		if fullBackupWalStopSegment != "" {
			record.PgFullBackupWalStopSegmentName = &fullBackupWalStopSegment
		}
	default:
		kind := backups_core.PgWalBackupTypeWalSegment
		record.PgWalBackupType = &kind
		record.PgWalSegmentName = &walSegmentName
	}
	return record
}
// streamToStorage routes the upload body to storage, encrypting the stream
// first when the backup config demands it. Returns the number of bytes stored.
func (s *PostgreWalBackupService) streamToStorage(
	ctx context.Context,
	backup *backups_core.Backup,
	backupConfig *backups_config.BackupConfig,
	body io.Reader,
) (int64, error) {
	if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
		return s.streamDirect(ctx, backupConfig, body, backup.FileName)
	}
	return s.streamEncrypted(ctx, backup, backupConfig, body, backup.FileName)
}
// streamDirect saves the body to storage as-is, counting the bytes it reads.
func (s *PostgreWalBackupService) streamDirect(
	ctx context.Context,
	backupConfig *backups_config.BackupConfig,
	body io.Reader,
	fileName string,
) (int64, error) {
	counter := &countingReader{r: body}
	saveErr := backupConfig.Storage.SaveFile(ctx, s.fieldEncryptor, s.logger, fileName, counter)
	if saveErr != nil {
		return 0, saveErr
	}
	return counter.n, nil
}
// streamEncrypted pipes the upload body through an encryption writer into
// storage and records the generated salt/nonce on the backup record.
// A goroutine feeds the encrypting side of an io.Pipe while SaveFile consumes
// the read side; errors from either side are surfaced, copy errors first.
func (s *PostgreWalBackupService) streamEncrypted(
	ctx context.Context,
	backup *backups_core.Backup,
	backupConfig *backups_config.BackupConfig,
	body io.Reader,
	fileName string,
) (int64, error) {
	masterKey, err := s.secretKeyService.GetSecretKey()
	if err != nil {
		return 0, fmt.Errorf("failed to get master encryption key: %w", err)
	}
	pipeReader, pipeWriter := io.Pipe()
	encryptionSetup, err := backup_encryption.SetupEncryptionWriter(
		pipeWriter,
		masterKey,
		backup.ID,
	)
	if err != nil {
		_ = pipeWriter.Close()
		return 0, err
	}
	// Producer: copy the request body through the encryption writer into the pipe.
	copyErrCh := make(chan error, 1)
	go func() {
		_, copyErr := io.Copy(encryptionSetup.Writer, body)
		if copyErr != nil {
			_ = encryptionSetup.Writer.Close()
			_ = pipeWriter.CloseWithError(copyErr)
			copyErrCh <- copyErr
			return
		}
		if closeErr := encryptionSetup.Writer.Close(); closeErr != nil {
			_ = pipeWriter.CloseWithError(closeErr)
			copyErrCh <- closeErr
			return
		}
		copyErrCh <- pipeWriter.Close()
	}()
	cr := &countingReader{r: pipeReader}
	saveErr := backupConfig.Storage.SaveFile(ctx, s.fieldEncryptor, s.logger, fileName, cr)
	if saveErr != nil {
		// SaveFile can fail while the producer goroutine is still blocked
		// writing into the pipe; with no reader left, that write would never
		// return and <-copyErrCh below would deadlock (leaking the goroutine
		// and this request). Closing the read side makes the pending write
		// fail immediately with saveErr.
		_ = pipeReader.CloseWithError(saveErr)
	}
	copyErr := <-copyErrCh
	if copyErr != nil {
		return 0, copyErr
	}
	if saveErr != nil {
		return 0, saveErr
	}
	// Persisted by the caller when it saves the completed backup record.
	backup.EncryptionSalt = &encryptionSetup.SaltBase64
	backup.EncryptionIV = &encryptionSetup.NonceBase64
	return cr.n, nil
}
// markCompleted transitions the backup to the completed status, records
// its size in megabytes, and persists the change. Persistence failures
// are logged rather than returned: the backup data itself is already safe.
func (s *PostgreWalBackupService) markCompleted(backup *backups_core.Backup, sizeBytes int64) {
	const bytesPerMb = 1024 * 1024
	backup.Status = backups_core.BackupStatusCompleted
	backup.BackupSizeMb = float64(sizeBytes) / bytesPerMb
	saveErr := s.backupRepository.Save(backup)
	if saveErr == nil {
		return
	}
	s.logger.Error(
		"failed to mark WAL backup as completed",
		"backupId",
		backup.ID,
		"error",
		saveErr,
	)
}
// markFailed transitions the backup to the failed status with the given
// failure message and persists it, logging (not returning) save errors.
func (s *PostgreWalBackupService) markFailed(backup *backups_core.Backup, errMsg string) {
	backup.Status = backups_core.BackupStatusFailed
	backup.FailMessage = &errMsg
	saveErr := s.backupRepository.Save(backup)
	if saveErr == nil {
		return
	}
	s.logger.Error("failed to mark WAL backup as failed", "backupId", backup.ID, "error", saveErr)
}
// resolveFullBackup picks the full WAL base backup to restore from:
// the explicitly requested one when backupID is non-nil, otherwise the
// most recent completed full backup for the database.
func (s *PostgreWalBackupService) resolveFullBackup(
	databaseID uuid.UUID,
	backupID *uuid.UUID,
) (*backups_core.Backup, error) {
	if backupID == nil {
		return s.backupRepository.FindLastCompletedFullWalBackupByDatabaseID(databaseID)
	}
	return s.backupRepository.FindCompletedFullWalBackupByID(databaseID, *backupID)
}
// validateRestoreWalChain checks that the given WAL segments form a
// contiguous chain starting right after the full backup's stop segment.
// It returns a "wal_chain_broken" error response describing the last
// contiguous segment when a gap is found, and nil when the chain is
// intact, empty, or cannot be evaluated.
//
// NOTE(review): every WAL-calculator error path deliberately returns nil
// (chain treated as valid) — presumably a best-effort check when segment
// names are absent or unparsable; confirm this is intended.
// NOTE(review): the gap-walk assumes walSegments is sorted ascending by
// segment name — verify against the repository query that produces it.
func (s *PostgreWalBackupService) validateRestoreWalChain(
	fullBackup *backups_core.Backup,
	walSegments []*backups_core.Backup,
) *backups_dto.GetRestorePlanErrorResponse {
	// No incremental segments at all: nothing to validate.
	if len(walSegments) == 0 {
		return nil
	}
	stopSegment := ""
	if fullBackup.PgFullBackupWalStopSegmentName != nil {
		stopSegment = *fullBackup.PgFullBackupWalStopSegmentName
	}
	walCalculator := util_wal.NewWalCalculator(0)
	// The chain must begin at the segment immediately following the
	// base backup's stop segment.
	expectedNext, err := walCalculator.NextSegment(stopSegment)
	if err != nil {
		return nil
	}
	for _, seg := range walSegments {
		segName := ""
		if seg.PgWalSegmentName != nil {
			segName = *seg.PgWalSegmentName
		}
		cmp, cmpErr := walCalculator.Compare(segName, stopSegment)
		if cmpErr != nil {
			return nil
		}
		// Skip segments that are <= stopSegment (they are part of the basebackup range)
		if cmp <= 0 {
			continue
		}
		// A mismatch here means at least one segment between the last
		// contiguous one and segName is missing.
		if segName != expectedNext {
			lastContiguous := stopSegment
			// Walk back to find the segment before the gap
			for _, prev := range walSegments {
				prevName := ""
				if prev.PgWalSegmentName != nil {
					prevName = *prev.PgWalSegmentName
				}
				prevCmp, _ := walCalculator.Compare(prevName, stopSegment)
				// Same skip rule as above: ignore segments covered by
				// the base backup.
				if prevCmp <= 0 {
					continue
				}
				// Reached the first segment after the gap: stop walking.
				if prevName == segName {
					break
				}
				lastContiguous = prevName
			}
			return &backups_dto.GetRestorePlanErrorResponse{
				Error: "wal_chain_broken",
				Message: fmt.Sprintf(
					"WAL chain has a gap after segment %s. Recovery is only possible up to this segment.",
					lastContiguous,
				),
				LastContiguousSegment: lastContiguous,
			}
		}
		// Advance the expectation to the segment after the one just seen.
		expectedNext, err = walCalculator.NextSegment(segName)
		if err != nil {
			return nil
		}
	}
	return nil
}
// validateWalBackupType verifies the database has a PostgreSQL config
// whose backup type is WAL v1; any other configuration yields an error.
func (s *PostgreWalBackupService) validateWalBackupType(database *databases.Database) error {
	pg := database.Postgresql
	if pg != nil && pg.BackupType == postgresql.PostgresBackupTypeWalV1 {
		return nil
	}
	return fmt.Errorf("database %s is not configured for WAL backups", database.ID)
}
type countingReader struct {
r io.Reader
n int64
}
func (cr *countingReader) Read(p []byte) (n int, err error) {
n, err = cr.r.Read(p)
cr.n += int64(n)
return n, err
}

View File

@@ -1,4 +1,4 @@
package backups
package backups_services
import (
"encoding/base64"
@@ -7,10 +7,13 @@ import (
"io"
"log/slog"
"github.com/google/uuid"
audit_logs "databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/backups/backups/backuping"
backups_core "databasus-backend/internal/features/backups/backups/core"
backups_download "databasus-backend/internal/features/backups/backups/download"
backups_dto "databasus-backend/internal/features/backups/backups/dto"
"databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -21,8 +24,7 @@ import (
users_models "databasus-backend/internal/features/users/models"
workspaces_services "databasus-backend/internal/features/workspaces/services"
util_encryption "databasus-backend/internal/util/encryption"
"github.com/google/uuid"
files_utils "databasus-backend/internal/util/files"
)
type BackupService struct {
@@ -92,7 +94,7 @@ func (s *BackupService) MakeBackupWithAuth(
return errors.New("insufficient permissions to create backup for this database")
}
s.backupSchedulerService.StartBackup(databaseID, true)
s.backupSchedulerService.StartBackup(database, true)
s.auditLogService.WriteAuditLog(
fmt.Sprintf("Backup manually initiated for database: %s", database.Name),
@@ -107,7 +109,7 @@ func (s *BackupService) GetBackups(
user *users_models.User,
databaseID uuid.UUID,
limit, offset int,
) (*GetBackupsResponse, error) {
) (*backups_dto.GetBackupsResponse, error) {
database, err := s.databaseService.GetDatabaseByID(databaseID)
if err != nil {
return nil, err
@@ -142,7 +144,7 @@ func (s *BackupService) GetBackups(
return nil, err
}
return &GetBackupsResponse{
return &backups_dto.GetBackupsResponse{
Backups: backups,
Total: total,
Limit: limit,
@@ -181,11 +183,7 @@ func (s *BackupService) DeleteBackup(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup deleted for database: %s (ID: %s)",
database.Name,
backupID.String(),
),
fmt.Sprintf("Backup deleted for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
@@ -232,11 +230,7 @@ func (s *BackupService) CancelBackup(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup cancelled for database: %s (ID: %s)",
database.Name,
backupID.String(),
),
fmt.Sprintf("Backup cancelled for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
@@ -276,16 +270,12 @@ func (s *BackupService) GetBackupFile(
}
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup file downloaded for database: %s (ID: %s)",
database.Name,
backupID.String(),
),
fmt.Sprintf("Backup file downloaded for database: %s", database.Name),
&user.ID,
database.WorkspaceID,
)
reader, err := s.getBackupReader(backupID)
reader, err := s.GetBackupReader(backupID)
if err != nil {
return nil, nil, nil, err
}
@@ -293,39 +283,9 @@ func (s *BackupService) GetBackupFile(
return reader, backup, database, nil
}
func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error {
dbBackupsInProgress, err := s.backupRepository.FindByDatabaseIdAndStatus(
databaseID,
backups_core.BackupStatusInProgress,
)
if err != nil {
return err
}
if len(dbBackupsInProgress) > 0 {
return errors.New("backup is in progress, storage cannot be removed")
}
dbBackups, err := s.backupRepository.FindByDatabaseID(
databaseID,
)
if err != nil {
return err
}
for _, dbBackup := range dbBackups {
err := s.backupCleaner.DeleteBackup(dbBackup)
if err != nil {
return err
}
}
return nil
}
// GetBackupReader returns a reader for the backup file
// If encrypted, wraps with DecryptionReader
func (s *BackupService) getBackupReader(backupID uuid.UUID) (io.ReadCloser, error) {
// GetBackupReader returns a reader for the backup file.
// If encrypted, wraps with DecryptionReader.
func (s *BackupService) GetBackupReader(backupID uuid.UUID) (io.ReadCloser, error) {
backup, err := s.backupRepository.FindByID(backupID)
if err != nil {
return nil, fmt.Errorf("failed to find backup: %w", err)
@@ -336,7 +296,7 @@ func (s *BackupService) getBackupReader(backupID uuid.UUID) (io.ReadCloser, erro
return nil, fmt.Errorf("failed to get storage: %w", err)
}
fileReader, err := storage.GetFile(s.fieldEncryptor, backup.ID)
fileReader, err := storage.GetFile(s.fieldEncryptor, backup.FileName)
if err != nil {
return nil, fmt.Errorf("failed to get backup file: %w", err)
}
@@ -405,7 +365,7 @@ func (s *BackupService) getBackupReader(backupID uuid.UUID) (io.ReadCloser, erro
s.logger.Info("Returning encrypted backup with decryption", "backupId", backupID)
return &DecryptionReaderCloser{
return &backups_dto.DecryptionReaderCloser{
DecryptionReader: decryptionReader,
BaseReader: fileReader,
}, nil
@@ -476,7 +436,7 @@ func (s *BackupService) GetBackupFileWithoutAuth(
return nil, nil, nil, err
}
reader, err := s.getBackupReader(backupID)
reader, err := s.GetBackupReader(backupID)
if err != nil {
return nil, nil, nil, err
}
@@ -490,11 +450,7 @@ func (s *BackupService) WriteAuditLogForDownload(
database *databases.Database,
) {
s.auditLogService.WriteAuditLog(
fmt.Sprintf(
"Backup file downloaded for database: %s (ID: %s)",
database.Name,
backup.ID.String(),
),
fmt.Sprintf("Backup file downloaded for database: %s", database.Name),
&userID,
database.WorkspaceID,
)
@@ -516,12 +472,42 @@ func (s *BackupService) UnregisterDownload(userID uuid.UUID) {
s.downloadTokenService.UnregisterDownload(userID)
}
func (s *BackupService) deleteDbBackups(databaseID uuid.UUID) error {
dbBackupsInProgress, err := s.backupRepository.FindByDatabaseIdAndStatus(
databaseID,
backups_core.BackupStatusInProgress,
)
if err != nil {
return err
}
if len(dbBackupsInProgress) > 0 {
return errors.New("backup is in progress, storage cannot be removed")
}
dbBackups, err := s.backupRepository.FindByDatabaseID(
databaseID,
)
if err != nil {
return err
}
for _, dbBackup := range dbBackups {
err := s.backupCleaner.DeleteBackup(dbBackup)
if err != nil {
return err
}
}
return nil
}
func (s *BackupService) generateBackupFilename(
backup *backups_core.Backup,
database *databases.Database,
) string {
timestamp := backup.CreatedAt.Format("2006-01-02_15-04-05")
safeName := sanitizeFilename(database.Name)
safeName := files_utils.SanitizeFilename(database.Name)
extension := s.getBackupExtension(database.Type)
return fmt.Sprintf("%s_backup_%s%s", safeName, timestamp, extension)
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
usecases_mariadb "databasus-backend/internal/features/backups/backups/usecases/mariadb"
usecases_mongodb "databasus-backend/internal/features/backups/backups/usecases/mongodb"
usecases_mysql "databasus-backend/internal/features/backups/backups/usecases/mysql"
@@ -12,8 +13,6 @@ import (
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/storages"
"github.com/google/uuid"
)
type CreateBackupUsecase struct {
@@ -25,7 +24,7 @@ type CreateBackupUsecase struct {
func (uc *CreateBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
database *databases.Database,
storage *storages.Storage,
@@ -35,7 +34,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypePostgres:
return uc.CreatePostgresqlBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,
@@ -45,7 +44,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypeMysql:
return uc.CreateMysqlBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,
@@ -55,7 +54,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypeMariadb:
return uc.CreateMariadbBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,
@@ -65,7 +64,7 @@ func (uc *CreateBackupUsecase) Execute(
case databases.DatabaseTypeMongodb:
return uc.CreateMongodbBackupUsecase.Execute(
ctx,
backupID,
backup,
backupConfig,
database,
storage,

View File

@@ -2,7 +2,6 @@ package usecases_mariadb
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -19,6 +18,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -52,7 +52,7 @@ type writeResult struct {
func (uc *CreateMariadbBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -82,7 +82,7 @@ func (uc *CreateMariadbBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetMariadbExecutable(
tools.MariadbExecutableMariadbDump,
@@ -108,18 +108,24 @@ func (uc *CreateMariadbBackupUsecase) buildMariadbDumpArgs(
"--single-transaction",
"--routines",
"--quick",
"--skip-extended-insert",
"--verbose",
}
if mdb.HasPrivilege("TRIGGER") {
args = append(args, "--triggers")
}
if mdb.HasPrivilege("EVENT") {
if mdb.HasPrivilege("EVENT") && !mdb.IsExcludeEvents {
args = append(args, "--events")
}
args = append(args, "--compress")
if !config.GetEnv().IsCloud {
args = append(args, "--max-allowed-packet=1G")
}
if mdb.IsHttps {
args = append(args, "--ssl")
args = append(args, "--skip-ssl-verify-server-cert")
@@ -134,7 +140,7 @@ func (uc *CreateMariadbBackupUsecase) buildMariadbDumpArgs(
func (uc *CreateMariadbBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
mariadbBin string,
args []string,
@@ -185,7 +191,7 @@ func (uc *CreateMariadbBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -202,7 +208,13 @@ func (uc *CreateMariadbBackupUsecase) streamToStorage(
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()
@@ -267,10 +279,10 @@ func (uc *CreateMariadbBackupUsecase) createTempMyCnfFile(
password string,
) (string, error) {
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0700); err != nil {
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0700); err != nil {
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
@@ -279,7 +291,7 @@ func (uc *CreateMariadbBackupUsecase) createTempMyCnfFile(
return "", fmt.Errorf("failed to create temp directory: %w", err)
}
if err := os.Chmod(tempDir, 0700); err != nil {
if err := os.Chmod(tempDir, 0o700); err != nil {
_ = os.RemoveAll(tempDir)
return "", fmt.Errorf("failed to set temp directory permissions: %w", err)
}
@@ -299,7 +311,7 @@ port=%d
content += "ssl=false\n"
}
err = os.WriteFile(myCnfFile, []byte(content), 0600)
err = os.WriteFile(myCnfFile, []byte(content), 0o600)
if err != nil {
_ = os.RemoveAll(tempDir)
return "", fmt.Errorf("failed to write .my.cnf: %w", err)
@@ -418,7 +430,9 @@ func (uc *CreateMariadbBackupUsecase) setupBackupEncryption(
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
metadata := common.BackupMetadata{}
metadata := common.BackupMetadata{
BackupID: backupID,
}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone
@@ -426,40 +440,22 @@ func (uc *CreateMariadbBackupUsecase) setupBackupEncryption(
return storageWriter, nil, metadata, nil
}
salt, err := backup_encryption.GenerateSalt()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to generate salt: %w", err)
}
nonce, err := backup_encryption.GenerateNonce()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to generate nonce: %w", err)
}
masterKey, err := uc.secretKeyService.GetSecretKey()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to get master key: %w", err)
}
encWriter, err := backup_encryption.NewEncryptionWriter(
storageWriter,
masterKey,
backupID,
salt,
nonce,
)
encSetup, err := backup_encryption.SetupEncryptionWriter(storageWriter, masterKey, backupID)
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to create encrypting writer: %w", err)
return nil, nil, metadata, err
}
saltBase64 := base64.StdEncoding.EncodeToString(salt)
nonceBase64 := base64.StdEncoding.EncodeToString(nonce)
metadata.EncryptionSalt = &saltBase64
metadata.EncryptionIV = &nonceBase64
metadata.EncryptionSalt = &encSetup.SaltBase64
metadata.EncryptionIV = &encSetup.NonceBase64
metadata.Encryption = backups_config.BackupEncryptionEncrypted
uc.logger.Info("Encryption enabled for backup", "backupId", backupID)
return encWriter, encWriter, metadata, nil
return encSetup.Writer, encSetup.Writer, metadata, nil
}
func (uc *CreateMariadbBackupUsecase) cleanupOnCancellation(
@@ -552,8 +548,8 @@ func (uc *CreateMariadbBackupUsecase) buildMariadbDumpErrorMessage(
stderrStr,
)
exitErr, ok := waitErr.(*exec.ExitError)
if !ok {
var exitErr *exec.ExitError
if !errors.As(waitErr, &exitErr) {
return errors.New(errorMsg)
}

View File

@@ -2,7 +2,6 @@ package usecases_mongodb
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -16,6 +15,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -46,7 +46,7 @@ type writeResult struct {
func (uc *CreateMongodbBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -76,7 +76,7 @@ func (uc *CreateMongodbBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetMongodbExecutable(
tools.MongodbExecutableMongodump,
@@ -114,7 +114,7 @@ func (uc *CreateMongodbBackupUsecase) buildMongodumpArgs(
func (uc *CreateMongodbBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
mongodumpBin string,
args []string,
@@ -163,7 +163,7 @@ func (uc *CreateMongodbBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -175,7 +175,13 @@ func (uc *CreateMongodbBackupUsecase) streamToStorage(
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()
@@ -262,6 +268,7 @@ func (uc *CreateMongodbBackupUsecase) setupBackupEncryption(
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
backupMetadata := common.BackupMetadata{
BackupID: backupID,
Encryption: backups_config.BackupEncryptionNone,
}
@@ -269,40 +276,21 @@ func (uc *CreateMongodbBackupUsecase) setupBackupEncryption(
return storageWriter, nil, backupMetadata, nil
}
salt, err := backup_encryption.GenerateSalt()
if err != nil {
return nil, nil, backupMetadata, fmt.Errorf("failed to generate salt: %w", err)
}
nonce, err := backup_encryption.GenerateNonce()
if err != nil {
return nil, nil, backupMetadata, fmt.Errorf("failed to generate nonce: %w", err)
}
masterKey, err := uc.secretKeyService.GetSecretKey()
if err != nil {
return nil, nil, backupMetadata, fmt.Errorf("failed to get master key: %w", err)
}
encryptionWriter, err := backup_encryption.NewEncryptionWriter(
storageWriter,
masterKey,
backupID,
salt,
nonce,
)
encSetup, err := backup_encryption.SetupEncryptionWriter(storageWriter, masterKey, backupID)
if err != nil {
return nil, nil, backupMetadata, fmt.Errorf("failed to create encryption writer: %w", err)
return nil, nil, backupMetadata, err
}
saltBase64 := base64.StdEncoding.EncodeToString(salt)
nonceBase64 := base64.StdEncoding.EncodeToString(nonce)
backupMetadata.Encryption = backups_config.BackupEncryptionEncrypted
backupMetadata.EncryptionSalt = &saltBase64
backupMetadata.EncryptionIV = &nonceBase64
backupMetadata.EncryptionSalt = &encSetup.SaltBase64
backupMetadata.EncryptionIV = &encSetup.NonceBase64
return encryptionWriter, encryptionWriter, backupMetadata, nil
return encSetup.Writer, encSetup.Writer, backupMetadata, nil
}
func (uc *CreateMongodbBackupUsecase) copyWithShutdownCheck(

View File

@@ -2,7 +2,6 @@ package usecases_mysql
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -19,6 +18,7 @@ import (
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -52,7 +52,7 @@ type writeResult struct {
func (uc *CreateMysqlBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -82,7 +82,7 @@ func (uc *CreateMysqlBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetMysqlExecutable(
my.Version,
@@ -107,6 +107,7 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatab
"--routines",
"--set-gtid-purged=OFF",
"--quick",
"--skip-extended-insert",
"--verbose",
}
@@ -117,7 +118,11 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatab
args = append(args, "--events")
}
args = append(args, uc.getNetworkCompressionArgs(my.Version)...)
args = append(args, uc.getNetworkCompressionArgs(my)...)
if !config.GetEnv().IsCloud {
args = append(args, "--max-allowed-packet=1G")
}
if my.IsHttps {
args = append(args, "--ssl-mode=REQUIRED")
@@ -130,15 +135,21 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatab
return args
}
func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.MysqlVersion) []string {
func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(
my *mysqltypes.MysqlDatabase,
) []string {
const zstdCompressionLevel = 5
switch version {
switch my.Version {
case tools.MysqlVersion80, tools.MysqlVersion84, tools.MysqlVersion9:
return []string{
"--compression-algorithms=zstd",
fmt.Sprintf("--zstd-compression-level=%d", zstdCompressionLevel),
if my.IsZstdSupported {
return []string{
"--compression-algorithms=zstd",
fmt.Sprintf("--zstd-compression-level=%d", zstdCompressionLevel),
}
}
return []string{"--compress"}
case tools.MysqlVersion57:
return []string{"--compress"}
default:
@@ -148,7 +159,7 @@ func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.Mysq
func (uc *CreateMysqlBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
mysqlBin string,
args []string,
@@ -199,7 +210,7 @@ func (uc *CreateMysqlBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -216,7 +227,13 @@ func (uc *CreateMysqlBackupUsecase) streamToStorage(
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()
@@ -281,10 +298,10 @@ func (uc *CreateMysqlBackupUsecase) createTempMyCnfFile(
password string,
) (string, error) {
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0700); err != nil {
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0700); err != nil {
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
@@ -293,7 +310,7 @@ func (uc *CreateMysqlBackupUsecase) createTempMyCnfFile(
return "", fmt.Errorf("failed to create temp directory: %w", err)
}
if err := os.Chmod(tempDir, 0700); err != nil {
if err := os.Chmod(tempDir, 0o700); err != nil {
_ = os.RemoveAll(tempDir)
return "", fmt.Errorf("failed to set temp directory permissions: %w", err)
}
@@ -311,7 +328,7 @@ port=%d
content += "ssl-mode=REQUIRED\n"
}
err = os.WriteFile(myCnfFile, []byte(content), 0600)
err = os.WriteFile(myCnfFile, []byte(content), 0o600)
if err != nil {
_ = os.RemoveAll(tempDir)
return "", fmt.Errorf("failed to write .my.cnf: %w", err)
@@ -430,7 +447,9 @@ func (uc *CreateMysqlBackupUsecase) setupBackupEncryption(
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
metadata := common.BackupMetadata{}
metadata := common.BackupMetadata{
BackupID: backupID,
}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone
@@ -438,40 +457,22 @@ func (uc *CreateMysqlBackupUsecase) setupBackupEncryption(
return storageWriter, nil, metadata, nil
}
salt, err := backup_encryption.GenerateSalt()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to generate salt: %w", err)
}
nonce, err := backup_encryption.GenerateNonce()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to generate nonce: %w", err)
}
masterKey, err := uc.secretKeyService.GetSecretKey()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to get master key: %w", err)
}
encWriter, err := backup_encryption.NewEncryptionWriter(
storageWriter,
masterKey,
backupID,
salt,
nonce,
)
encSetup, err := backup_encryption.SetupEncryptionWriter(storageWriter, masterKey, backupID)
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to create encrypting writer: %w", err)
return nil, nil, metadata, err
}
saltBase64 := base64.StdEncoding.EncodeToString(salt)
nonceBase64 := base64.StdEncoding.EncodeToString(nonce)
metadata.EncryptionSalt = &saltBase64
metadata.EncryptionIV = &nonceBase64
metadata.EncryptionSalt = &encSetup.SaltBase64
metadata.EncryptionIV = &encSetup.NonceBase64
metadata.Encryption = backups_config.BackupEncryptionEncrypted
uc.logger.Info("Encryption enabled for backup", "backupId", backupID)
return encWriter, encWriter, metadata, nil
return encSetup.Writer, encSetup.Writer, metadata, nil
}
func (uc *CreateMysqlBackupUsecase) cleanupOnCancellation(
@@ -564,8 +565,8 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpErrorMessage(
stderrStr,
)
exitErr, ok := waitErr.(*exec.ExitError)
if !ok {
var exitErr *exec.ExitError
if !errors.As(waitErr, &exitErr) {
return errors.New(errorMsg)
}
@@ -594,6 +595,15 @@ func (uc *CreateMysqlBackupUsecase) handleConnectionErrors(stderrStr string) err
)
}
if containsIgnoreCase(stderrStr, "compression algorithm") ||
containsIgnoreCase(stderrStr, "2066") {
return fmt.Errorf(
"MySQL connection failed due to unsupported compression algorithm. "+
"Try re-saving the database connection to re-detect compression support. stderr: %s",
stderrStr,
)
}
if containsIgnoreCase(stderrStr, "unknown database") {
return fmt.Errorf(
"MySQL database does not exist. stderr: %s",

View File

@@ -2,7 +2,6 @@ package usecases_postgresql
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -14,8 +13,11 @@ import (
"strings"
"time"
"github.com/google/uuid"
"databasus-backend/internal/config"
common "databasus-backend/internal/features/backups/backups/common"
backups_core "databasus-backend/internal/features/backups/backups/core"
backup_encryption "databasus-backend/internal/features/backups/backups/encryption"
backups_config "databasus-backend/internal/features/backups/config"
"databasus-backend/internal/features/databases"
@@ -24,8 +26,6 @@ import (
"databasus-backend/internal/features/storages"
"databasus-backend/internal/util/encryption"
"databasus-backend/internal/util/tools"
"github.com/google/uuid"
)
const (
@@ -53,7 +53,7 @@ type writeResult struct {
func (uc *CreatePostgresqlBackupUsecase) Execute(
ctx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
db *databases.Database,
storage *storages.Storage,
@@ -88,7 +88,7 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
return uc.streamToStorage(
ctx,
backupID,
backup,
backupConfig,
tools.GetPostgresqlExecutable(
pg.Version,
@@ -107,7 +107,7 @@ func (uc *CreatePostgresqlBackupUsecase) Execute(
// streamToStorage streams pg_dump output directly to storage
func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
parentCtx context.Context,
backupID uuid.UUID,
backup *backups_core.Backup,
backupConfig *backups_config.BackupConfig,
pgBin string,
args []string,
@@ -166,7 +166,7 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
storageReader, storageWriter := io.Pipe()
finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption(
backupID,
backup.ID,
backupConfig,
storageWriter,
)
@@ -181,7 +181,13 @@ func (uc *CreatePostgresqlBackupUsecase) streamToStorage(
// Start streaming into storage in its own goroutine
saveErrCh := make(chan error, 1)
go func() {
saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader)
saveErr := storage.SaveFile(
ctx,
uc.fieldEncryptor,
uc.logger,
backup.FileName,
storageReader,
)
saveErrCh <- saveErr
}()
@@ -475,7 +481,9 @@ func (uc *CreatePostgresqlBackupUsecase) setupBackupEncryption(
backupConfig *backups_config.BackupConfig,
storageWriter io.WriteCloser,
) (io.Writer, *backup_encryption.EncryptionWriter, common.BackupMetadata, error) {
metadata := common.BackupMetadata{}
metadata := common.BackupMetadata{
BackupID: backupID,
}
if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted {
metadata.Encryption = backups_config.BackupEncryptionNone
@@ -483,40 +491,22 @@ func (uc *CreatePostgresqlBackupUsecase) setupBackupEncryption(
return storageWriter, nil, metadata, nil
}
salt, err := backup_encryption.GenerateSalt()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to generate salt: %w", err)
}
nonce, err := backup_encryption.GenerateNonce()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to generate nonce: %w", err)
}
masterKey, err := uc.secretKeyService.GetSecretKey()
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to get master key: %w", err)
}
encWriter, err := backup_encryption.NewEncryptionWriter(
storageWriter,
masterKey,
backupID,
salt,
nonce,
)
encSetup, err := backup_encryption.SetupEncryptionWriter(storageWriter, masterKey, backupID)
if err != nil {
return nil, nil, metadata, fmt.Errorf("failed to create encrypting writer: %w", err)
return nil, nil, metadata, err
}
saltBase64 := base64.StdEncoding.EncodeToString(salt)
nonceBase64 := base64.StdEncoding.EncodeToString(nonce)
metadata.EncryptionSalt = &saltBase64
metadata.EncryptionIV = &nonceBase64
metadata.EncryptionSalt = &encSetup.SaltBase64
metadata.EncryptionIV = &encSetup.NonceBase64
metadata.Encryption = backups_config.BackupEncryptionEncrypted
uc.logger.Info("Encryption enabled for backup", "backupId", backupID)
return encWriter, encWriter, metadata, nil
return encSetup.Writer, encSetup.Writer, metadata, nil
}
func (uc *CreatePostgresqlBackupUsecase) cleanupOnCancellation(
@@ -605,8 +595,8 @@ func (uc *CreatePostgresqlBackupUsecase) buildPgDumpErrorMessage(
stderrStr := string(stderrOutput)
errorMsg := fmt.Sprintf("%s failed: %v stderr: %s", filepath.Base(pgBin), waitErr, stderrStr)
exitErr, ok := waitErr.(*exec.ExitError)
if !ok {
var exitErr *exec.ExitError
if !errors.As(waitErr, &exitErr) {
return errors.New(errorMsg)
}
@@ -758,10 +748,10 @@ func (uc *CreatePostgresqlBackupUsecase) createTempPgpassFile(
)
tempFolder := config.GetEnv().TempFolder
if err := os.MkdirAll(tempFolder, 0700); err != nil {
if err := os.MkdirAll(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to ensure temp folder exists: %w", err)
}
if err := os.Chmod(tempFolder, 0700); err != nil {
if err := os.Chmod(tempFolder, 0o700); err != nil {
return "", fmt.Errorf("failed to set temp folder permissions: %w", err)
}
@@ -770,13 +760,13 @@ func (uc *CreatePostgresqlBackupUsecase) createTempPgpassFile(
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}
if err := os.Chmod(tempDir, 0700); err != nil {
if err := os.Chmod(tempDir, 0o700); err != nil {
_ = os.RemoveAll(tempDir)
return "", fmt.Errorf("failed to set temporary directory permissions: %w", err)
}
pgpassFile := filepath.Join(tempDir, ".pgpass")
err = os.WriteFile(pgpassFile, []byte(pgpassContent), 0600)
err = os.WriteFile(pgpassFile, []byte(pgpassContent), 0o600)
if err != nil {
_ = os.RemoveAll(tempDir)
return "", fmt.Errorf("failed to write temporary .pgpass file: %w", err)

View File

@@ -4,10 +4,10 @@ import (
"errors"
"net/http"
users_middleware "databasus-backend/internal/features/users/middleware"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
users_middleware "databasus-backend/internal/features/users/middleware"
)
type BackupConfigController struct {

View File

@@ -118,9 +118,10 @@ func Test_SaveBackupConfig_PermissionsEnforced(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -146,7 +147,7 @@ func Test_SaveBackupConfig_PermissionsEnforced(t *testing.T) {
if tt.expectSuccess {
assert.Equal(t, database.ID, response.DatabaseID)
assert.True(t, response.IsBackupsEnabled)
assert.Equal(t, period.PeriodWeek, response.StorePeriod)
assert.Equal(t, period.PeriodWeek, response.RetentionTimePeriod)
} else {
assert.Contains(t, string(testResp.Body), "insufficient permissions")
}
@@ -170,9 +171,10 @@ func Test_SaveBackupConfig_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *test
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -337,7 +339,7 @@ func Test_GetBackupConfigByDbID_ReturnsDefaultConfigForNewDatabase(t *testing.T)
assert.Equal(t, database.ID, response.DatabaseID)
assert.False(t, response.IsBackupsEnabled)
assert.Equal(t, plan.MaxStoragePeriod, response.StorePeriod)
assert.Equal(t, plan.MaxStoragePeriod, response.RetentionTimePeriod)
assert.Equal(t, plan.MaxBackupSizeMB, response.MaxBackupSizeMB)
assert.Equal(t, plan.MaxBackupsTotalSizeMB, response.MaxBackupsTotalSizeMB)
assert.True(t, response.IsRetryIfFailed)
@@ -411,9 +413,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi
// Test 1: Try to save backup config with exceeded backup size limit
timeOfDay := "04:00"
backupConfigExceededSize := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -440,9 +443,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi
// Test 2: Try to save backup config with exceeded total size limit
backupConfigExceededTotal := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -469,9 +473,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi
// Test 3: Try to save backup config with exceeded storage period limit
backupConfigExceededPeriod := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodYear, // Exceeds limit of Month
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodYear, // Exceeds limit of Month
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -498,9 +503,10 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi
// Test 4: Save backup config within all limits - should succeed
backupConfigValid := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek, // Within Month limit
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek, // Within Month limit
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -529,7 +535,7 @@ func Test_SaveBackupConfig_WhenPlanLimitsAreAdjusted_ValidationEnforced(t *testi
assert.Equal(t, database.ID, responseValid.DatabaseID)
assert.Equal(t, int64(80), responseValid.MaxBackupSizeMB)
assert.Equal(t, int64(800), responseValid.MaxBackupsTotalSizeMB)
assert.Equal(t, period.PeriodWeek, responseValid.StorePeriod)
assert.Equal(t, period.PeriodWeek, responseValid.RetentionTimePeriod)
}
func Test_IsStorageUsing_PermissionsEnforced(t *testing.T) {
@@ -618,9 +624,10 @@ func Test_SaveBackupConfig_WithEncryptionNone_ConfigSaved(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -662,9 +669,10 @@ func Test_SaveBackupConfig_WithEncryptionEncrypted_ConfigSaved(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -959,9 +967,10 @@ func Test_TransferDatabase_ToNewStorage_DatabaseTransferd(t *testing.T) {
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1045,9 +1054,10 @@ func Test_TransferDatabase_WithExistingStorage_DatabaseAndStorageTransferd(t *te
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1142,9 +1152,10 @@ func Test_TransferDatabase_StorageHasOtherDBs_CannotTransfer(t *testing.T) {
timeOfDay := "04:00"
backupConfigRequest1 := BackupConfig{
DatabaseID: database1.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database1.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1168,9 +1179,10 @@ func Test_TransferDatabase_StorageHasOtherDBs_CannotTransfer(t *testing.T) {
)
backupConfigRequest2 := BackupConfig{
DatabaseID: database2.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database2.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1244,9 +1256,10 @@ func Test_TransferDatabase_WithNotifiers_NotifiersTransferred(t *testing.T) {
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1364,9 +1377,10 @@ func Test_TransferDatabase_NotifierHasOtherDBs_NotifierSkipped(t *testing.T) {
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database1.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database1.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1486,9 +1500,10 @@ func Test_TransferDatabase_WithMultipleNotifiers_OnlyExclusiveOnesTransferred(t
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database1.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database1.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1585,9 +1600,10 @@ func Test_TransferDatabase_WithTargetNotifiers_NotifiersAssigned(t *testing.T) {
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1665,9 +1681,10 @@ func Test_TransferDatabase_TargetNotifierFromDifferentWorkspace_ReturnsBadReques
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1730,9 +1747,10 @@ func Test_TransferDatabase_TargetStorageFromDifferentWorkspace_ReturnsBadRequest
timeOfDay := "04:00"
backupConfigRequest := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1789,9 +1807,10 @@ func Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase(t *testing.T
timeOfDay := "04:00"
backupConfigWithRegularStorage := BackupConfig{
DatabaseID: databaseA.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: databaseA.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -1840,9 +1859,10 @@ func Test_SaveBackupConfig_WithSystemStorage_CanBeUsedByAnyDatabase(t *testing.T
assert.True(t, savedSystemStorage.IsSystem)
backupConfigWithSystemStorage := BackupConfig{
DatabaseID: databaseA.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: databaseA.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,

View File

@@ -12,16 +12,19 @@ import (
"databasus-backend/internal/util/logger"
)
var backupConfigRepository = &BackupConfigRepository{}
var backupConfigService = &BackupConfigService{
backupConfigRepository,
databases.GetDatabaseService(),
storages.GetStorageService(),
notifiers.GetNotifierService(),
workspaces_services.GetWorkspaceService(),
plans.GetDatabasePlanService(),
nil,
}
var (
backupConfigRepository = &BackupConfigRepository{}
backupConfigService = &BackupConfigService{
backupConfigRepository,
databases.GetDatabaseService(),
storages.GetStorageService(),
notifiers.GetNotifierService(),
workspaces_services.GetWorkspaceService(),
plans.GetDatabasePlanService(),
nil,
}
)
var backupConfigController = &BackupConfigController{
backupConfigService,
}

View File

@@ -13,3 +13,11 @@ const (
BackupEncryptionNone BackupEncryption = "NONE"
BackupEncryptionEncrypted BackupEncryption = "ENCRYPTED"
)
type RetentionPolicyType string
const (
RetentionPolicyTypeTimePeriod RetentionPolicyType = "TIME_PERIOD"
RetentionPolicyTypeCount RetentionPolicyType = "COUNT"
RetentionPolicyTypeGFS RetentionPolicyType = "GFS"
)

View File

@@ -1,16 +1,17 @@
package backups_config
import (
"databasus-backend/internal/config"
"databasus-backend/internal/features/intervals"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/features/storages"
"databasus-backend/internal/util/period"
"errors"
"strings"
"github.com/google/uuid"
"gorm.io/gorm"
"databasus-backend/internal/config"
"databasus-backend/internal/features/intervals"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/features/storages"
"databasus-backend/internal/util/period"
)
type BackupConfig struct {
@@ -18,7 +19,15 @@ type BackupConfig struct {
IsBackupsEnabled bool `json:"isBackupsEnabled" gorm:"column:is_backups_enabled;type:boolean;not null"`
StorePeriod period.Period `json:"storePeriod" gorm:"column:store_period;type:text;not null"`
RetentionPolicyType RetentionPolicyType `json:"retentionPolicyType" gorm:"column:retention_policy_type;type:text;not null;default:'TIME_PERIOD'"`
RetentionTimePeriod period.TimePeriod `json:"retentionTimePeriod" gorm:"column:retention_time_period;type:text;not null;default:''"`
RetentionCount int `json:"retentionCount" gorm:"column:retention_count;type:int;not null;default:0"`
RetentionGfsHours int `json:"retentionGfsHours" gorm:"column:retention_gfs_hours;type:int;not null;default:0"`
RetentionGfsDays int `json:"retentionGfsDays" gorm:"column:retention_gfs_days;type:int;not null;default:0"`
RetentionGfsWeeks int `json:"retentionGfsWeeks" gorm:"column:retention_gfs_weeks;type:int;not null;default:0"`
RetentionGfsMonths int `json:"retentionGfsMonths" gorm:"column:retention_gfs_months;type:int;not null;default:0"`
RetentionGfsYears int `json:"retentionGfsYears" gorm:"column:retention_gfs_years;type:int;not null;default:0"`
BackupIntervalID uuid.UUID `json:"backupIntervalId" gorm:"column:backup_interval_id;type:uuid;not null"`
BackupInterval *intervals.Interval `json:"backupInterval,omitempty" gorm:"foreignKey:BackupIntervalID"`
@@ -35,7 +44,7 @@ type BackupConfig struct {
Encryption BackupEncryption `json:"encryption" gorm:"column:encryption;type:text;not null;default:'NONE'"`
// MaxBackupSizeMB limits individual backup size. 0 = unlimited.
MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"`
MaxBackupSizeMB int64 `json:"maxBackupSizeMb" gorm:"column:max_backup_size_mb;type:int;not null"`
// MaxBackupsTotalSizeMB limits total size of all backups. 0 = unlimited.
MaxBackupsTotalSizeMB int64 `json:"maxBackupsTotalSizeMb" gorm:"column:max_backups_total_size_mb;type:int;not null"`
}
@@ -78,13 +87,12 @@ func (b *BackupConfig) AfterFind(tx *gorm.DB) error {
}
func (b *BackupConfig) Validate(plan *plans.DatabasePlan) error {
// Backup interval is required either as ID or as object
if b.BackupIntervalID == uuid.Nil && b.BackupInterval == nil {
return errors.New("backup interval is required")
}
if b.StorePeriod == "" {
return errors.New("store period is required")
if err := b.validateRetentionPolicy(plan); err != nil {
return err
}
if b.IsRetryIfFailed && b.MaxFailedTriesCount <= 0 {
@@ -110,22 +118,12 @@ func (b *BackupConfig) Validate(plan *plans.DatabasePlan) error {
return errors.New("max backups total size must be non-negative")
}
// Validate against plan limits
// Check storage period limit
if plan.MaxStoragePeriod != period.PeriodForever {
if b.StorePeriod.CompareTo(plan.MaxStoragePeriod) > 0 {
return errors.New("storage period exceeds plan limit")
}
}
// Check max backup size limit (0 in plan means unlimited)
if plan.MaxBackupSizeMB > 0 {
if b.MaxBackupSizeMB == 0 || b.MaxBackupSizeMB > plan.MaxBackupSizeMB {
return errors.New("max backup size exceeds plan limit")
}
}
// Check max total backups size limit (0 in plan means unlimited)
if plan.MaxBackupsTotalSizeMB > 0 {
if b.MaxBackupsTotalSizeMB == 0 ||
b.MaxBackupsTotalSizeMB > plan.MaxBackupsTotalSizeMB {
@@ -140,7 +138,14 @@ func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig {
return &BackupConfig{
DatabaseID: newDatabaseID,
IsBackupsEnabled: b.IsBackupsEnabled,
StorePeriod: b.StorePeriod,
RetentionPolicyType: b.RetentionPolicyType,
RetentionTimePeriod: b.RetentionTimePeriod,
RetentionCount: b.RetentionCount,
RetentionGfsHours: b.RetentionGfsHours,
RetentionGfsDays: b.RetentionGfsDays,
RetentionGfsWeeks: b.RetentionGfsWeeks,
RetentionGfsMonths: b.RetentionGfsMonths,
RetentionGfsYears: b.RetentionGfsYears,
BackupIntervalID: uuid.Nil,
BackupInterval: b.BackupInterval.Copy(),
StorageID: b.StorageID,
@@ -152,3 +157,34 @@ func (b *BackupConfig) Copy(newDatabaseID uuid.UUID) *BackupConfig {
MaxBackupsTotalSizeMB: b.MaxBackupsTotalSizeMB,
}
}
func (b *BackupConfig) validateRetentionPolicy(plan *plans.DatabasePlan) error {
switch b.RetentionPolicyType {
case RetentionPolicyTypeTimePeriod, "":
if b.RetentionTimePeriod == "" {
return errors.New("retention time period is required")
}
if plan.MaxStoragePeriod != period.PeriodForever {
if b.RetentionTimePeriod.CompareTo(plan.MaxStoragePeriod) > 0 {
return errors.New("storage period exceeds plan limit")
}
}
case RetentionPolicyTypeCount:
if b.RetentionCount <= 0 {
return errors.New("retention count must be greater than 0")
}
case RetentionPolicyTypeGFS:
if b.RetentionGfsHours <= 0 && b.RetentionGfsDays <= 0 && b.RetentionGfsWeeks <= 0 &&
b.RetentionGfsMonths <= 0 && b.RetentionGfsYears <= 0 {
return errors.New("at least one GFS retention field must be greater than 0")
}
default:
return errors.New("invalid retention policy type")
}
return nil
}

View File

@@ -3,17 +3,17 @@ package backups_config
import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"databasus-backend/internal/features/intervals"
plans "databasus-backend/internal/features/plan"
"databasus-backend/internal/util/period"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func Test_Validate_WhenStoragePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t *testing.T) {
func Test_Validate_WhenRetentionTimePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodWeek
config.RetentionTimePeriod = period.PeriodWeek
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
@@ -22,9 +22,9 @@ func Test_Validate_WhenStoragePeriodIsWeekAndPlanAllowsMonth_ValidationPasses(t
assert.NoError(t, err)
}
func Test_Validate_WhenStoragePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *testing.T) {
func Test_Validate_WhenRetentionTimePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodYear
config.RetentionTimePeriod = period.PeriodYear
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
@@ -33,9 +33,11 @@ func Test_Validate_WhenStoragePeriodIsYearAndPlanAllowsMonth_ValidationFails(t *
assert.EqualError(t, err, "storage period exceeds plan limit")
}
func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsForever_ValidationPasses(t *testing.T) {
func Test_Validate_WhenRetentionTimePeriodIsForeverAndPlanAllowsForever_ValidationPasses(
t *testing.T,
) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodForever
config.RetentionTimePeriod = period.PeriodForever
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodForever
@@ -44,9 +46,9 @@ func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsForever_ValidationPass
assert.NoError(t, err)
}
func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsYear_ValidationFails(t *testing.T) {
func Test_Validate_WhenRetentionTimePeriodIsForeverAndPlanAllowsYear_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodForever
config.RetentionTimePeriod = period.PeriodForever
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodYear
@@ -55,9 +57,9 @@ func Test_Validate_WhenStoragePeriodIsForeverAndPlanAllowsYear_ValidationFails(t
assert.EqualError(t, err, "storage period exceeds plan limit")
}
func Test_Validate_WhenStoragePeriodEqualsExactPlanLimit_ValidationPasses(t *testing.T) {
func Test_Validate_WhenRetentionTimePeriodEqualsExactPlanLimit_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodMonth
config.RetentionTimePeriod = period.PeriodMonth
plan := createUnlimitedPlan()
plan.MaxStoragePeriod = period.PeriodMonth
@@ -178,7 +180,7 @@ func Test_Validate_WhenTotalSizeEqualsExactPlanLimit_ValidationPasses(t *testing
func Test_Validate_WhenAllLimitsAreUnlimitedInPlan_AnyConfigurationPasses(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodForever
config.RetentionTimePeriod = period.PeriodForever
config.MaxBackupSizeMB = 0
config.MaxBackupsTotalSizeMB = 0
@@ -190,7 +192,7 @@ func Test_Validate_WhenAllLimitsAreUnlimitedInPlan_AnyConfigurationPasses(t *tes
func Test_Validate_WhenMultipleLimitsExceeded_ValidationFailsWithFirstError(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = period.PeriodYear
config.RetentionTimePeriod = period.PeriodYear
config.MaxBackupSizeMB = 500
config.MaxBackupsTotalSizeMB = 5000
@@ -249,14 +251,14 @@ func Test_Validate_WhenEncryptionIsInvalid_ValidationFailsRegardlessOfPlan(t *te
assert.EqualError(t, err, "encryption must be NONE or ENCRYPTED")
}
func Test_Validate_WhenStoragePeriodIsEmpty_ValidationFails(t *testing.T) {
func Test_Validate_WhenRetentionTimePeriodIsEmpty_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = ""
config.RetentionTimePeriod = ""
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "store period is required")
assert.EqualError(t, err, "retention time period is required")
}
func Test_Validate_WhenMaxBackupSizeIsNegative_ValidationFails(t *testing.T) {
@@ -282,8 +284,8 @@ func Test_Validate_WhenMaxTotalSizeIsNegative_ValidationFails(t *testing.T) {
func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) {
tests := []struct {
name string
configPeriod period.Period
planPeriod period.Period
configPeriod period.TimePeriod
planPeriod period.TimePeriod
configSize int64
planSize int64
configTotal int64
@@ -345,7 +347,7 @@ func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := createValidBackupConfig()
config.StorePeriod = tt.configPeriod
config.RetentionTimePeriod = tt.configPeriod
config.MaxBackupSizeMB = tt.configSize
config.MaxBackupsTotalSizeMB = tt.configTotal
@@ -364,12 +366,96 @@ func Test_Validate_WhenPlanLimitsAreAtBoundary_ValidationWorks(t *testing.T) {
}
}
func Test_Validate_WhenPolicyTypeIsCount_RequiresPositiveCount(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = RetentionPolicyTypeCount
config.RetentionCount = 0
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "retention count must be greater than 0")
}
func Test_Validate_WhenPolicyTypeIsCount_WithPositiveCount_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = RetentionPolicyTypeCount
config.RetentionCount = 10
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenPolicyTypeIsGFS_RequiresAtLeastOneField(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = RetentionPolicyTypeGFS
config.RetentionGfsDays = 0
config.RetentionGfsWeeks = 0
config.RetentionGfsMonths = 0
config.RetentionGfsYears = 0
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "at least one GFS retention field must be greater than 0")
}
func Test_Validate_WhenPolicyTypeIsGFS_WithOnlyHours_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = RetentionPolicyTypeGFS
config.RetentionGfsHours = 24
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenPolicyTypeIsGFS_WithOnlyDays_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = RetentionPolicyTypeGFS
config.RetentionGfsDays = 7
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenPolicyTypeIsGFS_WithAllFields_ValidationPasses(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = RetentionPolicyTypeGFS
config.RetentionGfsHours = 24
config.RetentionGfsDays = 7
config.RetentionGfsWeeks = 4
config.RetentionGfsMonths = 12
config.RetentionGfsYears = 3
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.NoError(t, err)
}
func Test_Validate_WhenPolicyTypeIsInvalid_ValidationFails(t *testing.T) {
config := createValidBackupConfig()
config.RetentionPolicyType = "INVALID"
plan := createUnlimitedPlan()
err := config.Validate(plan)
assert.EqualError(t, err, "invalid retention policy type")
}
func createValidBackupConfig() *BackupConfig {
intervalID := uuid.New()
return &BackupConfig{
DatabaseID: uuid.New(),
IsBackupsEnabled: true,
StorePeriod: period.PeriodMonth,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodMonth,
BackupIntervalID: intervalID,
BackupInterval: &intervals.Interval{ID: intervalID},
SendNotificationsOn: []BackupNotificationType{},

View File

@@ -1,11 +1,12 @@
package backups_config
import (
"databasus-backend/internal/storage"
"errors"
"github.com/google/uuid"
"gorm.io/gorm"
"databasus-backend/internal/storage"
)
type BackupConfigRepository struct{}
@@ -47,7 +48,6 @@ func (r *BackupConfigRepository) Save(
return nil
})
if err != nil {
return nil, err
}

View File

@@ -3,6 +3,8 @@ package backups_config
import (
"errors"
"github.com/google/uuid"
"databasus-backend/internal/features/databases"
"databasus-backend/internal/features/intervals"
"databasus-backend/internal/features/notifiers"
@@ -10,8 +12,6 @@ import (
"databasus-backend/internal/features/storages"
users_models "databasus-backend/internal/features/users/models"
workspaces_services "databasus-backend/internal/features/workspaces/services"
"github.com/google/uuid"
)
type BackupConfigService struct {
@@ -214,38 +214,6 @@ func (s *BackupConfigService) CreateDisabledBackupConfig(databaseID uuid.UUID) e
return s.initializeDefaultConfig(databaseID)
}
func (s *BackupConfigService) initializeDefaultConfig(
databaseID uuid.UUID,
) error {
plan, err := s.databasePlanService.GetDatabasePlan(databaseID)
if err != nil {
return err
}
timeOfDay := "04:00"
_, err = s.backupConfigRepository.Save(&BackupConfig{
DatabaseID: databaseID,
IsBackupsEnabled: false,
StorePeriod: plan.MaxStoragePeriod,
MaxBackupSizeMB: plan.MaxBackupSizeMB,
MaxBackupsTotalSizeMB: plan.MaxBackupsTotalSizeMB,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
},
SendNotificationsOn: []BackupNotificationType{
NotificationBackupFailed,
NotificationBackupSuccess,
},
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
Encryption: BackupEncryptionNone,
})
return err
}
func (s *BackupConfigService) TransferDatabaseToWorkspace(
user *users_models.User,
databaseID uuid.UUID,
@@ -289,7 +257,8 @@ func (s *BackupConfigService) TransferDatabaseToWorkspace(
s.transferNotifiers(user, database, request.TargetWorkspaceID)
}
if request.IsTransferWithStorage {
switch {
case request.IsTransferWithStorage:
if backupConfig.StorageID == nil {
return ErrDatabaseHasNoStorage
}
@@ -314,7 +283,7 @@ func (s *BackupConfigService) TransferDatabaseToWorkspace(
if err != nil {
return err
}
} else if request.TargetStorageID != nil {
case request.TargetStorageID != nil:
targetStorage, err := s.storageService.GetStorageByID(*request.TargetStorageID)
if err != nil {
return err
@@ -331,7 +300,7 @@ func (s *BackupConfigService) TransferDatabaseToWorkspace(
if err != nil {
return err
}
} else {
default:
return ErrTargetStorageNotSpecified
}
@@ -350,6 +319,39 @@ func (s *BackupConfigService) TransferDatabaseToWorkspace(
return nil
}
func (s *BackupConfigService) initializeDefaultConfig(
databaseID uuid.UUID,
) error {
plan, err := s.databasePlanService.GetDatabasePlan(databaseID)
if err != nil {
return err
}
timeOfDay := "04:00"
_, err = s.backupConfigRepository.Save(&BackupConfig{
DatabaseID: databaseID,
IsBackupsEnabled: false,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: plan.MaxStoragePeriod,
MaxBackupSizeMB: plan.MaxBackupSizeMB,
MaxBackupsTotalSizeMB: plan.MaxBackupsTotalSizeMB,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
},
SendNotificationsOn: []BackupNotificationType{
NotificationBackupFailed,
NotificationBackupSuccess,
},
IsRetryIfFailed: true,
MaxFailedTriesCount: 3,
Encryption: BackupEncryptionNone,
})
return err
}
func (s *BackupConfigService) transferNotifiers(
user *users_models.User,
database *databases.Database,

View File

@@ -35,9 +35,10 @@ func Test_AttachStorageFromSameWorkspace_SuccessfullyAttached(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -87,9 +88,10 @@ func Test_AttachStorageFromDifferentWorkspace_ReturnsForbidden(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -131,9 +133,10 @@ func Test_DeleteStorageWithAttachedDatabases_CannotDelete(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,
@@ -191,9 +194,10 @@ func Test_TransferStorageWithAttachedDatabase_CannotTransfer(t *testing.T) {
timeOfDay := "04:00"
request := BackupConfig{
DatabaseID: database.ID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodWeek,
DatabaseID: database.ID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodWeek,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,

View File

@@ -1,11 +1,11 @@
package backups_config
import (
"github.com/google/uuid"
"databasus-backend/internal/features/intervals"
"databasus-backend/internal/features/storages"
"databasus-backend/internal/util/period"
"github.com/google/uuid"
)
func EnableBackupsForTestDatabase(
@@ -15,9 +15,10 @@ func EnableBackupsForTestDatabase(
timeOfDay := "16:00"
backupConfig := &BackupConfig{
DatabaseID: databaseID,
IsBackupsEnabled: true,
StorePeriod: period.PeriodDay,
DatabaseID: databaseID,
IsBackupsEnabled: true,
RetentionPolicyType: RetentionPolicyTypeTimePeriod,
RetentionTimePeriod: period.PeriodDay,
BackupInterval: &intervals.Interval{
Interval: intervals.IntervalDaily,
TimeOfDay: &timeOfDay,

View File

@@ -1,13 +1,14 @@
package databases
import (
users_middleware "databasus-backend/internal/features/users/middleware"
users_services "databasus-backend/internal/features/users/services"
workspaces_services "databasus-backend/internal/features/workspaces/services"
"net/http"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
users_middleware "databasus-backend/internal/features/users/middleware"
users_services "databasus-backend/internal/features/users/services"
workspaces_services "databasus-backend/internal/features/workspaces/services"
)
type DatabaseController struct {
@@ -29,6 +30,11 @@ func (c *DatabaseController) RegisterRoutes(router *gin.RouterGroup) {
router.GET("/databases/notifier/:id/databases-count", c.CountDatabasesByNotifier)
router.POST("/databases/is-readonly", c.IsUserReadOnly)
router.POST("/databases/create-readonly-user", c.CreateReadOnlyUser)
router.POST("/databases/:id/regenerate-token", c.RegenerateAgentToken)
}
func (c *DatabaseController) RegisterPublicRoutes(router *gin.RouterGroup) {
router.POST("/databases/verify-token", c.VerifyAgentToken)
}
// CreateDatabase
@@ -438,3 +444,61 @@ func (c *DatabaseController) CreateReadOnlyUser(ctx *gin.Context) {
Password: password,
})
}
// RegenerateAgentToken
// @Summary Regenerate agent token for a database
// @Description Generate a new agent token for the database. The token is returned once and stored as a hash.
// @Tags databases
// @Produce json
// @Security BearerAuth
// @Param id path string true "Database ID"
// @Success 200 {object} map[string]string
// @Failure 400 {object} map[string]string
// @Failure 401 {object} map[string]string
// @Router /databases/{id}/regenerate-token [post]
func (c *DatabaseController) RegenerateAgentToken(ctx *gin.Context) {
	// Only authenticated users may rotate a database's agent token.
	currentUser, authenticated := users_middleware.GetUserFromContext(ctx)
	if !authenticated {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "User not authenticated"})
		return
	}

	databaseID, parseErr := uuid.Parse(ctx.Param("id"))
	if parseErr != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid database ID"})
		return
	}

	// The service returns the plaintext token exactly once; only its hash is persisted.
	newToken, serviceErr := c.databaseService.RegenerateAgentToken(currentUser, databaseID)
	if serviceErr != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": serviceErr.Error()})
		return
	}

	ctx.JSON(http.StatusOK, gin.H{"token": newToken})
}
// VerifyAgentToken
// @Summary Verify agent token
// @Description Verify that a given agent token is valid for any database
// @Tags databases
// @Accept json
// @Produce json
// @Param request body VerifyAgentTokenRequest true "Token to verify"
// @Success 200 {object} map[string]string
// @Failure 401 {object} map[string]string
// @Router /databases/verify-token [post]
func (c *DatabaseController) VerifyAgentToken(ctx *gin.Context) {
	// Public (unauthenticated) endpoint: the token itself is the credential.
	var payload VerifyAgentTokenRequest
	if bindErr := ctx.ShouldBindJSON(&payload); bindErr != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}

	// Deliberately return a generic message so callers learn nothing about why
	// verification failed.
	if verifyErr := c.databaseService.VerifyAgentToken(payload.Token); verifyErr != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
		return
	}

	ctx.JSON(http.StatusOK, gin.H{"message": "token is valid"})
}

View File

@@ -13,10 +13,13 @@ import (
"github.com/stretchr/testify/assert"
"databasus-backend/internal/config"
"databasus-backend/internal/features/audit_logs"
"databasus-backend/internal/features/databases/databases/mariadb"
"databasus-backend/internal/features/databases/databases/mongodb"
"databasus-backend/internal/features/databases/databases/postgresql"
users_enums "databasus-backend/internal/features/users/enums"
users_middleware "databasus-backend/internal/features/users/middleware"
users_services "databasus-backend/internal/features/users/services"
users_testing "databasus-backend/internal/features/users/testing"
workspaces_controllers "databasus-backend/internal/features/workspaces/controllers"
workspaces_testing "databasus-backend/internal/features/workspaces/testing"
@@ -144,6 +147,66 @@ func Test_CreateDatabase_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *testin
assert.Contains(t, string(testResp.Body), "insufficient permissions")
}
// Creating a WAL-v1 postgres database must succeed without any connection
// fields (host/port/credentials), since the agent pushes data to the server.
func Test_CreateDatabase_WalV1Type_NoConnectionFieldsRequired(t *testing.T) {
	testRouter := createTestRouter()
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)

	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, testRouter)
	defer workspaces_testing.RemoveTestWorkspace(workspace, testRouter)

	// No Host/Port/Username/Password on purpose — only the backup type and CPU count.
	createRequest := Database{
		Name:        "Test WAL Database",
		WorkspaceID: &workspace.ID,
		Type:        DatabaseTypePostgres,
		Postgresql: &postgresql.PostgresqlDatabase{
			BackupType: postgresql.PostgresBackupTypeWalV1,
			CpuCount:   1,
		},
	}

	var createdDatabase Database
	test_utils.MakePostRequestAndUnmarshal(
		t,
		testRouter,
		"/api/v1/databases/create",
		"Bearer "+member.Token,
		createRequest,
		http.StatusCreated,
		&createdDatabase,
	)
	defer RemoveTestDatabase(&createdDatabase)

	assert.Equal(t, "Test WAL Database", createdDatabase.Name)
	assert.NotEqual(t, uuid.Nil, createdDatabase.ID)
}
// A pg_dump-type database pulls from the target, so connection fields are
// mandatory; creating one without a host must be rejected with 400.
func Test_CreateDatabase_PgDumpType_ConnectionFieldsRequired(t *testing.T) {
	testRouter := createTestRouter()
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)

	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, testRouter)
	defer workspaces_testing.RemoveTestWorkspace(workspace, testRouter)

	// Connection fields intentionally left empty to trigger validation.
	createRequest := Database{
		Name:        "Test PG_DUMP Database",
		WorkspaceID: &workspace.ID,
		Type:        DatabaseTypePostgres,
		Postgresql: &postgresql.PostgresqlDatabase{
			BackupType: postgresql.PostgresBackupTypePgDump,
			CpuCount:   1,
		},
	}

	createResponse := test_utils.MakePostRequest(
		t,
		testRouter,
		"/api/v1/databases/create",
		"Bearer "+member.Token,
		createRequest,
		http.StatusBadRequest,
	)

	assert.Contains(t, string(createResponse.Body), "host is required")
}
func Test_UpdateDatabase_PermissionsEnforced(t *testing.T) {
tests := []struct {
name string
@@ -256,6 +319,52 @@ func Test_UpdateDatabase_WhenUserIsNotWorkspaceMember_ReturnsForbidden(t *testin
assert.Contains(t, string(testResp.Body), "insufficient permissions")
}
// The engine type of an existing database is immutable; attempting to switch
// it (postgres -> mysql) must fail with 400.
func Test_UpdateDatabase_WhenDatabaseTypeChanged_ReturnsBadRequest(t *testing.T) {
	testRouter := createTestRouter()
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)

	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, testRouter)
	defer workspaces_testing.RemoveTestWorkspace(workspace, testRouter)

	existingDatabase := createTestDatabaseViaAPI("Test Database", workspace.ID, member.Token, testRouter)
	defer RemoveTestDatabase(existingDatabase)

	// Mutate the immutable field and push the update.
	existingDatabase.Type = DatabaseTypeMysql
	updateResponse := test_utils.MakePostRequest(
		t,
		testRouter,
		"/api/v1/databases/update",
		"Bearer "+member.Token,
		existingDatabase,
		http.StatusBadRequest,
	)

	assert.Contains(t, string(updateResponse.Body), "database type cannot be changed")
}
// The postgres backup strategy of an existing database is immutable;
// switching it (pg_dump -> WAL v1) must fail with 400.
func Test_UpdateDatabase_WhenBackupTypeChanged_ReturnsBadRequest(t *testing.T) {
	testRouter := createTestRouter()
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)

	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, testRouter)
	defer workspaces_testing.RemoveTestWorkspace(workspace, testRouter)

	existingDatabase := createTestDatabaseViaAPI("Test Database", workspace.ID, member.Token, testRouter)
	defer RemoveTestDatabase(existingDatabase)

	// Mutate the immutable backup strategy and push the update.
	existingDatabase.Postgresql.BackupType = postgresql.PostgresBackupTypeWalV1
	updateResponse := test_utils.MakePostRequest(
		t,
		testRouter,
		"/api/v1/databases/update",
		"Bearer "+member.Token,
		existingDatabase,
		http.StatusBadRequest,
	)

	assert.Contains(t, string(updateResponse.Body), "backup type cannot be changed")
}
func Test_DeleteDatabase_PermissionsEnforced(t *testing.T) {
tests := []struct {
name string
@@ -753,7 +862,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
name string
databaseType DatabaseType
createDatabase func(workspaceID uuid.UUID) *Database
updateDatabase func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database
updateDatabase func(workspaceID, databaseID uuid.UUID) *Database
verifySensitiveData func(t *testing.T, database *Database)
verifyHiddenData func(t *testing.T, database *Database)
}{
@@ -769,7 +878,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Postgresql: pgConfig,
}
},
updateDatabase: func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database {
updateDatabase: func(workspaceID, databaseID uuid.UUID) *Database {
pgConfig := getTestPostgresConfig()
pgConfig.Password = ""
return &Database{
@@ -805,7 +914,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Mariadb: mariaConfig,
}
},
updateDatabase: func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database {
updateDatabase: func(workspaceID, databaseID uuid.UUID) *Database {
mariaConfig := getTestMariadbConfig()
mariaConfig.Password = ""
return &Database{
@@ -841,7 +950,7 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) {
Mongodb: mongoConfig,
}
},
updateDatabase: func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database {
updateDatabase: func(workspaceID, databaseID uuid.UUID) *Database {
mongoConfig := getTestMongodbConfig()
mongoConfig.Password = ""
return &Database{
@@ -1050,6 +1159,87 @@ func Test_TestConnection_PermissionsEnforced(t *testing.T) {
}
}
// Regenerating the agent token returns a fresh 32-character token and flips
// the database's IsAgentTokenGenerated flag.
func Test_RegenerateAgentToken_ReturnsToken(t *testing.T) {
	testRouter := createTestRouter()
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)

	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, testRouter)
	defer workspaces_testing.RemoveTestWorkspace(workspace, testRouter)

	existingDatabase := createTestDatabaseViaAPI("Test Database", workspace.ID, member.Token, testRouter)
	defer RemoveTestDatabase(existingDatabase)

	var tokenResponse map[string]string
	test_utils.MakePostRequestAndUnmarshal(
		t,
		testRouter,
		"/api/v1/databases/"+existingDatabase.ID.String()+"/regenerate-token",
		"Bearer "+member.Token,
		nil,
		http.StatusOK,
		&tokenResponse,
	)

	// The plaintext token is returned exactly once and is 32 characters long.
	assert.NotEmpty(t, tokenResponse["token"])
	assert.Len(t, tokenResponse["token"], 32)

	// Re-fetch the database and confirm the "token generated" flag is now set.
	var refreshedDatabase Database
	test_utils.MakeGetRequestAndUnmarshal(
		t,
		testRouter,
		"/api/v1/databases/"+existingDatabase.ID.String(),
		"Bearer "+member.Token,
		http.StatusOK,
		&refreshedDatabase,
	)
	assert.True(t, refreshedDatabase.IsAgentTokenGenerated)
}
// A token freshly issued by the regenerate endpoint must pass the public
// verify-token endpoint without any bearer auth.
func Test_VerifyAgentToken_WithValidToken_Succeeds(t *testing.T) {
	testRouter := createTestRouter()
	member := users_testing.CreateTestUser(users_enums.UserRoleMember)

	workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", member, testRouter)
	defer workspaces_testing.RemoveTestWorkspace(workspace, testRouter)

	existingDatabase := createTestDatabaseViaAPI("Test Database", workspace.ID, member.Token, testRouter)
	defer RemoveTestDatabase(existingDatabase)

	// Issue a fresh agent token for the database.
	var tokenResponse map[string]string
	test_utils.MakePostRequestAndUnmarshal(
		t,
		testRouter,
		"/api/v1/databases/"+existingDatabase.ID.String()+"/regenerate-token",
		"Bearer "+member.Token,
		nil,
		http.StatusOK,
		&tokenResponse,
	)

	agentToken := tokenResponse["token"]
	assert.NotEmpty(t, agentToken)

	// Verify on the public route — note the empty auth header.
	verifyResult := workspaces_testing.MakeAPIRequest(
		testRouter,
		"POST",
		"/api/v1/databases/verify-token",
		"",
		VerifyAgentTokenRequest{Token: agentToken},
	)
	assert.Equal(t, http.StatusOK, verifyResult.Code)
}
// A syntactically plausible but unknown token must be rejected with 401.
func Test_VerifyAgentToken_WithInvalidToken_Returns401(t *testing.T) {
	testRouter := createTestRouter()

	verifyResult := workspaces_testing.MakeAPIRequest(
		testRouter,
		"POST",
		"/api/v1/databases/verify-token",
		"", // public route — no bearer token
		VerifyAgentTokenRequest{Token: "invalidtoken00000000000000000000"},
	)

	assert.Equal(t, http.StatusUnauthorized, verifyResult.Code)
}
func createTestDatabaseViaAPI(
name string,
workspaceID uuid.UUID,
@@ -1101,11 +1291,20 @@ func createTestDatabaseViaAPI(
}
func createTestRouter() *gin.Engine {
router := workspaces_testing.CreateTestRouter(
workspaces_controllers.GetWorkspaceController(),
workspaces_controllers.GetMembershipController(),
GetDatabaseController(),
)
gin.SetMode(gin.TestMode)
router := gin.New()
v1 := router.Group("/api/v1")
protected := v1.Group("").Use(users_middleware.AuthMiddleware(users_services.GetUserService()))
workspaces_controllers.GetWorkspaceController().RegisterRoutes(protected.(*gin.RouterGroup))
workspaces_controllers.GetMembershipController().RegisterRoutes(protected.(*gin.RouterGroup))
GetDatabaseController().RegisterRoutes(protected.(*gin.RouterGroup))
GetDatabaseController().RegisterPublicRoutes(v1)
audit_logs.SetupDependencies()
return router
}
@@ -1118,13 +1317,14 @@ func getTestPostgresConfig() *postgresql.PostgresqlDatabase {
testDbName := "testdb"
return &postgresql.PostgresqlDatabase{
Version: tools.PostgresqlVersion16,
Host: config.GetEnv().TestLocalhost,
Port: port,
Username: "testuser",
Password: "testpassword",
Database: &testDbName,
CpuCount: 1,
BackupType: postgresql.PostgresBackupTypePgDump,
Version: tools.PostgresqlVersion16,
Host: config.GetEnv().TestLocalhost,
Port: port,
Username: "testuser",
Password: "testpassword",
Database: &testDbName,
CpuCount: 1,
}
}
@@ -1164,12 +1364,13 @@ func getTestMongodbConfig() *mongodb.MongodbDatabase {
return &mongodb.MongodbDatabase{
Version: tools.MongodbVersion7,
Host: config.GetEnv().TestLocalhost,
Port: port,
Port: &port,
Username: "root",
Password: "rootpassword",
Database: "testdb",
AuthDatabase: "admin",
IsHttps: false,
IsSrv: false,
CpuCount: 1,
}
}

Some files were not shown because too many files have changed in this diff Show More