diff --git a/.gitignore b/.gitignore
index 5ef6a52..4f1d653 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,41 +1,42 @@
-# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
-# dependencies
-/node_modules
-/.pnp
-.pnp.*
-.yarn/*
-!.yarn/patches
-!.yarn/plugins
-!.yarn/releases
-!.yarn/versions
-
-# testing
-/coverage
-
-# next.js
-/.next/
-/out/
-
-# production
-/build
-
-# misc
-.DS_Store
-*.pem
-
-# debug
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-.pnpm-debug.log*
-
-# env files (can opt-in for committing if needed)
-.env*
-
-# vercel
-.vercel
-
-# typescript
-*.tsbuildinfo
-next-env.d.ts
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/versions
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# env files (can opt-in for committing if needed)
+.env*
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
+.claude/
diff --git a/app/components/DocsSidebarComponent.tsx b/app/components/DocsSidebarComponent.tsx
index 3610a58..535b141 100644
--- a/app/components/DocsSidebarComponent.tsx
+++ b/app/components/DocsSidebarComponent.tsx
@@ -13,6 +13,9 @@ const navItems: NavItem[] = [
{
title: "Installation",
href: "/installation",
+ children: [
+ { title: "Agent mode", href: "/installation/agent" },
+ ],
},
{
title: "Storages",
diff --git a/app/databasus-vs-barman/page.tsx b/app/databasus-vs-barman/page.tsx
index cb059ce..9020de8 100644
--- a/app/databasus-vs-barman/page.tsx
+++ b/app/databasus-vs-barman/page.tsx
@@ -81,13 +81,15 @@ export default function DatabasusVsBarmanPage() {
- Databasus and Barman are both PostgreSQL backup tools, but they
- take fundamentally different approaches. Databasus provides an
- intuitive web interface for logical backups with team
- collaboration features, while Barman (Backup and Recovery
- Manager) is a command-line tool focused on physical backups and
- Point-in-Time Recovery (PITR) for enterprise disaster recovery
- scenarios.
+ Databasus and Barman are both PostgreSQL backup tools that
+ support physical backups, WAL archiving and Point-in-Time
+ Recovery. Databasus provides an intuitive web interface with
+ both logical and physical backup capabilities, team
+ collaboration features and support for multiple database
+ engines. Barman (Backup and Recovery Manager) is a
+ command-line tool with advanced features like rsync-based
+ incremental backups, streaming replication integration and
+ Barman-to-Barman geo-redundancy.
| Team features |
@@ -311,6 +324,16 @@ export default function DatabasusVsWalGPage() {
running PostgreSQL, MySQL, MariaDB or MongoDB benefit from
centralized backup management.
+
+ DBAs and disaster recovery: Physical
+ backups, WAL archiving and PITR for mission-critical systems
+ with near-zero data loss requirements.
+
+
+ DevOps engineers: Agent mode integrates
+ into existing infrastructure, while the web UI and API
+ provide visibility and control without custom scripting.
+
WAL-G audience
@@ -335,9 +358,9 @@ export default function DatabasusVsWalGPage() {
integrate well.
- Users needing PITR: Those requiring
- second-precise Point-in-Time Recovery for mission-critical
- systems.
+ Extended database support: Teams needing
+ backup for MS SQL, FoundationDB or Greenplum alongside
+ PostgreSQL.
@@ -348,26 +371,33 @@ export default function DatabasusVsWalGPage() {
with distinct advantages:
- Databasus: Logical backups
+
+ Databasus: Logical + Physical backups
+
- Databasus uses pg_dump for logical backups,
- creating SQL representations of your data:
+ Databasus supports both logical and physical backup
+ strategies:
-
- Portable: Backups can be restored to
- different PostgreSQL versions or even different servers.
+ Logical backups (remote mode): Uses{" "}
+
pg_dump for portable backups that can be
+ restored to different PostgreSQL versions. No agent required.
+
+ -
+ Physical backups (agent mode): File-level
+ copies via
pg_basebackup with continuous WAL
+ archiving and Point-in-Time Recovery.
-
Efficient compression: Uses zstd (level 5)
- compression, reducing backup sizes by 4-8x with only ~20%
- runtime overhead.
+ for both backup types, reducing sizes by 4-8x.
-
- Read-only access: Only requires SELECT
- permissions, minimizing security risks.
+ Read-only access: Logical backups only
+ require SELECT permissions, minimizing security risks.
@@ -410,21 +440,25 @@ export default function DatabasusVsWalGPage() {
-
- Restore to any hour or day: With hourly,
- daily, weekly, monthly or cron backup schedules, you can
- restore to any backup point you've configured.
+ Point-in-Time Recovery: Restore to any
+ specific second using WAL replay via the agent.
+
+ -
+ Full cluster restore: Restore the entire
+ database cluster to a specific point in time from physical
+ backups.
+
+ -
+ Logical restore: Restore from scheduled
+ logical backups to any backup point.
-
One-click restore: Download and restore
- backups directly from the web interface.
+ logical backups directly from the web interface.
-
- Parallel restores: Utilize multiple CPU cores
- to speed up restoration of large backups.
-
- -
- Cross-version compatibility: Restore backups
- to different PostgreSQL versions when needed.
+ Cross-version compatibility: Logical
+ backups can be restored to different PostgreSQL versions.
@@ -451,17 +485,15 @@ export default function DatabasusVsWalGPage() {
- Note: For most
- applications, restoring to the nearest hour or day (as
- Databasus provides) is sufficient. Second-precise PITR is
- typically only required for mission-critical financial or
- transactional systems where every transaction must be
- recoverable.{" "}
+ Note: Both
+ tools support PITR. WAL-G additionally offers delta restore
+ (fetching only changed pages) and uses a custom streaming
+ protocol for slightly better performance at scale.{" "}
- Learn why Databasus doesn't support PITR →
+ Learn how Databasus supports PITR →
@@ -804,8 +836,9 @@ export default function DatabasusVsWalGPage() {
You want built-in scheduling without external cron setup
- Restoring to any hour or day meets your recovery
- requirements
+ You want to manage backups for multiple databases from a
+ single dashboard with scheduling, notifications and team
+ features
You want quick setup with minimal database expertise
Built-in backup encryption is important to you
@@ -822,48 +855,39 @@ export default function DatabasusVsWalGPage() {
-
- You manage multiple self-hosted database systems
- (PostgreSQL, MySQL, MongoDB, etc.) and want a unified tool
+ You need delta backups (changed pages only) for reduced
+ storage and transfer time
-
- You require second-precise Point-in-Time Recovery for
- mission-critical self-hosted systems
-
- -
- Delta backups are important for reducing storage and
- transfer time
+ You need support for MS SQL, FoundationDB or Greenplum
-
You prefer command-line tools and infrastructure-as-code
workflows
-
- You're comfortable setting up cron jobs and custom
- notification scripts
+ You want multiple compression algorithms (LZ4, LZMA,
+ Brotli, zstd) with fine-tuned control
-
Your team has DevOps expertise for CLI-based tool management
- -
- You're building a database platform and need to backup
- customer databases with PITR capabilities
-
- For comprehensive backup management, Databasus offers a more
- streamlined experience with its unified interface and built-in
- features — and works seamlessly with both self-hosted and
- cloud-managed databases. Databasus is suitable for comprehensive
- backup management of production databases, not just backups.
+ Both tools support physical backups, WAL archiving and PITR.
+ Databasus provides comprehensive backup management with its
+ web interface, team features and support for both logical and
+ physical backups — working seamlessly with both self-hosted
+ and cloud-managed databases.
- WAL-G remains an excellent choice for organizations with
- self-hosted databases who prefer CLI-based workflows and need
- advanced features like delta backups and precise PITR, or for
- teams building database platforms that need to provide PITR
- capabilities to their customers.
+ WAL-G remains an excellent choice for teams that prefer
+ CLI-based workflows and need its unique advantages: delta
+ backups (changed pages only), a custom streaming protocol for
+ slightly better performance and support for additional
+ database engines beyond PostgreSQL.
diff --git a/app/faq/page.tsx b/app/faq/page.tsx
index d48e826..92b36cd 100644
--- a/app/faq/page.tsx
+++ b/app/faq/page.tsx
@@ -49,10 +49,10 @@ export default function FAQPage() {
mainEntity: [
{
"@type": "Question",
- name: "Why does Databasus not use raw SQL dump format for PostgreSQL?",
+ name: "Why does Databasus not use raw SQL dump format for logical PostgreSQL backups?",
acceptedAnswer: {
"@type": "Answer",
- text: "Databasus uses the custom format with zstd compression because it provides the most efficient backup and restore speed after extensive testing. The custom format with zstd compression level 5 offers the optimal balance between backup creation speed, restore speed and file size.",
+ text: "For logical backups, Databasus uses pg_dump's custom format with zstd compression because it provides the most efficient backup and restore speed after extensive testing. The custom format with zstd compression level 5 offers the optimal balance between backup creation speed, restore speed and file size.",
},
},
{
@@ -65,10 +65,10 @@ export default function FAQPage() {
},
{
"@type": "Question",
- name: "Why doesn't Databasus support PITR (Point-in-Time Recovery)?",
+ name: "How does Databasus support PITR (Point-in-Time Recovery)?",
acceptedAnswer: {
"@type": "Answer",
- text: "Databasus intentionally focuses on logical backups rather than PITR for several practical reasons: PITR tools typically need to be installed on the same server as your database; incremental backups cannot be restored without direct access to the database storage drive; managed cloud databases don't allow restoring external PITR backups; cloud providers already offer native PITR capabilities; and for 99% of projects, hourly or daily logical backups provide adequate recovery points without the operational complexity of WAL archiving.",
+ text: "Databasus supports PITR through the Databasus agent — a lightweight Go binary that runs alongside your PostgreSQL database. The agent continuously streams compressed WAL segments to Databasus and performs periodic physical backups via pg_basebackup. To restore, run the agent's restore command with a target timestamp — it downloads the full backup and WAL segments from Databasus, configures PostgreSQL recovery mode, and replays WAL to the exact target time. Suitable for disaster recovery with near-zero data loss, databases in closed networks and large databases where physical backups are faster than logical dumps.",
},
},
{
@@ -118,11 +118,13 @@ export default function FAQPage() {
- Why does Databasus not use raw SQL dump format for PostgreSQL?
+ Why does Databasus not use raw SQL dump format for logical
+ PostgreSQL backups?
- Databasus uses the pg_dump's{" "}
+ For logical backups, Databasus uses{" "}
+ pg_dump's{" "}
custom format with{" "}
zstd compression at level 5 instead of the
plain SQL format because it provides the most efficient balance
@@ -170,53 +172,95 @@ export default function FAQPage() {
directory.
-
- Why doesn't Databasus support PITR (Point-in-Time
- Recovery)?
+
+ How does Databasus support PITR (Point-in-Time Recovery)?
- Databasus intentionally focuses on logical backups rather than
- PITR for several practical reasons:
+ Databasus supports PITR through the{" "}
+ Databasus agent — a lightweight Go binary that
+ runs alongside your PostgreSQL database. The agent connects
+ outbound to your Databasus instance, so the database never
+ needs to be exposed publicly.
-
- -
- Complex setup requirements — PITR tools
- typically need to be installed on the same server as your
- database, requiring direct filesystem access and careful
- configuration
-
- -
- Restoration limitations — incremental backups
- cannot be restored without direct access to the database
- storage drive
-
- -
- Cloud incompatibility — managed cloud
- databases (AWS RDS, Google Cloud SQL, Azure) don't allow
- restoring external PITR backups, making them useless for
- cloud-hosted PostgreSQL
-
- -
- Built-in cloud PITR — cloud providers already
- offer native PITR capabilities and even they typically default
- to hourly or daily granularity
-
- -
- Practical sufficiency — for 99% of projects,
- hourly or daily logical backups provide adequate recovery
- points without the operational complexity of WAL archiving
-
-
-
- So instead of second-by-second restoration complexity, Databasus
- prioritizes an intuitive UX for individuals and teams, making it
- the most reliable tool for managing multiple databases and day
- to day use.
+ How backups work:
+
+ -
+ The agent runs two concurrent processes:{" "}
+ WAL streaming and{" "}
+ periodic physical backups
+
+ -
+ WAL segments are compressed with zstd and continuously
+ uploaded to Databasus with gap detection to ensure chain
+ integrity
+
+ -
+ Physical backups are created via{" "}
+
pg_basebackup, streamed as compressed TAR
+ directly to Databasus — no intermediate files on disk
+
+ -
+ Full backups are triggered on schedule or automatically when
+ the WAL chain is broken
+
+
+
+
+ How restoration works:
+
+
+
+ -
+ Run{" "}
+
+ databasus-agent restore --target-dir <pgdata>
+ --target-time <timestamp>
+
+
+ -
+ The agent downloads the full backup and all required WAL
+ segments from Databasus
+
+ -
+ It extracts the basebackup, configures PostgreSQL recovery
+ mode (
recovery.signal,{" "}
+ restore_command,{" "}
+ recovery_target_time)
+
+ -
+ Start PostgreSQL — it replays WAL to the target time,
+ promotes to primary and resumes normal operations
+
+
+
+
+ Suitable for:
+
+
+
+ -
+ Disaster recovery with near-zero data loss — restore to any
+ second between backups
+
+ -
+ Self-hosted and on-premise databases where hourly or daily
+ logical backups are not granular enough
+
+ -
+ Databases in closed networks — the agent connects outbound
+ to Databasus, so no inbound access is needed
+
+ -
+ Large databases where physical backups are faster than
+ logical dumps
+
+
+
How is AI used in Databasus development?
diff --git a/app/installation/agent/page.tsx b/app/installation/agent/page.tsx
new file mode 100644
index 0000000..d700187
--- /dev/null
+++ b/app/installation/agent/page.tsx
@@ -0,0 +1,688 @@
+import type { Metadata } from "next";
+import { CopyButton } from "../../components/CopyButton";
+import DocsNavbarComponent from "../../components/DocsNavbarComponent";
+import DocsSidebarComponent from "../../components/DocsSidebarComponent";
+import DocTableOfContentComponent from "../../components/DocTableOfContentComponent";
+
+export const metadata: Metadata = {
+ title: "Agent Installation - Databasus Documentation",
+ description:
+ "Install the Databasus agent for physical backups, incremental backups, WAL archiving and Point-in-Time Recovery (PITR) of PostgreSQL databases.",
+ keywords: [
+ "Databasus agent",
+ "PostgreSQL physical backup",
+ "WAL archiving",
+ "PITR",
+ "Point-in-Time Recovery",
+ "pg_basebackup",
+ "incremental backup",
+ "disaster recovery",
+ "PostgreSQL agent",
+ "database backup agent",
+ ],
+ openGraph: {
+ title: "Agent Installation - Databasus Documentation",
+ description:
+ "Install the Databasus agent for physical backups, incremental backups, WAL archiving and Point-in-Time Recovery (PITR) of PostgreSQL databases.",
+ type: "article",
+ url: "https://databasus.com/installation/agent",
+ },
+ twitter: {
+ card: "summary",
+ title: "Agent Installation - Databasus Documentation",
+ description:
+ "Install the Databasus agent for physical backups, incremental backups, WAL archiving and Point-in-Time Recovery (PITR) of PostgreSQL databases.",
+ },
+ alternates: {
+ canonical: "https://databasus.com/installation/agent",
+ },
+ robots: "index, follow",
+};
+
+export default function AgentInstallationPage() {
+  const downloadCommand = `curl -L -o databasus-agent "<DATABASUS_HOST>/api/v1/system/agent?arch=<ARCH>" && chmod +x databasus-agent`;
+
+  const postgresqlConf = `wal_level = replica
+archive_mode = on
+archive_command = 'cp %p <WAL_QUEUE_DIR>/%f.tmp && mv <WAL_QUEUE_DIR>/%f.tmp <WAL_QUEUE_DIR>/%f'`;
+
+ const postgresqlConfDocker = `wal_level = replica
+archive_mode = on
+archive_command = 'cp %p /wal-queue/%f.tmp && mv /wal-queue/%f.tmp /wal-queue/%f'`;
+
+ const pgHbaEntry = `host replication all 127.0.0.1/32 md5`;
+
+  const grantReplication = `ALTER ROLE <PG_USER> WITH REPLICATION;`;
+
+ const createWalDir = `mkdir -p /opt/databasus/wal-queue`;
+
+ const walDirPermissions = `chown postgres:postgres /opt/databasus/wal-queue
+chmod 755 /opt/databasus/wal-queue`;
+
+ const dockerVolumeExample = `# In your docker run command:
+docker run ... -v /opt/databasus/wal-queue:/wal-queue ...
+
+# Or in docker-compose.yml:
+volumes:
+ - /opt/databasus/wal-queue:/wal-queue`;
+
+ const dockerWalDirPermissions = `# Inside the container (or via docker exec):
+chown postgres:postgres /wal-queue`;
+
+  const startCommandHost = `./databasus-agent start \\
+  --databasus-host=<DATABASUS_HOST> \\
+  --db-id=<DB_ID> \\
+  --token=<YOUR_AGENT_TOKEN> \\
+  --pg-host=localhost \\
+  --pg-port=5432 \\
+  --pg-user=<PG_USER> \\
+  --pg-password=<PG_PASSWORD> \\
+  --pg-type=host \\
+  --pg-wal-dir=/opt/databasus/wal-queue`;
+
+  const startCommandFolder = `./databasus-agent start \\
+  --databasus-host=<DATABASUS_HOST> \\
+  --db-id=<DB_ID> \\
+  --token=<YOUR_AGENT_TOKEN> \\
+  --pg-host=localhost \\
+  --pg-port=5432 \\
+  --pg-user=<PG_USER> \\
+  --pg-password=<PG_PASSWORD> \\
+  --pg-type=host \\
+  --pg-host-bin-dir=<PG_BIN_DIR> \\
+  --pg-wal-dir=/opt/databasus/wal-queue`;
+
+  const startCommandDocker = `./databasus-agent start \\
+  --databasus-host=<DATABASUS_HOST> \\
+  --db-id=<DB_ID> \\
+  --token=<YOUR_AGENT_TOKEN> \\
+  --pg-host=localhost \\
+  --pg-port=5432 \\
+  --pg-user=<PG_USER> \\
+  --pg-password=<PG_PASSWORD> \\
+  --pg-type=docker \\
+  --pg-docker-container-name=<CONTAINER_NAME> \\
+  --pg-wal-dir=/opt/databasus/wal-queue`;
+
+  const restoreCommand = `./databasus-agent restore \\
+  --databasus-host=<DATABASUS_HOST> \\
+  --db-id=<DB_ID> \\
+  --token=<YOUR_AGENT_TOKEN> \\
+  --backup-id=<BACKUP_ID> \\
+  --target-dir=<PGDATA_DIR>`;
+
+  const restoreCommandDocker = `./databasus-agent restore \\
+  --databasus-host=<DATABASUS_HOST> \\
+  --db-id=<DB_ID> \\
+  --token=<YOUR_AGENT_TOKEN> \\
+  --backup-id=<BACKUP_ID> \\
+  --pg-type=docker \\
+  --target-dir=<HOST_PGDATA_PATH>`;
+
+  const restoreCommandPitr = `./databasus-agent restore \\
+  --databasus-host=<DATABASUS_HOST> \\
+  --db-id=<DB_ID> \\
+  --token=<YOUR_AGENT_TOKEN> \\
+  --backup-id=<BACKUP_ID> \\
+  --target-dir=<PGDATA_DIR> \\
+  --target-time=<TIMESTAMP>`;
+
+  const archiveCommandCleanup = `# In <PGDATA_DIR>/postgresql.auto.conf, remove or comment out:
+# archive_mode = on
+# archive_command = '...'`;
+
+ return (
+ <>
+ {/* JSON-LD Structured Data */}
+
+
+
+
+
+ {/* Sidebar */}
+
+
+ {/* Main Content */}
+
+
+
+ Agent mode
+
+
+ The Databasus agent enables physical backups, incremental
+ backups, WAL archiving and Point-in-Time Recovery (PITR) for
+ PostgreSQL databases.
+
+
+ {/* When to use */}
+ When to use the agent
+
+
+ For most databases,{" "}
+ remote backups are the simplest option.
+ Databasus connects directly to the database over the network,
+ performs logical backups using pg_dump, and requires no
+ additional software on the database server. Remote backups work
+ with cloud-managed databases (RDS, Cloud SQL, Supabase) and
+ self-hosted instances alike.
+
+
+
+ The agent is designed for scenarios where remote backups are
+ not sufficient:
+
+
+
+ -
+ Disaster recovery with PITR — restore to any
+ second between backups with near-zero data loss
+
+ -
+ Physical backups — file-level copy of the
+ entire database cluster, faster backup and restore for large
+ datasets
+
+ -
+ Databases not exposed publicly — the agent
+ connects outbound to Databasus, so the database never needs a
+ public endpoint
+
+ -
+ Incremental backups — continuous WAL segment
+ archiving combined with periodic base backups
+
+
+
+ {/* In-app guided setup */}
+ In-app guided setup
+
+
+ Databasus provides interactive installation and restore
+ instructions directly in the UI. When you open the agent
+ settings for a database, all commands are pre-filled with your
+ specific values: architecture, database ID, agent token,
+ Databasus host, and PostgreSQL deployment type. You can copy
+ each command and run it on your server.
+
+
+
+ The documentation below covers the same steps for reference and
+ for users who prefer to follow a guide outside the UI.
+
+
+ {/* Requirements */}
+ Requirements
+
+
+ -
+ PostgreSQL 15 or newer
+
+ -
+ Linux (amd64 or arm64)
+
+ -
+ Network access from the agent to your
+ Databasus instance (outbound only — the database does not need
+ to be reachable from Databasus)
+
+
+
+ {/* Installation */}
+ Installation
+
+ Step 1 — Download the agent
+
+
+ Download the agent binary on the server where PostgreSQL runs.
+ Replace <DATABASUS_HOST> with your Databasus
+ instance URL and <ARCH> with{" "}
+ amd64 or arm64.
+
+
+
+
+ {downloadCommand}
+
+
+
+
+
+
+
+ Step 2 — Configure postgresql.conf
+
+
+
+ Add or update these settings in your{" "}
+ postgresql.conf, then{" "}
+ restart PostgreSQL.
+
+
+
+ For host installations (replace{" "}
+ <WAL_QUEUE_DIR> with the actual path, e.g.{" "}
+ /opt/databasus/wal-queue):
+
+
+
+
+ {postgresqlConf}
+
+
+
+
+
+
+
+ For Docker installations, the{" "}
+ archive_command path (/wal-queue) is
+ the path inside the container. It must match
+ the volume mount target — see Step 5.
+
+
+
+
+ {postgresqlConfDocker}
+
+
+
+
+
+
+ Step 3 — Configure pg_hba.conf
+
+
+ Add this line to pg_hba.conf. This is required for{" "}
+ pg_basebackup to take full backups — not for
+ streaming replication. Adjust the address and auth method as
+ needed, then reload PostgreSQL.
+
+
+
+
+ {pgHbaEntry}
+
+
+
+
+
+
+
+ Step 4 — Grant replication privilege
+
+
+
+ This is a PostgreSQL requirement for running{" "}
+ pg_basebackup — it does not set up a replica.
+
+
+
+
+ {grantReplication}
+
+
+
+
+
+
+
+ Step 5 — Create WAL queue directory
+
+
+
+ PostgreSQL places WAL archive files here for the agent to
+ upload.
+
+
+
+
+ {createWalDir}
+
+
+
+
+
+
+
+ Ensure the directory is writable by PostgreSQL and readable by
+ the agent:
+
+
+
+
+ {walDirPermissions}
+
+
+
+
+
+
+
+ For Docker installations, the WAL queue
+ directory must be a volume mount shared between the PostgreSQL
+ container and the host. The agent reads WAL files from the host
+ path, while PostgreSQL writes to the container path via{" "}
+ archive_command.
+
+
+
+
+ {dockerVolumeExample}
+
+
+
+
+
+
+
+ Ensure the directory inside the container is owned by the{" "}
+ postgres user:
+
+
+
+
+ {dockerWalDirPermissions}
+
+
+
+
+
+
+ Step 6 — Start the agent
+
+
+ Replace placeholders in <ANGLE_BRACKETS> with
+ your actual values.
+
+
+
+ System-wide PostgreSQL (pg_basebackup available
+ in PATH):
+
+
+
+
+ {startCommandHost}
+
+
+
+
+
+
+
+ PostgreSQL in a specific folder (e.g.{" "}
+ /usr/lib/postgresql/17/bin):
+
+
+
+
+ {startCommandFolder}
+
+
+
+
+
+
+
+ Docker (use the PostgreSQL port{" "}
+ inside the container, usually 5432, not the
+ host-mapped port):
+
+
+
+
+ {startCommandDocker}
+
+
+
+
+
+
+ After installation
+
+
+ -
+ The agent runs in the background after{" "}
+
start
+
+ -
+ Check status:
./databasus-agent status
+
+ -
+ View logs:
databasus.log in the working directory
+
+ -
+ Stop the agent:
./databasus-agent stop
+
+
+
+ {/* Restore */}
+ Restore from agent backup
+
+
+ Restore a physical or incremental backup to a target directory.
+ For Point-in-Time Recovery, add the{" "}
+ --target-time flag to restore to a specific moment.
+
+
+ Step 1 — Download the agent
+
+
+ Download the agent binary on the server where you want to
+ restore (same command as installation Step 1).
+
+
+
+
+ {downloadCommand}
+
+
+
+
+
+
+ Step 2 — Stop PostgreSQL
+
+
+ PostgreSQL must be stopped before restoring. The target directory
+ must be empty.
+
+
+
+
+ pg_ctl -D <PGDATA_DIR> stop
+
+
+
+ For Docker:
+
+
+
+ docker stop <CONTAINER_NAME>
+
+
+
+ Step 3 — Run restore
+
+
+ Replace <YOUR_AGENT_TOKEN> with your agent
+ token and <PGDATA_DIR> with the path to an
+ empty PostgreSQL data directory.
+
+
+
+ Host installation:
+
+
+
+
+ {restoreCommand}
+
+
+
+
+
+
+
+ Docker installation (
+ <HOST_PGDATA_PATH> is the path on the host
+ that will be mounted as the container's pgdata volume):
+
+
+
+
+ {restoreCommandDocker}
+
+
+
+
+
+
+
+ For Point-in-Time Recovery (PITR), add{" "}
+ --target-time with an RFC 3339 timestamp (e.g.{" "}
+ 2025-01-15T14:30:00Z):
+
+
+
+
+ {restoreCommandPitr}
+
+
+
+
+
+
+
+ Step 4 — Handle archive_command
+
+
+
+ The restored backup includes the original{" "}
+ archive_command configuration. PostgreSQL will fail
+ to archive WAL files after recovery unless you either:
+
+
+
+ -
+ Re-attach the agent — mount the WAL queue
+ directory and start the Databasus agent on the restored
+ instance, same as the original setup.
+
+ -
+ Disable archiving — if you don't need
+ continuous backups yet, comment out or reset the archive
+ settings in
postgresql.auto.conf:
+
+
+
+
+
+ {archiveCommandCleanup}
+
+
+
+
+
+
+ Step 5 — Start PostgreSQL
+
+
+ Start PostgreSQL to begin WAL recovery. It will automatically
+ replay WAL segments.
+
+
+
+
+ pg_ctl -D <PGDATA_DIR> start
+
+
+
+ For Docker:
+
+
+
+ docker start <CONTAINER_NAME>
+
+
+
+ Step 6 — Clean up
+
+
+ After recovery completes, remove the WAL restore directory:
+
+
+
+
+ rm -rf <PGDATA_DIR>/databasus-wal-restore/
+
+
+
+ {/* How it works */}
+ How it works
+
+
+ The Databasus agent is a lightweight Go binary that runs two
+ concurrent processes:
+
+
+
+ -
+ WAL streaming — picks up WAL segment files
+ from the queue directory approximately every 10 seconds and
+ uploads them to Databasus
+
+ -
+ Periodic base backups — runs{" "}
+
pg_basebackup on the configured schedule to
+ create full physical backups of the database cluster
+
+
+
+
+ During restoration, the agent downloads the base backup and all
+ relevant WAL segments, then configures{" "}
+ recovery.signal and restore_command in{" "}
+ postgresql.auto.conf. When PostgreSQL starts, it
+ replays the WAL segments to reach the target recovery point.
+
+
+
+ The agent always initiates the connection to Databasus
+ (outbound). The database server does not need to accept incoming
+ connections from Databasus, making it suitable for private
+ networks and firewalled environments.
+
+
+
+
+
+ {/* Table of Contents */}
+
+
+ >
+ );
+}
diff --git a/app/installation/page.tsx b/app/installation/page.tsx
index 9b90962..720c499 100644
--- a/app/installation/page.tsx
+++ b/app/installation/page.tsx
@@ -460,6 +460,37 @@ sudo curl -sSL https://raw.githubusercontent.com/databasus/databasus/refs/heads/
server's IP address before starting the services.
+
+ Agent mode (physical backups & PITR)
+
+
+
+ The installation above sets up the Databasus server, which
+ handles remote (logical) backups out of the box. For most
+ databases, remote backups are the simplest option — no
+ additional software is required on the database server.
+
+
+
+ For physical backups, incremental backups and Point-in-Time
+ Recovery (PITR), Databasus uses a lightweight agent that runs
+ alongside your PostgreSQL server. The agent is designed for
+ advanced scenarios: disaster recovery with near-zero data loss,
+ databases that cannot be exposed publicly, or large datasets
+ where file-level backups are faster than logical dumps.
+
+
+
+ See the{" "}
+
+ Agent installation guide
+ {" "}
+ for setup instructions.
+
+
Getting started
After installation:
diff --git a/app/page.tsx b/app/page.tsx
index cd0c496..6482c51 100644
--- a/app/page.tsx
+++ b/app/page.tsx
@@ -188,7 +188,7 @@ export default function Index() {
name: "How is Databasus different from PgBackRest, Barman or pg_dump?",
acceptedAnswer: {
"@type": "Answer",
- text: "Databasus provides a modern, user-friendly web interface instead of complex configuration files and command-line tools. While PgBackRest and Barman require extensive configuration and command-line expertise, Databasus offers intuitive point-and-click setup. Unlike raw pg_dump scripts, it includes built-in scheduling, compression, multiple storage destinations, health monitoring and real-time notifications — all managed through a simple web UI.",
+ text: "Databasus prefers simplicity — it provides a modern web interface to manage backups for many databases at once, with built-in scheduling, compression, multiple storage destinations, health monitoring and real-time notifications. At the same time, Databasus also works in agent mode for disaster recovery with WAL archiving and Point-in-Time Recovery. The agent connects from closed networks to the Databasus instance and streams backups, so databases that are not publicly exposed can still be backed up and managed from a single dashboard.",
},
},
{
@@ -207,6 +207,14 @@ export default function Index() {
text: "Yes! You can restore backups directly from storage (like S3, Google Drive, etc.) without Databasus itself. There is no vendor lock-in, even on this open source tool. With just your secret.key file, you can decrypt and restore any backup manually using standard database tools. This means if your Databasus instance is unavailable or you lose access to it, your backups remain fully recoverable.",
},
},
+ {
+ "@type": "Question",
+ name: "What backup types does Databasus support?",
+ acceptedAnswer: {
+ "@type": "Answer",
+ text: "Databasus supports logical, physical, WAL archiving and Point-in-Time Recovery (PITR). In remote mode, Databasus connects to the database over the network and performs logical backups — no agent needed. In agent mode, a lightweight Go agent runs alongside the database and connects to the Databasus instance, enabling physical backups with continuous WAL archiving and PITR for disaster recovery. Because the agent connects to Databasus, you can manage incremental backups for many databases from a single dashboard.",
+ },
+ },
],
}),
}}
@@ -800,7 +808,7 @@ export default function Index() {
- {/* Card 11: Suitable for clouds */}
+ {/* Card 11: Backup types and modes */}
11
@@ -808,16 +816,16 @@ export default function Index() {
- Suitable both for self-hosted and cloud databases
+ Logical, physical and incremental backups with PITR
- Databasus works seamlessly with cloud-hosted databases
- including AWS RDS, Google Cloud SQL, Azure Database. This is
- why Databasus does not support incremental backups: clouds
- already have them, but usually you cannot restore external
- PITR backup back to the cloud. Hourly and daily granularity
- are enough for 99% of even large enterprise projects
+ Databasus supports logical, physical and incremental backups
+ with Point-in-Time Recovery. This makes Databasus suitable for
+ disaster recovery with WAL archiving and PITR, and works
+ equally well with self-hosted and cloud databases — use remote
+ mode for cloud-managed or publicly accessible DBs, and agent
+ mode for closed networks and host-installed databases
@@ -1322,16 +1330,21 @@ export default function Index() {
question="How is Databasus different from PgBackRest, Barman or pg_dump? Where can I read comparisons?"
answer={
<>
- Unlike other tools, Databasus provides a way to manage many
- databases for teams in a modern, user-friendly web interface
- (instead of complex configuration files and command-line
- tools). While PgBackRest and Barman just backups single
- database, require extensive configuration and command-line
- expertise — Databasus offers way to setup full backup
- management for many databases. Unlike raw pg_dump scripts, it
- includes built-in scheduling, compression, multiple storage
- destinations, health monitoring and real-time notifications —
- all managed through a simple web UI.
+ Databasus prefers simplicity — it provides a modern web
+ interface to manage backups for many databases at once,
+ instead of complex configuration files and command-line tools.
+ Unlike raw pg_dump scripts, it includes built-in scheduling,
+ compression, multiple storage destinations, health monitoring
+ and real-time notifications — all managed through a simple web
+ UI.
+
+
+ At the same time, Databasus also works in agent mode — similar
+ to WAL-G or pgBackRest — for disaster recovery with WAL
+ archiving and Point-in-Time Recovery. The agent connects from
+ closed networks to the Databasus instance, so databases that
+ are not publicly exposed can still be backed up and managed
+ from a single dashboard.
We have detailed comparison pages for popular backup tools:{" "}
@@ -1415,7 +1428,7 @@ export default function Index() {
- Databasus intentionally focuses on logical backups rather than
- PITR for several practical reasons:
-
- -
- Complex setup requirements — PITR tools
- typically need to be installed on the same server as your
- database, requiring direct filesystem access and careful
- configuration. You can backup only single database
- (Databasus allows many)
-
- -
- Restoration limitations — incremental
- backups cannot be restored without direct access to the
- database storage drive
-
- -
- Cloud incompatibility — managed cloud
- databases (AWS RDS, Google Cloud SQL, Azure, Supabase)
- don't allow restoring external PITR backups, making
- them useless for cloud-hosted PostgreSQL
-
- -
- Built-in cloud PITR — cloud providers
- already offer native PITR capabilities, and even they
- typically default to hourly or daily granularity
-
- -
- Practical sufficiency — for 99% of
- projects, hourly or daily logical backups provide adequate
- recovery points without the operational complexity of WAL
- archiving
-
-
+ Databasus supports logical and physical backups, WAL
+ archiving and Point-in-Time Recovery (PITR) — so it suits
+ both those who want simple remote backups and those who need
+ a solid disaster recovery tool.
- So instead of second-by-second restoration complexity,
- Databasus prioritizes an intuitive UX for individuals and
- teams, making it the most reliable tool for managing multiple
- databases and day to day use
+
+ Remote mode — Databasus connects to the
+ database over the network and performs logical backups (like
+ pg_dump). No agent or additional software required. Ideal for
+ cloud-managed and publicly accessible databases.
+
+
+ Agent mode — a lightweight agent runs
+ alongside the database and connects to the Databasus instance.
+ This enables physical backups with continuous WAL archiving
+ and PITR — designed for disaster recovery and near-zero data
+ loss. The agent streams backups directly to Databasus, so the
+ database never needs to be exposed publicly.
+
+
+ Because the agent connects to the Databasus instance, you can
+ manage incremental backups for many databases from a single
+ dashboard — unlike standalone tools like WAL-G or pgBackRest
+ where each database is managed separately.
>
}
/>
diff --git a/app/pgdump-alternative/page.tsx b/app/pgdump-alternative/page.tsx
index 6d71ef8..1a1ece4 100644
--- a/app/pgdump-alternative/page.tsx
+++ b/app/pgdump-alternative/page.tsx
@@ -80,12 +80,14 @@ export default function PgDumpAlternativePage() {
pg_dump Alternative
- Databasus is a PostgreSQL backup tool built on top of{" "}
+ For logical backups, Databasus is built on top of{" "}
pg_dump. Rather than replacing pg_dump
, Databasus extends its capabilities with backups management, a
web interface, automated scheduling, cloud storage integration,
notifications, team collaboration features and built-in
- encryption.
+ encryption. Beyond logical backups, Databasus also supports
+ physical backups, incremental backups with WAL archiving and
+ Point-in-Time Recovery.
Quick comparison
@@ -172,6 +174,26 @@ export default function PgDumpAlternativePage() {
❌ None |
✅ Built-in health checks |
|
+
+ These features are available through the Databasus agent — a
+ lightweight binary that runs alongside the database and connects
+ to the Databasus instance. The database never needs to be
+ exposed publicly, making it suitable for closed networks and
+ self-hosted databases.
+
+