diff --git a/.dockerignore b/.dockerignore index 3a15897..a2a9da0 100644 --- a/.dockerignore +++ b/.dockerignore @@ -12,6 +12,7 @@ backend/tools backend/mysqldata backend/pgdata backend/mariadbdata +backend/mongodbdata backend/temp backend/images backend/bin @@ -59,7 +60,10 @@ deploy LICENSE CITATION.cff *.md -assets + +# Assets - exclude SVGs but keep tools +assets/*.svg +assets/tools/download_postgresql.sh # Python cache **/__pycache__ diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml index cd08bfa..e66563e 100644 --- a/.github/workflows/ci-release.yml +++ b/.github/workflows/ci-release.yml @@ -212,6 +212,14 @@ jobs: TEST_SUPABASE_USERNAME=${{ secrets.TEST_SUPABASE_USERNAME }} TEST_SUPABASE_PASSWORD=${{ secrets.TEST_SUPABASE_PASSWORD }} TEST_SUPABASE_DATABASE=${{ secrets.TEST_SUPABASE_DATABASE }} + # testing MongoDB + TEST_MONGODB_40_PORT=27040 + TEST_MONGODB_42_PORT=27042 + TEST_MONGODB_44_PORT=27044 + TEST_MONGODB_50_PORT=27050 + TEST_MONGODB_60_PORT=27060 + TEST_MONGODB_70_PORT=27070 + TEST_MONGODB_80_PORT=27080 EOF - name: Start test containers @@ -276,6 +284,22 @@ jobs: echo "Waiting for MariaDB 12.0..." timeout 120 bash -c 'until docker exec test-mariadb-120 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' + # Wait for MongoDB containers + echo "Waiting for MongoDB 4.0..." + timeout 120 bash -c 'until docker exec test-mongodb-40 mongo --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + echo "Waiting for MongoDB 4.2..." + timeout 120 bash -c 'until docker exec test-mongodb-42 mongo --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + echo "Waiting for MongoDB 4.4..." 
+ timeout 120 bash -c 'until docker exec test-mongodb-44 mongo --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + echo "Waiting for MongoDB 5.0..." + timeout 120 bash -c 'until docker exec test-mongodb-50 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + echo "Waiting for MongoDB 6.0..." + timeout 120 bash -c 'until docker exec test-mongodb-60 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + echo "Waiting for MongoDB 7.0..." + timeout 120 bash -c 'until docker exec test-mongodb-70 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + echo "Waiting for MongoDB 8.0..." + timeout 120 bash -c 'until docker exec test-mongodb-80 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + - name: Create data and temp directories run: | # Create directories that are used for backups and restore @@ -304,6 +328,13 @@ jobs: path: backend/tools/mariadb key: mariadb-clients-106-121-v1 + - name: Cache MongoDB Database Tools + id: cache-mongodb + uses: actions/cache@v4 + with: + path: backend/tools/mongodb + key: mongodb-database-tools-100.10.0-v1 + - name: Install MySQL dependencies run: | sudo apt-get update -qq @@ -311,8 +342,8 @@ jobs: sudo ln -sf /usr/lib/x86_64-linux-gnu/libncurses.so.6 /usr/lib/x86_64-linux-gnu/libncurses.so.5 sudo ln -sf /usr/lib/x86_64-linux-gnu/libtinfo.so.6 /usr/lib/x86_64-linux-gnu/libtinfo.so.5 - - name: Install PostgreSQL, MySQL and MariaDB client tools - if: steps.cache-postgres.outputs.cache-hit != 'true' || steps.cache-mysql.outputs.cache-hit != 'true' || steps.cache-mariadb.outputs.cache-hit != 'true' + - name: Install PostgreSQL, MySQL, MariaDB and MongoDB client tools 
+ if: steps.cache-postgres.outputs.cache-hit != 'true' || steps.cache-mysql.outputs.cache-hit != 'true' || steps.cache-mariadb.outputs.cache-hit != 'true' || steps.cache-mongodb.outputs.cache-hit != 'true' run: | chmod +x backend/tools/download_linux.sh cd backend/tools @@ -354,6 +385,18 @@ jobs: echo "MariaDB 12.1 client tools NOT found" fi + - name: Verify MongoDB Database Tools exist + run: | + cd backend/tools + echo "Checking MongoDB Database Tools..." + if [ -f "mongodb/bin/mongodump" ]; then + echo "MongoDB Database Tools found" + ls -la mongodb/bin/ + mongodb/bin/mongodump --version || true + else + echo "MongoDB Database Tools NOT found" + fi + - name: Run database migrations run: | cd backend @@ -363,7 +406,7 @@ jobs: - name: Run Go tests run: | cd backend - go test -p=1 -count=1 -failfast ./internal/... + go test -p=1 -count=1 -failfast -timeout 10m ./internal/... - name: Stop test containers if: always() diff --git a/Dockerfile b/Dockerfile index 5ef78fa..ff3cb15 100644 --- a/Dockerfile +++ b/Dockerfile @@ -77,100 +77,108 @@ ENV APP_VERSION=$APP_VERSION # Set production mode for Docker containers ENV ENV_MODE=production -# Install PostgreSQL server and client tools (versions 12-18), MySQL client tools (5.7, 8.0, 8.4), MariaDB client tools, and rclone -# Note: MySQL 5.7 is only available for x86_64, MySQL 8.0+ supports both x86_64 and ARM64 -# Note: MySQL binaries require libncurses5 for terminal handling -# Note: MariaDB uses a single client version (12.1) that is backward compatible with all server versions +# ========= STEP 1: Install base packages ========= +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + wget ca-certificates gnupg lsb-release sudo gosu curl unzip xz-utils libncurses5 +RUN rm -rf /var/lib/apt/lists/* + +# ========= Install PostgreSQL client binaries (versions 12-18) ========= +# Pre-downloaded binaries from assets/tools/ - no network download needed ARG TARGETARCH -RUN apt-get update && apt-get install -y 
--no-install-recommends \ - wget ca-certificates gnupg lsb-release sudo gosu curl unzip xz-utils libncurses5 && \ - # Add PostgreSQL repository - wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \ +RUN mkdir -p /usr/lib/postgresql/12/bin /usr/lib/postgresql/13/bin \ + /usr/lib/postgresql/14/bin /usr/lib/postgresql/15/bin \ + /usr/lib/postgresql/16/bin /usr/lib/postgresql/17/bin \ + /usr/lib/postgresql/18/bin + +# Copy pre-downloaded PostgreSQL binaries based on architecture +COPY assets/tools/x64/postgresql/ /tmp/pg-x64/ +COPY assets/tools/arm/postgresql/ /tmp/pg-arm/ +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + cp -r /tmp/pg-x64/postgresql-12/bin/* /usr/lib/postgresql/12/bin/ && \ + cp -r /tmp/pg-x64/postgresql-13/bin/* /usr/lib/postgresql/13/bin/ && \ + cp -r /tmp/pg-x64/postgresql-14/bin/* /usr/lib/postgresql/14/bin/ && \ + cp -r /tmp/pg-x64/postgresql-15/bin/* /usr/lib/postgresql/15/bin/ && \ + cp -r /tmp/pg-x64/postgresql-16/bin/* /usr/lib/postgresql/16/bin/ && \ + cp -r /tmp/pg-x64/postgresql-17/bin/* /usr/lib/postgresql/17/bin/ && \ + cp -r /tmp/pg-x64/postgresql-18/bin/* /usr/lib/postgresql/18/bin/; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + cp -r /tmp/pg-arm/postgresql-12/bin/* /usr/lib/postgresql/12/bin/ && \ + cp -r /tmp/pg-arm/postgresql-13/bin/* /usr/lib/postgresql/13/bin/ && \ + cp -r /tmp/pg-arm/postgresql-14/bin/* /usr/lib/postgresql/14/bin/ && \ + cp -r /tmp/pg-arm/postgresql-15/bin/* /usr/lib/postgresql/15/bin/ && \ + cp -r /tmp/pg-arm/postgresql-16/bin/* /usr/lib/postgresql/16/bin/ && \ + cp -r /tmp/pg-arm/postgresql-17/bin/* /usr/lib/postgresql/17/bin/ && \ + cp -r /tmp/pg-arm/postgresql-18/bin/* /usr/lib/postgresql/18/bin/; \ + fi && \ + rm -rf /tmp/pg-x64 /tmp/pg-arm && \ + chmod +x /usr/lib/postgresql/*/bin/* + +# Install PostgreSQL 17 server (needed for internal database) +# Add PostgreSQL repository for server installation only +RUN wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key 
add - && \ echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" \ > /etc/apt/sources.list.d/pgdg.list && \ apt-get update && \ - # Install PostgreSQL - apt-get install -y --no-install-recommends \ - postgresql-17 postgresql-18 postgresql-client-12 postgresql-client-13 postgresql-client-14 postgresql-client-15 \ - postgresql-client-16 postgresql-client-17 postgresql-client-18 rclone && \ - # Create MySQL directories - mkdir -p /usr/local/mysql-5.7/bin /usr/local/mysql-8.0/bin /usr/local/mysql-8.4/bin && \ - # Download and install MySQL client tools (architecture-aware) - # MySQL 5.7: Only available for x86_64 - if [ "$TARGETARCH" = "amd64" ]; then \ - wget -q https://dev.mysql.com/get/Downloads/MySQL-5.7/mysql-5.7.44-linux-glibc2.12-x86_64.tar.gz -O /tmp/mysql57.tar.gz && \ - tar -xzf /tmp/mysql57.tar.gz -C /tmp && \ - cp /tmp/mysql-5.7.*/bin/mysql /usr/local/mysql-5.7/bin/ && \ - cp /tmp/mysql-5.7.*/bin/mysqldump /usr/local/mysql-5.7/bin/ && \ - rm -rf /tmp/mysql-5.7.* /tmp/mysql57.tar.gz; \ - else \ - echo "MySQL 5.7 not available for $TARGETARCH, skipping..."; \ - fi && \ - # MySQL 8.0: Available for both x86_64 and ARM64 - if [ "$TARGETARCH" = "amd64" ]; then \ - wget -q https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-8.0.40-linux-glibc2.17-x86_64-minimal.tar.xz -O /tmp/mysql80.tar.xz; \ - elif [ "$TARGETARCH" = "arm64" ]; then \ - wget -q https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-8.0.40-linux-glibc2.17-aarch64-minimal.tar.xz -O /tmp/mysql80.tar.xz; \ - fi && \ - tar -xJf /tmp/mysql80.tar.xz -C /tmp && \ - cp /tmp/mysql-8.0.*/bin/mysql /usr/local/mysql-8.0/bin/ && \ - cp /tmp/mysql-8.0.*/bin/mysqldump /usr/local/mysql-8.0/bin/ && \ - rm -rf /tmp/mysql-8.0.* /tmp/mysql80.tar.xz && \ - # MySQL 8.4: Available for both x86_64 and ARM64 - if [ "$TARGETARCH" = "amd64" ]; then \ - wget -q https://dev.mysql.com/get/Downloads/MySQL-8.4/mysql-8.4.3-linux-glibc2.17-x86_64-minimal.tar.xz -O /tmp/mysql84.tar.xz; \ - elif [ 
"$TARGETARCH" = "arm64" ]; then \ - wget -q https://dev.mysql.com/get/Downloads/MySQL-8.4/mysql-8.4.3-linux-glibc2.17-aarch64-minimal.tar.xz -O /tmp/mysql84.tar.xz; \ - fi && \ - tar -xJf /tmp/mysql84.tar.xz -C /tmp && \ - cp /tmp/mysql-8.4.*/bin/mysql /usr/local/mysql-8.4/bin/ && \ - cp /tmp/mysql-8.4.*/bin/mysqldump /usr/local/mysql-8.4/bin/ && \ - rm -rf /tmp/mysql-8.4.* /tmp/mysql84.tar.xz && \ - # Make MySQL binaries executable (ignore errors for empty dirs on ARM64) - chmod +x /usr/local/mysql-*/bin/* 2>/dev/null || true && \ - # Create MariaDB directories for both versions - # MariaDB uses two client versions: - # - 10.6 (legacy): For older servers (5.5, 10.1) that don't have generation_expression column - # - 12.1 (modern): For newer servers (10.2+) - mkdir -p /usr/local/mariadb-10.6/bin /usr/local/mariadb-12.1/bin && \ - # Download and install MariaDB 10.6 client tools (legacy - for older servers) - if [ "$TARGETARCH" = "amd64" ]; then \ - wget -q https://archive.mariadb.org/mariadb-10.6.21/bintar-linux-systemd-x86_64/mariadb-10.6.21-linux-systemd-x86_64.tar.gz -O /tmp/mariadb106.tar.gz && \ - tar -xzf /tmp/mariadb106.tar.gz -C /tmp && \ - cp /tmp/mariadb-10.6.*/bin/mariadb /usr/local/mariadb-10.6/bin/ && \ - cp /tmp/mariadb-10.6.*/bin/mariadb-dump /usr/local/mariadb-10.6/bin/ && \ - rm -rf /tmp/mariadb-10.6.* /tmp/mariadb106.tar.gz; \ - elif [ "$TARGETARCH" = "arm64" ]; then \ - # For ARM64, install MariaDB 10.6 client from official repository - curl -fsSL https://mariadb.org/mariadb_release_signing_key.asc | gpg --dearmor -o /usr/share/keyrings/mariadb-keyring.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/mariadb-keyring.gpg] https://mirror.mariadb.org/repo/10.6/debian $(lsb_release -cs) main" > /etc/apt/sources.list.d/mariadb106.list && \ - apt-get update && \ - apt-get install -y --no-install-recommends mariadb-client && \ - cp /usr/bin/mariadb /usr/local/mariadb-10.6/bin/mariadb && \ - cp /usr/bin/mariadb-dump 
/usr/local/mariadb-10.6/bin/mariadb-dump && \ - apt-get remove -y mariadb-client && \ - rm /etc/apt/sources.list.d/mariadb106.list; \ - fi && \ - # Download and install MariaDB 12.1 client tools (modern - for newer servers) - if [ "$TARGETARCH" = "amd64" ]; then \ - wget -q https://archive.mariadb.org/mariadb-12.1.2/bintar-linux-systemd-x86_64/mariadb-12.1.2-linux-systemd-x86_64.tar.gz -O /tmp/mariadb121.tar.gz && \ - tar -xzf /tmp/mariadb121.tar.gz -C /tmp && \ - cp /tmp/mariadb-12.1.*/bin/mariadb /usr/local/mariadb-12.1/bin/ && \ - cp /tmp/mariadb-12.1.*/bin/mariadb-dump /usr/local/mariadb-12.1/bin/ && \ - rm -rf /tmp/mariadb-12.1.* /tmp/mariadb121.tar.gz; \ - elif [ "$TARGETARCH" = "arm64" ]; then \ - # For ARM64, install MariaDB 12.1 client from official repository - echo "deb [signed-by=/usr/share/keyrings/mariadb-keyring.gpg] https://mirror.mariadb.org/repo/12.1/debian $(lsb_release -cs) main" > /etc/apt/sources.list.d/mariadb121.list && \ - apt-get update && \ - apt-get install -y --no-install-recommends mariadb-client && \ - cp /usr/bin/mariadb /usr/local/mariadb-12.1/bin/mariadb && \ - cp /usr/bin/mariadb-dump /usr/local/mariadb-12.1/bin/mariadb-dump; \ - fi && \ - # Make MariaDB binaries executable - chmod +x /usr/local/mariadb-*/bin/* 2>/dev/null || true && \ - # Cleanup + apt-get install -y --no-install-recommends postgresql-17 && \ rm -rf /var/lib/apt/lists/* +# ========= Install rclone ========= +RUN apt-get update && \ + apt-get install -y --no-install-recommends rclone && \ + rm -rf /var/lib/apt/lists/* + +# Create directories for all database clients +RUN mkdir -p /usr/local/mysql-5.7/bin /usr/local/mysql-8.0/bin /usr/local/mysql-8.4/bin \ + /usr/local/mariadb-10.6/bin /usr/local/mariadb-12.1/bin \ + /usr/local/mongodb-database-tools/bin + +# ========= Install MySQL clients (5.7, 8.0, 8.4) ========= +# Pre-downloaded binaries from assets/tools/ - no network download needed +# Note: MySQL 5.7 is only available for x86_64 +# Note: MySQL binaries 
require libncurses5 for terminal handling +COPY assets/tools/x64/mysql/ /tmp/mysql-x64/ +COPY assets/tools/arm/mysql/ /tmp/mysql-arm/ +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + cp /tmp/mysql-x64/mysql-5.7/bin/* /usr/local/mysql-5.7/bin/ && \ + cp /tmp/mysql-x64/mysql-8.0/bin/* /usr/local/mysql-8.0/bin/ && \ + cp /tmp/mysql-x64/mysql-8.4/bin/* /usr/local/mysql-8.4/bin/; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + echo "MySQL 5.7 not available for arm64, skipping..." && \ + cp /tmp/mysql-arm/mysql-8.0/bin/* /usr/local/mysql-8.0/bin/ && \ + cp /tmp/mysql-arm/mysql-8.4/bin/* /usr/local/mysql-8.4/bin/; \ + fi && \ + rm -rf /tmp/mysql-x64 /tmp/mysql-arm && \ + chmod +x /usr/local/mysql-*/bin/* + +# ========= Install MariaDB clients (10.6, 12.1) ========= +# Pre-downloaded binaries from assets/tools/ - no network download needed +# 10.6 (legacy): For older servers (5.5, 10.1) that don't have generation_expression column +# 12.1 (modern): For newer servers (10.2+) +COPY assets/tools/x64/mariadb/ /tmp/mariadb-x64/ +COPY assets/tools/arm/mariadb/ /tmp/mariadb-arm/ +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + cp /tmp/mariadb-x64/mariadb-10.6/bin/* /usr/local/mariadb-10.6/bin/ && \ + cp /tmp/mariadb-x64/mariadb-12.1/bin/* /usr/local/mariadb-12.1/bin/; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + cp /tmp/mariadb-arm/mariadb-10.6/bin/* /usr/local/mariadb-10.6/bin/ && \ + cp /tmp/mariadb-arm/mariadb-12.1/bin/* /usr/local/mariadb-12.1/bin/; \ + fi && \ + rm -rf /tmp/mariadb-x64 /tmp/mariadb-arm && \ + chmod +x /usr/local/mariadb-*/bin/* + +# ========= Install MongoDB Database Tools ========= +# Note: MongoDB Database Tools are backward compatible - single version supports all server versions (4.0-8.0) +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + wget -q https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-x86_64-100.10.0.deb -O /tmp/mongodb-database-tools.deb; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + wget -q 
https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-aarch64-100.10.0.deb -O /tmp/mongodb-database-tools.deb; \ + fi && \ + dpkg -i /tmp/mongodb-database-tools.deb && \ + rm /tmp/mongodb-database-tools.deb && \ + ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump && \ + ln -sf /usr/bin/mongorestore /usr/local/mongodb-database-tools/bin/mongorestore + # Create postgres user and set up directories RUN useradd -m -s /bin/bash postgres || true && \ mkdir -p /postgresus-data/pgdata && \ diff --git a/assets/tools/arm/mariadb/mariadb-10.6/bin/mariadb b/assets/tools/arm/mariadb/mariadb-10.6/bin/mariadb new file mode 100644 index 0000000..eefb2bf Binary files /dev/null and b/assets/tools/arm/mariadb/mariadb-10.6/bin/mariadb differ diff --git a/assets/tools/arm/mariadb/mariadb-10.6/bin/mariadb-dump b/assets/tools/arm/mariadb/mariadb-10.6/bin/mariadb-dump new file mode 100644 index 0000000..6d8f4f6 Binary files /dev/null and b/assets/tools/arm/mariadb/mariadb-10.6/bin/mariadb-dump differ diff --git a/assets/tools/arm/mariadb/mariadb-12.1/bin/mariadb b/assets/tools/arm/mariadb/mariadb-12.1/bin/mariadb new file mode 100644 index 0000000..eef87e9 Binary files /dev/null and b/assets/tools/arm/mariadb/mariadb-12.1/bin/mariadb differ diff --git a/assets/tools/arm/mariadb/mariadb-12.1/bin/mariadb-dump b/assets/tools/arm/mariadb/mariadb-12.1/bin/mariadb-dump new file mode 100644 index 0000000..5d36ed5 Binary files /dev/null and b/assets/tools/arm/mariadb/mariadb-12.1/bin/mariadb-dump differ diff --git a/assets/tools/arm/mysql/mysql-8.0/bin/mysql b/assets/tools/arm/mysql/mysql-8.0/bin/mysql new file mode 100644 index 0000000..9b3ace0 Binary files /dev/null and b/assets/tools/arm/mysql/mysql-8.0/bin/mysql differ diff --git a/assets/tools/arm/mysql/mysql-8.0/bin/mysqldump b/assets/tools/arm/mysql/mysql-8.0/bin/mysqldump new file mode 100644 index 0000000..c14203b Binary files /dev/null and b/assets/tools/arm/mysql/mysql-8.0/bin/mysqldump differ diff 
--git a/assets/tools/arm/mysql/mysql-8.4/bin/mysql b/assets/tools/arm/mysql/mysql-8.4/bin/mysql new file mode 100644 index 0000000..ce6bdfd Binary files /dev/null and b/assets/tools/arm/mysql/mysql-8.4/bin/mysql differ diff --git a/assets/tools/arm/mysql/mysql-8.4/bin/mysqldump b/assets/tools/arm/mysql/mysql-8.4/bin/mysqldump new file mode 100644 index 0000000..f947a62 Binary files /dev/null and b/assets/tools/arm/mysql/mysql-8.4/bin/mysqldump differ diff --git a/assets/tools/arm/postgresql/postgresql-12/bin/createdb b/assets/tools/arm/postgresql/postgresql-12/bin/createdb new file mode 100644 index 0000000..808e14d Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-12/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-12/bin/dropdb b/assets/tools/arm/postgresql/postgresql-12/bin/dropdb new file mode 100644 index 0000000..1e2fbb1 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-12/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-12/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-12/bin/pg_dump new file mode 100644 index 0000000..c4951e2 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-12/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-12/bin/pg_dumpall b/assets/tools/arm/postgresql/postgresql-12/bin/pg_dumpall new file mode 100644 index 0000000..ec37062 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-12/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-12/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-12/bin/pg_restore new file mode 100644 index 0000000..d792101 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-12/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-12/bin/psql b/assets/tools/arm/postgresql/postgresql-12/bin/psql new file mode 100644 index 0000000..bbf28d4 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-12/bin/psql 
differ diff --git a/assets/tools/arm/postgresql/postgresql-13/bin/createdb b/assets/tools/arm/postgresql/postgresql-13/bin/createdb new file mode 100644 index 0000000..6c61fe5 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-13/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-13/bin/dropdb b/assets/tools/arm/postgresql/postgresql-13/bin/dropdb new file mode 100644 index 0000000..f709466 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-13/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-13/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-13/bin/pg_dump new file mode 100644 index 0000000..60118c7 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-13/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-13/bin/pg_dumpall b/assets/tools/arm/postgresql/postgresql-13/bin/pg_dumpall new file mode 100644 index 0000000..fc6e49b Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-13/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-13/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-13/bin/pg_restore new file mode 100644 index 0000000..3b4fb8c Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-13/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-13/bin/psql b/assets/tools/arm/postgresql/postgresql-13/bin/psql new file mode 100644 index 0000000..377be0a Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-13/bin/psql differ diff --git a/assets/tools/arm/postgresql/postgresql-14/bin/createdb b/assets/tools/arm/postgresql/postgresql-14/bin/createdb new file mode 100644 index 0000000..03a9e2b Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-14/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-14/bin/dropdb b/assets/tools/arm/postgresql/postgresql-14/bin/dropdb new file mode 100644 index 0000000..2ff6d5c Binary files 
/dev/null and b/assets/tools/arm/postgresql/postgresql-14/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-14/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-14/bin/pg_dump new file mode 100644 index 0000000..7c6bcad Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-14/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-14/bin/pg_dumpall b/assets/tools/arm/postgresql/postgresql-14/bin/pg_dumpall new file mode 100644 index 0000000..9514847 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-14/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-14/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-14/bin/pg_restore new file mode 100644 index 0000000..6df17ae Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-14/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-14/bin/psql b/assets/tools/arm/postgresql/postgresql-14/bin/psql new file mode 100644 index 0000000..8616815 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-14/bin/psql differ diff --git a/assets/tools/arm/postgresql/postgresql-15/bin/createdb b/assets/tools/arm/postgresql/postgresql-15/bin/createdb new file mode 100644 index 0000000..9f37fd9 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-15/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-15/bin/dropdb b/assets/tools/arm/postgresql/postgresql-15/bin/dropdb new file mode 100644 index 0000000..9ebc3e9 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-15/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-15/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-15/bin/pg_dump new file mode 100644 index 0000000..8eecace Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-15/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-15/bin/pg_dumpall 
b/assets/tools/arm/postgresql/postgresql-15/bin/pg_dumpall new file mode 100644 index 0000000..8035530 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-15/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-15/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-15/bin/pg_restore new file mode 100644 index 0000000..6ad5e4b Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-15/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-15/bin/psql b/assets/tools/arm/postgresql/postgresql-15/bin/psql new file mode 100644 index 0000000..ce11939 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-15/bin/psql differ diff --git a/assets/tools/arm/postgresql/postgresql-16/bin/createdb b/assets/tools/arm/postgresql/postgresql-16/bin/createdb new file mode 100644 index 0000000..96b07b8 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-16/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-16/bin/dropdb b/assets/tools/arm/postgresql/postgresql-16/bin/dropdb new file mode 100644 index 0000000..98d8859 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-16/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-16/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-16/bin/pg_dump new file mode 100644 index 0000000..88ebd97 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-16/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-16/bin/pg_dumpall b/assets/tools/arm/postgresql/postgresql-16/bin/pg_dumpall new file mode 100644 index 0000000..5c93c2d Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-16/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-16/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-16/bin/pg_restore new file mode 100644 index 0000000..eee24bb Binary files /dev/null and 
b/assets/tools/arm/postgresql/postgresql-16/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-16/bin/psql b/assets/tools/arm/postgresql/postgresql-16/bin/psql new file mode 100644 index 0000000..e96c287 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-16/bin/psql differ diff --git a/assets/tools/arm/postgresql/postgresql-17/bin/createdb b/assets/tools/arm/postgresql/postgresql-17/bin/createdb new file mode 100644 index 0000000..322622a Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-17/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-17/bin/dropdb b/assets/tools/arm/postgresql/postgresql-17/bin/dropdb new file mode 100644 index 0000000..66a6aeb Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-17/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-17/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-17/bin/pg_dump new file mode 100644 index 0000000..2eb8f0e Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-17/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-17/bin/pg_dumpall b/assets/tools/arm/postgresql/postgresql-17/bin/pg_dumpall new file mode 100644 index 0000000..3b6c850 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-17/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-17/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-17/bin/pg_restore new file mode 100644 index 0000000..87a71cc Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-17/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-17/bin/psql b/assets/tools/arm/postgresql/postgresql-17/bin/psql new file mode 100644 index 0000000..d981a9c Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-17/bin/psql differ diff --git a/assets/tools/arm/postgresql/postgresql-18/bin/createdb b/assets/tools/arm/postgresql/postgresql-18/bin/createdb new file 
mode 100644 index 0000000..772ca3f Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-18/bin/createdb differ diff --git a/assets/tools/arm/postgresql/postgresql-18/bin/dropdb b/assets/tools/arm/postgresql/postgresql-18/bin/dropdb new file mode 100644 index 0000000..dca3913 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-18/bin/dropdb differ diff --git a/assets/tools/arm/postgresql/postgresql-18/bin/pg_dump b/assets/tools/arm/postgresql/postgresql-18/bin/pg_dump new file mode 100644 index 0000000..e770af7 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-18/bin/pg_dump differ diff --git a/assets/tools/arm/postgresql/postgresql-18/bin/pg_dumpall b/assets/tools/arm/postgresql/postgresql-18/bin/pg_dumpall new file mode 100644 index 0000000..9355b3a Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-18/bin/pg_dumpall differ diff --git a/assets/tools/arm/postgresql/postgresql-18/bin/pg_restore b/assets/tools/arm/postgresql/postgresql-18/bin/pg_restore new file mode 100644 index 0000000..d4a58fe Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-18/bin/pg_restore differ diff --git a/assets/tools/arm/postgresql/postgresql-18/bin/psql b/assets/tools/arm/postgresql/postgresql-18/bin/psql new file mode 100644 index 0000000..db4c8e2 Binary files /dev/null and b/assets/tools/arm/postgresql/postgresql-18/bin/psql differ diff --git a/assets/tools/x64/mariadb/mariadb-10.6/bin/mariadb b/assets/tools/x64/mariadb/mariadb-10.6/bin/mariadb new file mode 100644 index 0000000..b790a7c Binary files /dev/null and b/assets/tools/x64/mariadb/mariadb-10.6/bin/mariadb differ diff --git a/assets/tools/x64/mariadb/mariadb-10.6/bin/mariadb-dump b/assets/tools/x64/mariadb/mariadb-10.6/bin/mariadb-dump new file mode 100644 index 0000000..f73438d Binary files /dev/null and b/assets/tools/x64/mariadb/mariadb-10.6/bin/mariadb-dump differ diff --git a/assets/tools/x64/mariadb/mariadb-12.1/bin/mariadb 
b/assets/tools/x64/mariadb/mariadb-12.1/bin/mariadb new file mode 100644 index 0000000..1eeaf6c Binary files /dev/null and b/assets/tools/x64/mariadb/mariadb-12.1/bin/mariadb differ diff --git a/assets/tools/x64/mariadb/mariadb-12.1/bin/mariadb-dump b/assets/tools/x64/mariadb/mariadb-12.1/bin/mariadb-dump new file mode 100644 index 0000000..c815a22 Binary files /dev/null and b/assets/tools/x64/mariadb/mariadb-12.1/bin/mariadb-dump differ diff --git a/assets/tools/x64/mysql/mysql-5.7/bin/mysql b/assets/tools/x64/mysql/mysql-5.7/bin/mysql new file mode 100644 index 0000000..86a7185 Binary files /dev/null and b/assets/tools/x64/mysql/mysql-5.7/bin/mysql differ diff --git a/assets/tools/x64/mysql/mysql-5.7/bin/mysqldump b/assets/tools/x64/mysql/mysql-5.7/bin/mysqldump new file mode 100644 index 0000000..7a728b5 Binary files /dev/null and b/assets/tools/x64/mysql/mysql-5.7/bin/mysqldump differ diff --git a/assets/tools/x64/mysql/mysql-8.0/bin/mysql b/assets/tools/x64/mysql/mysql-8.0/bin/mysql new file mode 100644 index 0000000..57c5a77 Binary files /dev/null and b/assets/tools/x64/mysql/mysql-8.0/bin/mysql differ diff --git a/assets/tools/x64/mysql/mysql-8.0/bin/mysqldump b/assets/tools/x64/mysql/mysql-8.0/bin/mysqldump new file mode 100644 index 0000000..f193675 Binary files /dev/null and b/assets/tools/x64/mysql/mysql-8.0/bin/mysqldump differ diff --git a/assets/tools/x64/mysql/mysql-8.4/bin/mysql b/assets/tools/x64/mysql/mysql-8.4/bin/mysql new file mode 100644 index 0000000..b8d0bcd Binary files /dev/null and b/assets/tools/x64/mysql/mysql-8.4/bin/mysql differ diff --git a/assets/tools/x64/mysql/mysql-8.4/bin/mysqldump b/assets/tools/x64/mysql/mysql-8.4/bin/mysqldump new file mode 100644 index 0000000..211fb2d Binary files /dev/null and b/assets/tools/x64/mysql/mysql-8.4/bin/mysqldump differ diff --git a/assets/tools/x64/postgresql/postgresql-12/bin/createdb b/assets/tools/x64/postgresql/postgresql-12/bin/createdb new file mode 100644 index 0000000..1d08a34 Binary 
files /dev/null and b/assets/tools/x64/postgresql/postgresql-12/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-12/bin/dropdb b/assets/tools/x64/postgresql/postgresql-12/bin/dropdb new file mode 100644 index 0000000..01b68d0 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-12/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-12/bin/pg_dump b/assets/tools/x64/postgresql/postgresql-12/bin/pg_dump new file mode 100644 index 0000000..d2016e3 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-12/bin/pg_dump differ diff --git a/assets/tools/x64/postgresql/postgresql-12/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-12/bin/pg_dumpall new file mode 100644 index 0000000..0404ab5 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-12/bin/pg_dumpall differ diff --git a/assets/tools/x64/postgresql/postgresql-12/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-12/bin/pg_restore new file mode 100644 index 0000000..1084f53 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-12/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-12/bin/psql b/assets/tools/x64/postgresql/postgresql-12/bin/psql new file mode 100644 index 0000000..42218f9 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-12/bin/psql differ diff --git a/assets/tools/x64/postgresql/postgresql-13/bin/createdb b/assets/tools/x64/postgresql/postgresql-13/bin/createdb new file mode 100644 index 0000000..c1a2c0f Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-13/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-13/bin/dropdb b/assets/tools/x64/postgresql/postgresql-13/bin/dropdb new file mode 100644 index 0000000..7378aaa Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-13/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-13/bin/pg_dump 
b/assets/tools/x64/postgresql/postgresql-13/bin/pg_dump new file mode 100644 index 0000000..60690d5 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-13/bin/pg_dump differ diff --git a/assets/tools/x64/postgresql/postgresql-13/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-13/bin/pg_dumpall new file mode 100644 index 0000000..8aecae1 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-13/bin/pg_dumpall differ diff --git a/assets/tools/x64/postgresql/postgresql-13/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-13/bin/pg_restore new file mode 100644 index 0000000..f13321a Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-13/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-13/bin/psql b/assets/tools/x64/postgresql/postgresql-13/bin/psql new file mode 100644 index 0000000..c96b260 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-13/bin/psql differ diff --git a/assets/tools/x64/postgresql/postgresql-14/bin/createdb b/assets/tools/x64/postgresql/postgresql-14/bin/createdb new file mode 100644 index 0000000..054e17d Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-14/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-14/bin/dropdb b/assets/tools/x64/postgresql/postgresql-14/bin/dropdb new file mode 100644 index 0000000..c7b9fd4 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-14/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-14/bin/pg_dump b/assets/tools/x64/postgresql/postgresql-14/bin/pg_dump new file mode 100644 index 0000000..955b243 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-14/bin/pg_dump differ diff --git a/assets/tools/x64/postgresql/postgresql-14/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-14/bin/pg_dumpall new file mode 100644 index 0000000..4f2f012 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-14/bin/pg_dumpall 
differ diff --git a/assets/tools/x64/postgresql/postgresql-14/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-14/bin/pg_restore new file mode 100644 index 0000000..355c315 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-14/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-14/bin/psql b/assets/tools/x64/postgresql/postgresql-14/bin/psql new file mode 100644 index 0000000..4819235 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-14/bin/psql differ diff --git a/assets/tools/x64/postgresql/postgresql-15/bin/createdb b/assets/tools/x64/postgresql/postgresql-15/bin/createdb new file mode 100644 index 0000000..8c6c32e Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-15/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-15/bin/dropdb b/assets/tools/x64/postgresql/postgresql-15/bin/dropdb new file mode 100644 index 0000000..6c0764d Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-15/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-15/bin/pg_dump b/assets/tools/x64/postgresql/postgresql-15/bin/pg_dump new file mode 100644 index 0000000..8881dab Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-15/bin/pg_dump differ diff --git a/assets/tools/x64/postgresql/postgresql-15/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-15/bin/pg_dumpall new file mode 100644 index 0000000..1508e48 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-15/bin/pg_dumpall differ diff --git a/assets/tools/x64/postgresql/postgresql-15/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-15/bin/pg_restore new file mode 100644 index 0000000..cbd44ba Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-15/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-15/bin/psql b/assets/tools/x64/postgresql/postgresql-15/bin/psql new file mode 100644 index 0000000..260408d Binary files 
/dev/null and b/assets/tools/x64/postgresql/postgresql-15/bin/psql differ diff --git a/assets/tools/x64/postgresql/postgresql-16/bin/createdb b/assets/tools/x64/postgresql/postgresql-16/bin/createdb new file mode 100644 index 0000000..77b6dda Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-16/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-16/bin/dropdb b/assets/tools/x64/postgresql/postgresql-16/bin/dropdb new file mode 100644 index 0000000..bd99ab2 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-16/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-16/bin/pg_dump b/assets/tools/x64/postgresql/postgresql-16/bin/pg_dump new file mode 100644 index 0000000..8d20464 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-16/bin/pg_dump differ diff --git a/assets/tools/x64/postgresql/postgresql-16/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-16/bin/pg_dumpall new file mode 100644 index 0000000..09d51c7 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-16/bin/pg_dumpall differ diff --git a/assets/tools/x64/postgresql/postgresql-16/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-16/bin/pg_restore new file mode 100644 index 0000000..929e15a Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-16/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-16/bin/psql b/assets/tools/x64/postgresql/postgresql-16/bin/psql new file mode 100644 index 0000000..3855edd Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-16/bin/psql differ diff --git a/assets/tools/x64/postgresql/postgresql-17/bin/createdb b/assets/tools/x64/postgresql/postgresql-17/bin/createdb new file mode 100644 index 0000000..94318b6 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-17/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-17/bin/dropdb 
b/assets/tools/x64/postgresql/postgresql-17/bin/dropdb new file mode 100644 index 0000000..de931d2 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-17/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-17/bin/pg_dump b/assets/tools/x64/postgresql/postgresql-17/bin/pg_dump new file mode 100644 index 0000000..51a235d Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-17/bin/pg_dump differ diff --git a/assets/tools/x64/postgresql/postgresql-17/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-17/bin/pg_dumpall new file mode 100644 index 0000000..de33c63 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-17/bin/pg_dumpall differ diff --git a/assets/tools/x64/postgresql/postgresql-17/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-17/bin/pg_restore new file mode 100644 index 0000000..c7c75c3 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-17/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-17/bin/psql b/assets/tools/x64/postgresql/postgresql-17/bin/psql new file mode 100644 index 0000000..01b099e Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-17/bin/psql differ diff --git a/assets/tools/x64/postgresql/postgresql-18/bin/createdb b/assets/tools/x64/postgresql/postgresql-18/bin/createdb new file mode 100644 index 0000000..aa81113 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-18/bin/createdb differ diff --git a/assets/tools/x64/postgresql/postgresql-18/bin/dropdb b/assets/tools/x64/postgresql/postgresql-18/bin/dropdb new file mode 100644 index 0000000..c5b1d31 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-18/bin/dropdb differ diff --git a/assets/tools/x64/postgresql/postgresql-18/bin/pg_dump b/assets/tools/x64/postgresql/postgresql-18/bin/pg_dump new file mode 100644 index 0000000..0fa75a3 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-18/bin/pg_dump differ 
diff --git a/assets/tools/x64/postgresql/postgresql-18/bin/pg_dumpall b/assets/tools/x64/postgresql/postgresql-18/bin/pg_dumpall new file mode 100644 index 0000000..f9d2b68 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-18/bin/pg_dumpall differ diff --git a/assets/tools/x64/postgresql/postgresql-18/bin/pg_restore b/assets/tools/x64/postgresql/postgresql-18/bin/pg_restore new file mode 100644 index 0000000..6e041e7 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-18/bin/pg_restore differ diff --git a/assets/tools/x64/postgresql/postgresql-18/bin/psql b/assets/tools/x64/postgresql/postgresql-18/bin/psql new file mode 100644 index 0000000..8460c00 Binary files /dev/null and b/assets/tools/x64/postgresql/postgresql-18/bin/psql differ diff --git a/backend/.env.development.example b/backend/.env.development.example index 3093a91..6b1fa64 100644 --- a/backend/.env.development.example +++ b/backend/.env.development.example @@ -59,4 +59,12 @@ TEST_MARIADB_106_PORT=33106 TEST_MARIADB_1011_PORT=33111 TEST_MARIADB_114_PORT=33114 TEST_MARIADB_118_PORT=33118 -TEST_MARIADB_120_PORT=33120 \ No newline at end of file +TEST_MARIADB_120_PORT=33120 +# testing MongoDB +TEST_MONGODB_40_PORT=27040 +TEST_MONGODB_42_PORT=27042 +TEST_MONGODB_44_PORT=27044 +TEST_MONGODB_50_PORT=27050 +TEST_MONGODB_60_PORT=27060 +TEST_MONGODB_70_PORT=27070 +TEST_MONGODB_80_PORT=27080 \ No newline at end of file diff --git a/backend/MARIADB_PLAN.MD b/backend/MARIADB_PLAN.MD new file mode 100644 index 0000000..7720bf9 --- /dev/null +++ b/backend/MARIADB_PLAN.MD @@ -0,0 +1,2154 @@ +# MariaDB Implementation Plan + +## Overview + +This document outlines the implementation plan for adding MariaDB backup and restore support to Postgresus. The implementation will follow the existing PostgreSQL and MySQL architecture patterns, with MariaDB as a separate database type. 
+ +## Supported MariaDB Versions + +| Version | Status | EOL Date | Support | +| ----------------- | ------ | ------------- | ------- | +| **MariaDB 5.5** | EOL | April 2020 | ✅ Yes | +| **MariaDB 10.1** | EOL | October 2020 | ✅ Yes | +| **MariaDB 10.2** | EOL | May 2022 | ✅ Yes | +| **MariaDB 10.3** | EOL | May 2023 | ✅ Yes | +| **MariaDB 10.4** | EOL | June 2024 | ✅ Yes | +| **MariaDB 10.5** | EOL | June 2025 | ✅ Yes | +| **MariaDB 10.6** | LTS | July 2026 | ✅ Yes | +| **MariaDB 10.11** | LTS | February 2028 | ✅ Yes | +| **MariaDB 11.4** | LTS | May 2029 | ✅ Yes | +| **MariaDB 11.8** | LTS | June 2028 | ✅ Yes | +| **MariaDB 12.0** | Stable | TBD (~2028) | ✅ Yes | + +**Notes:** + +- MariaDB versions 8 and 9 don't exist. MariaDB versioning went from 5.5 → 10.0 to differentiate from MySQL. +- MariaDB 5.5 and 10.1-10.5 reached EOL but are still supported for legacy systems that cannot be upgraded. +- MariaDB 12.0 was declared stable in March 2025. LTS version 12.3 is expected in 2026. + +## Key Decisions + +| Aspect | Decision | +| ------------------ | -------------------------------------------------------------------------- | +| **Architecture** | Separate `DatabaseTypeMariadb` (not reusing MySQL) | +| **Client tools** | Single latest client (12.0) - backward compatible with all server versions | +| **Backup tool** | Native `mariadb-dump` | +| **Restore tool** | Native `mariadb` client | +| **Auto-detection** | Parse "MariaDB" from `SELECT VERSION()` | +| **Cross-restore** | No (MariaDB → MariaDB only) | +| **ARM64** | Full support (all versions) | + +--- + +## File Structure + +### New Files to Create + +``` +backend/internal/features/databases/databases/mariadb/ +├── model.go # MariadbDatabase struct and methods +├── readonly_user.go # Read-only user creation (MariaDB-specific) + +backend/internal/features/backups/backups/usecases/mariadb/ +├── create_backup_uc.go # MariaDB backup use case +├── di.go # Dependency injection + 
+backend/internal/features/restores/usecases/mariadb/ +├── restore_backup_uc.go # MariaDB restore use case +├── di.go # Dependency injection + +backend/internal/util/tools/ +├── mariadb.go # MariaDB executable helpers and version enums + +backend/internal/features/tests/ +├── mariadb_backup_restore_test.go # Integration tests + +backend/migrations/ +├── XXXXXX_add_mariadb_databases_table.up.sql +├── XXXXXX_add_mariadb_databases_table.down.sql +``` + +### Files to Modify + +``` +backend/internal/features/databases/ +├── enums.go # Add DatabaseTypeMariadb +├── model.go # Add Mariadb field, update methods +├── service.go # Handle MariaDB in CopyDatabase, IsUserReadOnly, CreateReadOnlyUser +├── repository.go # Preload MariaDB relation + +backend/internal/features/backups/backups/usecases/ +├── create_backup_uc.go # Add MariaDB case +├── di.go # Wire MariaDB use case + +backend/internal/features/restores/usecases/ +├── restore_backup_uc.go # Add MariaDB case +├── di.go # Wire MariaDB use case + +backend/internal/features/restores/ +├── service.go # Add MariaDB validation, version compatibility +├── dto.go # Add MariadbDatabase field to RestoreBackupRequest + +backend/internal/config/ +├── config.go # Add MariaDB test ports, verify MariaDB installation + +backend/tools/ +├── download_linux.sh # Add MariaDB client download +├── download_macos.sh # Add MariaDB client download +├── download_windows.bat # Add MariaDB client download +├── readme.md # Update with MariaDB instructions + +Dockerfile # Add MariaDB client installation +docker-compose.yml.example # Add MariaDB test containers +.github/workflows/ci-release.yml # Add MariaDB test setup +``` + +--- + +## Database Type Enum + +### `backend/internal/features/databases/enums.go` + +```go +const ( + DatabaseTypePostgres DatabaseType = "POSTGRES" + DatabaseTypeMysql DatabaseType = "MYSQL" + DatabaseTypeMariadb DatabaseType = "MARIADB" +) +``` + +--- + +## MariaDB Version Enums + +### 
`backend/internal/util/tools/mariadb.go` + +```go +package tools + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + "runtime" + + env_utils "postgresus-backend/internal/util/env" +) + +type MariadbVersion string + +const ( + MariadbVersion55 MariadbVersion = "5.5" + MariadbVersion101 MariadbVersion = "10.1" + MariadbVersion102 MariadbVersion = "10.2" + MariadbVersion103 MariadbVersion = "10.3" + MariadbVersion104 MariadbVersion = "10.4" + MariadbVersion105 MariadbVersion = "10.5" + MariadbVersion106 MariadbVersion = "10.6" + MariadbVersion1011 MariadbVersion = "10.11" + MariadbVersion114 MariadbVersion = "11.4" + MariadbVersion118 MariadbVersion = "11.8" + MariadbVersion120 MariadbVersion = "12.0" +) + +type MariadbExecutable string + +const ( + MariadbExecutableMariadbDump MariadbExecutable = "mariadb-dump" + MariadbExecutableMariadb MariadbExecutable = "mariadb" +) + +// GetMariadbExecutable returns the full path to a MariaDB executable. +// MariaDB uses a single client version (latest) that is backward compatible +// with all server versions. +func GetMariadbExecutable( + executable MariadbExecutable, + envMode env_utils.EnvMode, + mariadbInstallDir string, +) string { + basePath := getMariadbBasePath(envMode, mariadbInstallDir) + executableName := string(executable) + + if runtime.GOOS == "windows" { + executableName += ".exe" + } + + return filepath.Join(basePath, executableName) +} + +// VerifyMariadbInstallation verifies that MariaDB client tools are installed. +// Unlike MySQL/PostgreSQL, MariaDB uses a single client version that supports +// all server versions (backward compatible). 
+func VerifyMariadbInstallation( + logger *slog.Logger, + envMode env_utils.EnvMode, + mariadbInstallDir string, +) { + binDir := getMariadbBasePath(envMode, mariadbInstallDir) + + logger.Info( + "Verifying MariaDB installation", + "path", binDir, + ) + + if _, err := os.Stat(binDir); os.IsNotExist(err) { + if envMode == env_utils.EnvModeDevelopment { + logger.Warn( + "MariaDB bin directory not found. MariaDB support will be disabled. Read ./tools/readme.md for details", + "path", binDir, + ) + } else { + logger.Warn( + "MariaDB bin directory not found. MariaDB support will be disabled.", + "path", binDir, + ) + } + return + } + + requiredCommands := []MariadbExecutable{ + MariadbExecutableMariadbDump, + MariadbExecutableMariadb, + } + + for _, cmd := range requiredCommands { + cmdPath := GetMariadbExecutable(cmd, envMode, mariadbInstallDir) + + logger.Info( + "Checking for MariaDB command", + "command", cmd, + "path", cmdPath, + ) + + if _, err := os.Stat(cmdPath); os.IsNotExist(err) { + if envMode == env_utils.EnvModeDevelopment { + logger.Warn( + "MariaDB command not found. MariaDB support will be disabled. Read ./tools/readme.md for details", + "command", cmd, + "path", cmdPath, + ) + } else { + logger.Warn( + "MariaDB command not found. 
MariaDB support will be disabled.", + "command", cmd, + "path", cmdPath, + ) + } + continue + } + + logger.Info("MariaDB command found", "command", cmd) + } + + logger.Info("MariaDB client tools verification completed!") +} + +// IsMariadbBackupVersionHigherThanRestoreVersion checks if backup was made with +// a newer MariaDB version than the restore target +func IsMariadbBackupVersionHigherThanRestoreVersion( + backupVersion, restoreVersion MariadbVersion, +) bool { + versionOrder := map[MariadbVersion]int{ + MariadbVersion55: 1, + MariadbVersion101: 2, + MariadbVersion102: 3, + MariadbVersion103: 4, + MariadbVersion104: 5, + MariadbVersion105: 6, + MariadbVersion106: 7, + MariadbVersion1011: 8, + MariadbVersion114: 9, + MariadbVersion118: 10, + MariadbVersion120: 11, + } + return versionOrder[backupVersion] > versionOrder[restoreVersion] +} + +// GetMariadbVersionEnum converts a version string to MariadbVersion enum +func GetMariadbVersionEnum(version string) MariadbVersion { + switch version { + case "5.5": + return MariadbVersion55 + case "10.1": + return MariadbVersion101 + case "10.2": + return MariadbVersion102 + case "10.3": + return MariadbVersion103 + case "10.4": + return MariadbVersion104 + case "10.5": + return MariadbVersion105 + case "10.6": + return MariadbVersion106 + case "10.11": + return MariadbVersion1011 + case "11.4": + return MariadbVersion114 + case "11.8": + return MariadbVersion118 + case "12.0": + return MariadbVersion120 + default: + panic(fmt.Sprintf("invalid mariadb version: %s", version)) + } +} + +func getMariadbBasePath( + envMode env_utils.EnvMode, + mariadbInstallDir string, +) string { + if envMode == env_utils.EnvModeDevelopment { + return filepath.Join(mariadbInstallDir, "bin") + } + // Production: single client version in /usr/local/mariadb/bin + return "/usr/local/mariadb/bin" +} +``` + +--- + +## Model Definition + +### `backend/internal/features/databases/databases/mariadb/model.go` + +```go +package mariadb + +import ( + 
"context" + "database/sql" + "errors" + "fmt" + "log/slog" + "regexp" + "strings" + "time" + + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" + + _ "github.com/go-sql-driver/mysql" + "github.com/google/uuid" +) + +type MariadbDatabase struct { + ID uuid.UUID `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"` + DatabaseID *uuid.UUID `json:"databaseId" gorm:"type:uuid;column:database_id"` + + Version tools.MariadbVersion `json:"version" gorm:"type:text;not null"` + + Host string `json:"host" gorm:"type:text;not null"` + Port int `json:"port" gorm:"type:int;not null"` + Username string `json:"username" gorm:"type:text;not null"` + Password string `json:"password" gorm:"type:text;not null"` + Database *string `json:"database" gorm:"type:text"` + IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"` +} + +func (m *MariadbDatabase) TableName() string { + return "mariadb_databases" +} + +func (m *MariadbDatabase) Validate() error { + if m.Host == "" { + return errors.New("host is required") + } + if m.Port == 0 { + return errors.New("port is required") + } + if m.Username == "" { + return errors.New("username is required") + } + if m.Password == "" { + return errors.New("password is required") + } + return nil +} + +func (m *MariadbDatabase) TestConnection( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + if m.Database == nil || *m.Database == "" { + return errors.New("database name is required for MariaDB backup") + } + + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + + dsn := m.buildDSN(password, *m.Database) + + db, err := sql.Open("mysql", dsn) + if err != nil { + return fmt.Errorf("failed to connect to MariaDB database '%s': %w", *m.Database, err) + 
} + defer func() { + if closeErr := db.Close(); closeErr != nil { + logger.Error("Failed to close MariaDB connection", "error", closeErr) + } + }() + + db.SetConnMaxLifetime(15 * time.Second) + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + + if err := db.PingContext(ctx); err != nil { + return fmt.Errorf("failed to ping MariaDB database '%s': %w", *m.Database, err) + } + + detectedVersion, err := detectMariadbVersion(ctx, db) + if err != nil { + return err + } + m.Version = detectedVersion + + return nil +} + +func (m *MariadbDatabase) HideSensitiveData() { + if m == nil { + return + } + m.Password = "" +} + +func (m *MariadbDatabase) Update(incoming *MariadbDatabase) { + m.Version = incoming.Version + m.Host = incoming.Host + m.Port = incoming.Port + m.Username = incoming.Username + m.Database = incoming.Database + m.IsHttps = incoming.IsHttps + + if incoming.Password != "" { + m.Password = incoming.Password + } +} + +func (m *MariadbDatabase) EncryptSensitiveFields( + databaseID uuid.UUID, + encryptor encryption.FieldEncryptor, +) error { + if m.Password != "" { + encrypted, err := encryptor.Encrypt(databaseID, m.Password) + if err != nil { + return err + } + m.Password = encrypted + } + return nil +} + +func (m *MariadbDatabase) PopulateVersionIfEmpty( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + if m.Version != "" { + return nil + } + + if m.Database == nil || *m.Database == "" { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + + dsn := m.buildDSN(password, *m.Database) + + db, err := sql.Open("mysql", dsn) + if err != nil { + return fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if closeErr := db.Close(); closeErr != nil { + logger.Error("Failed to close 
connection", "error", closeErr) + } + }() + + detectedVersion, err := detectMariadbVersion(ctx, db) + if err != nil { + return err + } + + m.Version = detectedVersion + return nil +} + +func (m *MariadbDatabase) IsUserReadOnly( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (bool, error) { + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return false, fmt.Errorf("failed to decrypt password: %w", err) + } + + dsn := m.buildDSN(password, *m.Database) + + db, err := sql.Open("mysql", dsn) + if err != nil { + return false, fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if closeErr := db.Close(); closeErr != nil { + logger.Error("Failed to close connection", "error", closeErr) + } + }() + + rows, err := db.QueryContext(ctx, "SHOW GRANTS FOR CURRENT_USER()") + if err != nil { + return false, fmt.Errorf("failed to check grants: %w", err) + } + defer func() { _ = rows.Close() }() + + writePrivileges := []string{ + "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "ALTER", + "INDEX", "GRANT OPTION", "ALL PRIVILEGES", "SUPER", + } + + for rows.Next() { + var grant string + if err := rows.Scan(&grant); err != nil { + return false, fmt.Errorf("failed to scan grant: %w", err) + } + + for _, priv := range writePrivileges { + if regexp.MustCompile(`(?i)\b` + priv + `\b`).MatchString(grant) { + return false, nil + } + } + } + + if err := rows.Err(); err != nil { + return false, fmt.Errorf("error iterating grants: %w", err) + } + + return true, nil +} + +func (m *MariadbDatabase) CreateReadOnlyUser( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, string, error) { + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return "", "", fmt.Errorf("failed to decrypt password: %w", err) + } + + dsn := m.buildDSN(password, *m.Database) 
+ + db, err := sql.Open("mysql", dsn) + if err != nil { + return "", "", fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if closeErr := db.Close(); closeErr != nil { + logger.Error("Failed to close connection", "error", closeErr) + } + }() + + maxRetries := 3 + for attempt := range maxRetries { + newUsername := fmt.Sprintf("postgresus-%s", uuid.New().String()[:8]) + newPassword := uuid.New().String() + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return "", "", fmt.Errorf("failed to begin transaction: %w", err) + } + + success := false + defer func() { + if !success { + if rollbackErr := tx.Rollback(); rollbackErr != nil { + logger.Error("Failed to rollback transaction", "error", rollbackErr) + } + } + }() + + // MariaDB uses same syntax as MySQL for user creation + _, err = tx.ExecContext(ctx, fmt.Sprintf( + "CREATE USER '%s'@'%%' IDENTIFIED BY '%s'", + newUsername, + newPassword, + )) + if err != nil { + if attempt < maxRetries-1 { + continue + } + return "", "", fmt.Errorf("failed to create user: %w", err) + } + + // Grant SELECT, SHOW VIEW, LOCK TABLES for backup operations + // Also grant TRIGGER and EVENT for complete backups + _, err = tx.ExecContext(ctx, fmt.Sprintf( + "GRANT SELECT, SHOW VIEW, LOCK TABLES, TRIGGER, EVENT ON `%s`.* TO '%s'@'%%'", + *m.Database, + newUsername, + )) + if err != nil { + return "", "", fmt.Errorf("failed to grant database privileges: %w", err) + } + + // PROCESS privilege needed for --single-transaction + _, err = tx.ExecContext(ctx, fmt.Sprintf( + "GRANT PROCESS ON *.* TO '%s'@'%%'", + newUsername, + )) + if err != nil { + return "", "", fmt.Errorf("failed to grant PROCESS privilege: %w", err) + } + + _, err = tx.ExecContext(ctx, "FLUSH PRIVILEGES") + if err != nil { + return "", "", fmt.Errorf("failed to flush privileges: %w", err) + } + + if err := tx.Commit(); err != nil { + return "", "", fmt.Errorf("failed to commit transaction: %w", err) + } + + success = true + logger.Info( + 
"Read-only MariaDB user created successfully", + "username", newUsername, + ) + return newUsername, newPassword, nil + } + + return "", "", errors.New("failed to generate unique username after 3 attempts") +} + +func (m *MariadbDatabase) buildDSN(password string, database string) string { + tlsConfig := "false" + if m.IsHttps { + tlsConfig = "true" + } + + return fmt.Sprintf( + "%s:%s@tcp(%s:%d)/%s?parseTime=true&timeout=15s&tls=%s&charset=utf8mb4", + m.Username, + password, + m.Host, + m.Port, + database, + tlsConfig, + ) +} + +// detectMariadbVersion parses VERSION() output to detect MariaDB version +// MariaDB returns strings like "10.11.6-MariaDB" or "11.4.2-MariaDB-1:11.4.2+maria~ubu2204" +func detectMariadbVersion(ctx context.Context, db *sql.DB) (tools.MariadbVersion, error) { + var versionStr string + err := db.QueryRowContext(ctx, "SELECT VERSION()").Scan(&versionStr) + if err != nil { + return "", fmt.Errorf("failed to query MariaDB version: %w", err) + } + + // Check if this is actually MariaDB (not MySQL) + if !strings.Contains(strings.ToLower(versionStr), "mariadb") { + return "", fmt.Errorf("not a MariaDB server (version: %s). 
Use MySQL database type instead", versionStr) + } + + // Parse version number (e.g., "10.11.6-MariaDB" -> "10.11") + re := regexp.MustCompile(`^(\d+)\.(\d+)`) + matches := re.FindStringSubmatch(versionStr) + if len(matches) < 3 { + return "", fmt.Errorf("could not parse MariaDB version: %s", versionStr) + } + + major := matches[1] + minor := matches[2] + versionKey := fmt.Sprintf("%s.%s", major, minor) + + switch versionKey { + case "5.5": + return tools.MariadbVersion55, nil + case "10.1": + return tools.MariadbVersion101, nil + case "10.2": + return tools.MariadbVersion102, nil + case "10.3": + return tools.MariadbVersion103, nil + case "10.4": + return tools.MariadbVersion104, nil + case "10.5": + return tools.MariadbVersion105, nil + case "10.6": + return tools.MariadbVersion106, nil + case "10.11": + return tools.MariadbVersion1011, nil + case "11.4": + return tools.MariadbVersion114, nil + case "11.8": + return tools.MariadbVersion118, nil + case "12.0": + return tools.MariadbVersion120, nil + default: + return "", fmt.Errorf("unsupported MariaDB version: %s (supported: 5.5, 10.1-10.6, 10.11, 11.4, 11.8, 12.0)", versionKey) + } +} + +func decryptPasswordIfNeeded( + password string, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, error) { + if encryptor == nil { + return password, nil + } + return encryptor.Decrypt(databaseID, password) +} +``` + +--- + +## Backup Implementation + +### `backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go` + +```go +package usecases_mariadb + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/klauspost/compress/zstd" + + "postgresus-backend/internal/config" + backup_encryption "postgresus-backend/internal/features/backups/backups/encryption" + usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common" + 
backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mariadbtypes "postgresus-backend/internal/features/databases/databases/mariadb" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/storages" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" +) + +const ( + backupTimeout = 23 * time.Hour + shutdownCheckInterval = 1 * time.Second + copyBufferSize = 8 * 1024 * 1024 + progressReportIntervalMB = 1.0 + zstdStorageCompressionLevel = 3 + exitCodeGenericError = 1 + exitCodeConnectionError = 2 +) + +type CreateMariadbBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService + fieldEncryptor encryption.FieldEncryptor +} + +type writeResult struct { + bytesWritten int + writeErr error +} + +func (uc *CreateMariadbBackupUsecase) Execute( + ctx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + db *databases.Database, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), +) (*usecases_common.BackupMetadata, error) { + uc.logger.Info( + "Creating MariaDB backup via mariadb-dump", + "databaseId", db.ID, + "storageId", storage.ID, + ) + + if !backupConfig.IsBackupsEnabled { + return nil, fmt.Errorf("backups are not enabled for this database: \"%s\"", db.Name) + } + + mdb := db.Mariadb + if mdb == nil { + return nil, fmt.Errorf("mariadb database configuration is required") + } + + if mdb.Database == nil || *mdb.Database == "" { + return nil, fmt.Errorf("database name is required for mariadb-dump backups") + } + + decryptedPassword, err := uc.fieldEncryptor.Decrypt(db.ID, mdb.Password) + if err != nil { + return nil, fmt.Errorf("failed to decrypt database password: %w", err) + } + + args := uc.buildMariadbDumpArgs(mdb) + + return uc.streamToStorage( + ctx, + backupID, + backupConfig, + 
tools.GetMariadbExecutable( + tools.MariadbExecutableMariadbDump, + config.GetEnv().EnvMode, + config.GetEnv().MariadbInstallDir, + ), + args, + decryptedPassword, + storage, + backupProgressListener, + mdb, + ) +} + +func (uc *CreateMariadbBackupUsecase) buildMariadbDumpArgs(mdb *mariadbtypes.MariadbDatabase) []string { + args := []string{ + "--host=" + mdb.Host, + "--port=" + strconv.Itoa(mdb.Port), + "--user=" + mdb.Username, + "--single-transaction", + "--routines", + "--triggers", + "--events", + "--quick", + "--verbose", + } + + // MariaDB supports zstd compression for network transfer (10.4.3+) + // Use it for all supported versions + args = append(args, "--compress") + + if mdb.IsHttps { + args = append(args, "--ssl") + } + + if mdb.Database != nil && *mdb.Database != "" { + args = append(args, *mdb.Database) + } + + return args +} + +func (uc *CreateMariadbBackupUsecase) streamToStorage( + parentCtx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + mariadbBin string, + args []string, + password string, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), + mdbConfig *mariadbtypes.MariadbDatabase, +) (*usecases_common.BackupMetadata, error) { + uc.logger.Info("Streaming MariaDB backup to storage", "mariadbBin", mariadbBin) + + ctx, cancel := uc.createBackupContext(parentCtx) + defer cancel() + + myCnfFile, err := uc.createTempMyCnfFile(mdbConfig, password) + if err != nil { + return nil, fmt.Errorf("failed to create .my.cnf: %w", err) + } + defer func() { _ = os.RemoveAll(filepath.Dir(myCnfFile)) }() + + fullArgs := append([]string{"--defaults-file=" + myCnfFile}, args...) + + cmd := exec.CommandContext(ctx, mariadbBin, fullArgs...) 
+ uc.logger.Info("Executing MariaDB backup command", "command", cmd.String()) + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, + "MYSQL_PWD=", + "LC_ALL=C.UTF-8", + "LANG=C.UTF-8", + ) + + pgStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("stdout pipe: %w", err) + } + + pgStderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("stderr pipe: %w", err) + } + + stderrCh := make(chan []byte, 1) + go func() { + stderrOutput, _ := io.ReadAll(pgStderr) + stderrCh <- stderrOutput + }() + + storageReader, storageWriter := io.Pipe() + + finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption( + backupID, + backupConfig, + storageWriter, + ) + if err != nil { + return nil, err + } + + zstdWriter, err := zstd.NewWriter(finalWriter, + zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(zstdStorageCompressionLevel))) + if err != nil { + return nil, fmt.Errorf("failed to create zstd writer: %w", err) + } + countingWriter := usecases_common.NewCountingWriter(zstdWriter) + + saveErrCh := make(chan error, 1) + go func() { + saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader) + saveErrCh <- saveErr + }() + + if err = cmd.Start(); err != nil { + return nil, fmt.Errorf("start %s: %w", filepath.Base(mariadbBin), err) + } + + copyResultCh := make(chan error, 1) + bytesWrittenCh := make(chan int64, 1) + go func() { + bytesWritten, err := uc.copyWithShutdownCheck( + ctx, + countingWriter, + pgStdout, + backupProgressListener, + ) + bytesWrittenCh <- bytesWritten + copyResultCh <- err + }() + + copyErr := <-copyResultCh + bytesWritten := <-bytesWrittenCh + waitErr := cmd.Wait() + + select { + case <-ctx.Done(): + uc.cleanupOnCancellation(zstdWriter, encryptionWriter, storageWriter, saveErrCh) + return nil, uc.checkCancellationReason() + default: + } + + if err := zstdWriter.Close(); err != nil { + uc.logger.Error("Failed to close zstd writer", "error", err) + } + if err := 
uc.closeWriters(encryptionWriter, storageWriter); err != nil { + <-saveErrCh + return nil, err + } + + saveErr := <-saveErrCh + stderrOutput := <-stderrCh + + if waitErr == nil && copyErr == nil && saveErr == nil && backupProgressListener != nil { + sizeMB := float64(bytesWritten) / (1024 * 1024) + backupProgressListener(sizeMB) + } + + switch { + case waitErr != nil: + return nil, uc.buildMariadbDumpErrorMessage(waitErr, stderrOutput, mariadbBin) + case copyErr != nil: + return nil, fmt.Errorf("copy to storage: %w", copyErr) + case saveErr != nil: + return nil, fmt.Errorf("save to storage: %w", saveErr) + } + + return &backupMetadata, nil +} + +func (uc *CreateMariadbBackupUsecase) createTempMyCnfFile( + mdbConfig *mariadbtypes.MariadbDatabase, + password string, +) (string, error) { + tempDir, err := os.MkdirTemp("", "mycnf") + if err != nil { + return "", fmt.Errorf("failed to create temp directory: %w", err) + } + + myCnfFile := filepath.Join(tempDir, ".my.cnf") + + // Escape password for .my.cnf format + escapedPassword := strings.ReplaceAll(password, "\\", "\\\\") + escapedPassword = strings.ReplaceAll(escapedPassword, "\"", "\\\"") + + content := fmt.Sprintf(`[client] +user=%s +password="%s" +host=%s +port=%d +`, mdbConfig.Username, escapedPassword, mdbConfig.Host, mdbConfig.Port) + + if mdbConfig.IsHttps { + content += "ssl=true\n" + } + + err = os.WriteFile(myCnfFile, []byte(content), 0600) + if err != nil { + return "", fmt.Errorf("failed to write .my.cnf: %w", err) + } + + return myCnfFile, nil +} + +// copyWithShutdownCheck, createBackupContext, setupBackupEncryption, +// cleanupOnCancellation, closeWriters, checkCancellationReason, +// buildMariadbDumpErrorMessage - same implementation as MySQL use case +// (copy from usecases_mysql package) +``` + +### `backend/internal/features/backups/backups/usecases/mariadb/di.go` + +```go +package usecases_mariadb + +import ( + "log/slog" + + encryption_secrets 
"postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/logger" +) + +var createMariadbBackupUsecase *CreateMariadbBackupUsecase + +func init() { + createMariadbBackupUsecase = &CreateMariadbBackupUsecase{ + logger: logger.GetLogger(), + secretKeyService: encryption_secrets.GetSecretKeyService(), + fieldEncryptor: encryption.GetFieldEncryptor(), + } +} + +func GetCreateMariadbBackupUsecase() *CreateMariadbBackupUsecase { + return createMariadbBackupUsecase +} +``` + +--- + +## Restore Implementation + +### `backend/internal/features/restores/usecases/mariadb/restore_backup_uc.go` + +```go +package usecases_mariadb + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/klauspost/compress/zstd" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + "postgresus-backend/internal/features/backups/backups/encryption" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mariadbtypes "postgresus-backend/internal/features/databases/databases/mariadb" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/restores/models" + "postgresus-backend/internal/features/storages" + util_encryption "postgresus-backend/internal/util/encryption" + files_utils "postgresus-backend/internal/util/files" + "postgresus-backend/internal/util/tools" +) + +type RestoreMariadbBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService +} + +func (uc *RestoreMariadbBackupUsecase) Execute( + originalDB *databases.Database, + restoringToDB *databases.Database, + backupConfig *backups_config.BackupConfig, + restore models.Restore, + backup *backups.Backup, + 
storage *storages.Storage, +) error { + if originalDB.Type != databases.DatabaseTypeMariadb { + return errors.New("database type not supported") + } + + uc.logger.Info( + "Restoring MariaDB backup via mariadb client", + "restoreId", restore.ID, + "backupId", backup.ID, + ) + + mdb := restoringToDB.Mariadb + if mdb == nil { + return fmt.Errorf("mariadb configuration is required for restore") + } + + if mdb.Database == nil || *mdb.Database == "" { + return fmt.Errorf("target database name is required for mariadb restore") + } + + args := []string{ + "--host=" + mdb.Host, + "--port=" + strconv.Itoa(mdb.Port), + "--user=" + mdb.Username, + "--verbose", + } + + if mdb.IsHttps { + args = append(args, "--ssl") + } + + if mdb.Database != nil && *mdb.Database != "" { + args = append(args, *mdb.Database) + } + + return uc.restoreFromStorage( + originalDB, + tools.GetMariadbExecutable( + tools.MariadbExecutableMariadb, + config.GetEnv().EnvMode, + config.GetEnv().MariadbInstallDir, + ), + args, + mdb.Password, + backup, + storage, + mdb, + ) +} + +// restoreFromStorage, executeMysqlRestore, downloadBackupToTempFile, +// setupDecryption, createTempMyCnfFile, copyWithShutdownCheck, +// handleMariadbRestoreError - same pattern as MySQL restore use case +``` + +### `backend/internal/features/restores/usecases/mariadb/di.go` + +```go +package usecases_mariadb + +import ( + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/util/logger" +) + +var restoreMariadbBackupUsecase *RestoreMariadbBackupUsecase + +func init() { + restoreMariadbBackupUsecase = &RestoreMariadbBackupUsecase{ + logger: logger.GetLogger(), + secretKeyService: encryption_secrets.GetSecretKeyService(), + } +} + +func GetRestoreMariadbBackupUsecase() *RestoreMariadbBackupUsecase { + return restoreMariadbBackupUsecase +} +``` + +--- + +## Migration Script + +### `backend/migrations/XXXXXX_add_mariadb_databases_table.up.sql` + +```sql +CREATE TABLE 
mariadb_databases ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + database_id UUID REFERENCES databases(id) ON DELETE CASCADE, + version TEXT NOT NULL, + host TEXT NOT NULL, + port INT NOT NULL, + username TEXT NOT NULL, + password TEXT NOT NULL, + database TEXT, + is_https BOOLEAN NOT NULL DEFAULT FALSE +); + +CREATE INDEX idx_mariadb_databases_database_id ON mariadb_databases(database_id); +``` + +### `backend/migrations/XXXXXX_add_mariadb_databases_table.down.sql` + +```sql +DROP INDEX IF EXISTS idx_mariadb_databases_database_id; +DROP TABLE IF EXISTS mariadb_databases; +``` + +--- + +## Model Updates + +### `backend/internal/features/databases/model.go` changes + +Add to Database struct: + +```go +type Database struct { + // ... existing fields ... + + Postgresql *postgresql.PostgresqlDatabase `json:"postgresql,omitempty" gorm:"foreignKey:DatabaseID"` + Mysql *mysql.MysqlDatabase `json:"mysql,omitempty" gorm:"foreignKey:DatabaseID"` + Mariadb *mariadb.MariadbDatabase `json:"mariadb,omitempty" gorm:"foreignKey:DatabaseID"` + + // ... rest of fields ... +} +``` + +Update methods to handle MariaDB case: + +```go +func (d *Database) Validate() error { + // ... existing code ... + case DatabaseTypeMariadb: + if d.Mariadb == nil { + return errors.New("mariadb database is required") + } + return d.Mariadb.Validate() + // ... +} + +func (d *Database) TestConnection(...) error { + // ... add Mariadb case +} + +func (d *Database) EncryptSensitiveFields(...) error { + // ... add Mariadb case +} + +func (d *Database) PopulateVersionIfEmpty(...) error { + // ... add Mariadb case +} + +func (d *Database) Update(incoming *Database) { + // ... add Mariadb case +} + +func (d *Database) HideSensitiveData() { + // ... 
add Mariadb case +} +``` + +--- + +## Service Updates + +### `backend/internal/features/databases/service.go` - CopyDatabase + +Add MariaDB case: + +```go +case DatabaseTypeMariadb: + if existingDatabase.Mariadb != nil { + newDatabase.Mariadb = &mariadb.MariadbDatabase{ + ID: uuid.Nil, + DatabaseID: nil, + Version: existingDatabase.Mariadb.Version, + Host: existingDatabase.Mariadb.Host, + Port: existingDatabase.Mariadb.Port, + Username: existingDatabase.Mariadb.Username, + Password: existingDatabase.Mariadb.Password, + Database: existingDatabase.Mariadb.Database, + IsHttps: existingDatabase.Mariadb.IsHttps, + } + } +``` + +### `backend/internal/features/databases/service.go` - IsUserReadOnly + +Add MariaDB case: + +```go +case DatabaseTypeMariadb: + return usingDatabase.Mariadb.IsUserReadOnly( + ctx, s.logger, s.fieldEncryptor, usingDatabase.ID, + ) +``` + +### `backend/internal/features/databases/service.go` - CreateReadOnlyUser + +Add MariaDB case: + +```go +case DatabaseTypeMariadb: + username, password, err = usingDatabase.Mariadb.CreateReadOnlyUser( + ctx, s.logger, s.fieldEncryptor, usingDatabase.ID, + ) +``` + +--- + +## Repository Updates + +### `backend/internal/features/databases/repository.go` + +Add MariaDB preload and handling: + +```go +func (r *DatabaseRepository) FindByID(id uuid.UUID) (*Database, error) { + // ... existing code ... + Preload("Mariadb"). + // ... +} + +func (r *DatabaseRepository) Save(database *Database) (*Database, error) { + // ... add case for DatabaseTypeMariadb + case DatabaseTypeMariadb: + if database.Mariadb == nil { + return errors.New("mariadb configuration is required for MariaDB database") + } + database.Mariadb.DatabaseID = &database.ID + // ... +} + +func (r *DatabaseRepository) Delete(id uuid.UUID) error { + // ... add case for DatabaseTypeMariadb + case DatabaseTypeMariadb: + if err := tx. + Where("database_id = ?", id). + Delete(&mariadb.MariadbDatabase{}).Error; err != nil { + return err + } + // ... 
+} +``` + +--- + +## Restore Service Updates + +### `backend/internal/features/restores/dto.go` + +```go +type RestoreBackupRequest struct { + PostgresqlDatabase *postgresql.PostgresqlDatabase `json:"postgresql,omitempty"` + MysqlDatabase *mysql.MysqlDatabase `json:"mysql,omitempty"` + MariadbDatabase *mariadb.MariadbDatabase `json:"mariadb,omitempty"` +} +``` + +### `backend/internal/features/restores/service.go` + +Add version validation for MariaDB: + +```go +func (s *RestoreService) validateVersionCompatibility(...) error { + // ... existing cases ... + case databases.DatabaseTypeMariadb: + if requestDTO.MariadbDatabase == nil { + return errors.New("mariadb database configuration is required for restore") + } + if tools.IsMariadbBackupVersionHigherThanRestoreVersion( + backupDatabase.Mariadb.Version, + requestDTO.MariadbDatabase.Version, + ) { + return errors.New(`backup database version is higher than restore database version. ` + + `Should be restored to the same version as the backup database or higher. ` + + `For example, you can restore MariaDB 10.11 backup to MariaDB 10.11, 11.4 or higher. But cannot restore to 10.6`) + } + // ... +} +``` + +Add MariaDB restore handling: + +```go +func (s *RestoreService) RestoreBackup(...) error { + // ... existing code ... + case databases.DatabaseTypeMariadb: + if requestDTO.MariadbDatabase == nil { + return errors.New("mariadb database is required") + } + // ... +} +``` + +--- + +## Config Updates + +### `backend/internal/config/config.go` + +```go +type EnvVariables struct { + // ... existing fields ... 
+ + MariadbInstallDir string `env:"MARIADB_INSTALL_DIR"` + + // Testing MariaDB + TestMariadb55Port string `env:"TEST_MARIADB_55_PORT"` + TestMariadb101Port string `env:"TEST_MARIADB_101_PORT"` + TestMariadb102Port string `env:"TEST_MARIADB_102_PORT"` + TestMariadb103Port string `env:"TEST_MARIADB_103_PORT"` + TestMariadb104Port string `env:"TEST_MARIADB_104_PORT"` + TestMariadb105Port string `env:"TEST_MARIADB_105_PORT"` + TestMariadb106Port string `env:"TEST_MARIADB_106_PORT"` + TestMariadb1011Port string `env:"TEST_MARIADB_1011_PORT"` + TestMariadb114Port string `env:"TEST_MARIADB_114_PORT"` + TestMariadb118Port string `env:"TEST_MARIADB_118_PORT"` + TestMariadb120Port string `env:"TEST_MARIADB_120_PORT"` +} + +// In loadEnvVariables(): +env.MariadbInstallDir = filepath.Join(backendRoot, "tools", "mariadb") +tools.VerifyMariadbInstallation(log, env.EnvMode, env.MariadbInstallDir) + +if env.IsTesting { + // ... existing checks ... + if env.TestMariadb55Port == "" { + log.Error("TEST_MARIADB_55_PORT is empty") + os.Exit(1) + } + // ... similar for other MariaDB ports (10.1-10.6, 10.11, 11.4, 11.8, 12.0) +} +``` + +--- + +## Docker Compose for Testing + +### `backend/docker-compose.yml.example` additions + +```yaml +services: + # ... existing services ... 
+ + test-mariadb-55: + image: mariadb:5.5 + container_name: test-mariadb-55 + ports: + - "${TEST_MARIADB_55_PORT:-33055}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8 --collation-server=utf8_unicode_ci + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-prootpassword"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-101: + image: mariadb:10.1 + container_name: test-mariadb-101 + ports: + - "${TEST_MARIADB_101_PORT:-33101}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-prootpassword"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-102: + image: mariadb:10.2 + container_name: test-mariadb-102 + ports: + - "${TEST_MARIADB_102_PORT:-33102}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-prootpassword"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-103: + image: mariadb:10.3 + container_name: test-mariadb-103 + ports: + - "${TEST_MARIADB_103_PORT:-33103}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-prootpassword"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-104: + image: mariadb:10.4 + container_name: test-mariadb-104 + ports: + - 
"${TEST_MARIADB_104_PORT:-33104}:3306" + environment: + MARIADB_ROOT_PASSWORD: rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-105: + image: mariadb:10.5 + container_name: test-mariadb-105 + ports: + - "${TEST_MARIADB_105_PORT:-33105}:3306" + environment: + MARIADB_ROOT_PASSWORD: rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-106: + image: mariadb:10.6 + container_name: test-mariadb-106 + ports: + - "${TEST_MARIADB_106_PORT:-33106}:3306" + environment: + MARIADB_ROOT_PASSWORD: rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-1011: + image: mariadb:10.11 + container_name: test-mariadb-1011 + ports: + - "${TEST_MARIADB_1011_PORT:-33111}:3306" + environment: + MARIADB_ROOT_PASSWORD: rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-114: + image: mariadb:11.4 + container_name: test-mariadb-114 + ports: + - "${TEST_MARIADB_114_PORT:-33114}:3306" + environment: + MARIADB_ROOT_PASSWORD: 
rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-118: + image: mariadb:11.8 + container_name: test-mariadb-118 + ports: + - "${TEST_MARIADB_118_PORT:-33118}:3306" + environment: + MARIADB_ROOT_PASSWORD: rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 + + test-mariadb-120: + image: mariadb:12.0 + container_name: test-mariadb-120 + ports: + - "${TEST_MARIADB_120_PORT:-33120}:3306" + environment: + MARIADB_ROOT_PASSWORD: rootpassword + MARIADB_DATABASE: testdb + MARIADB_USER: testuser + MARIADB_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"] + interval: 5s + timeout: 5s + retries: 10 +``` + +--- + +## Dockerfile Updates + +Add to the runtime stage: + +```dockerfile +# Install MariaDB client tools (single latest version - backward compatible) +# MariaDB client 12.0 supports all server versions (5.5, 10.6, 10.11, 11.4, 11.8, 12.0) +RUN apt-get update && apt-get install -y --no-install-recommends \ + apt-transport-https curl && \ + # Add MariaDB repository + curl -fsSL https://mariadb.org/mariadb_release_signing_key.asc | apt-key add - && \ + echo "deb https://mirror.mariadb.org/repo/12.0/debian $(lsb_release -cs) main" > /etc/apt/sources.list.d/mariadb.list && \ + apt-get update && \ + apt-get install -y --no-install-recommends mariadb-client && \ + # Create symlinks to standard location + mkdir -p 
/usr/local/mariadb/bin && \ + ln -sf /usr/bin/mariadb /usr/local/mariadb/bin/mariadb && \ + ln -sf /usr/bin/mariadb-dump /usr/local/mariadb/bin/mariadb-dump && \ + rm -rf /var/lib/apt/lists/* +``` + +--- + +## CI/CD Updates + +### `.github/workflows/ci-release.yml` additions + +```yaml +# In .env file creation: +# testing MariaDB +TEST_MARIADB_55_PORT=33055 +TEST_MARIADB_101_PORT=33101 +TEST_MARIADB_102_PORT=33102 +TEST_MARIADB_103_PORT=33103 +TEST_MARIADB_104_PORT=33104 +TEST_MARIADB_105_PORT=33105 +TEST_MARIADB_106_PORT=33106 +TEST_MARIADB_1011_PORT=33111 +TEST_MARIADB_114_PORT=33114 +TEST_MARIADB_118_PORT=33118 +TEST_MARIADB_120_PORT=33120 + +# In "Wait for containers to be ready": +# Wait for MariaDB containers (legacy versions use mysqladmin, newer use healthcheck.sh) +echo "Waiting for MariaDB 5.5..." +timeout 120 bash -c 'until docker exec test-mariadb-55 mysqladmin ping -h localhost -prootpassword 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.1..." +timeout 120 bash -c 'until docker exec test-mariadb-101 mysqladmin ping -h localhost -prootpassword 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.2..." +timeout 120 bash -c 'until docker exec test-mariadb-102 mysqladmin ping -h localhost -prootpassword 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.3..." +timeout 120 bash -c 'until docker exec test-mariadb-103 mysqladmin ping -h localhost -prootpassword 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.4..." +timeout 120 bash -c 'until docker exec test-mariadb-104 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.5..." +timeout 120 bash -c 'until docker exec test-mariadb-105 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.6..." 
+timeout 120 bash -c 'until docker exec test-mariadb-106 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 10.11..." +timeout 120 bash -c 'until docker exec test-mariadb-1011 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 11.4..." +timeout 120 bash -c 'until docker exec test-mariadb-114 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 11.8..." +timeout 120 bash -c 'until docker exec test-mariadb-118 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' +echo "Waiting for MariaDB 12.0..." +timeout 120 bash -c 'until docker exec test-mariadb-120 healthcheck.sh --connect --innodb_initialized 2>/dev/null; do sleep 2; done' + +# Cache MariaDB client tools +- name: Cache MariaDB client tools + id: cache-mariadb + uses: actions/cache@v4 + with: + path: backend/tools/mariadb + key: mariadb-client-120-v1 +``` + +--- + +## Download Scripts Updates + +### `backend/tools/download_linux.sh` additions + +```bash +# ========== MariaDB Installation ========== +echo "========================================" +echo "Installing MariaDB client tools (single latest version 12.0)..." 
+echo "========================================" + +MARIADB_DIR="$(pwd)/mariadb" +mkdir -p "$MARIADB_DIR/bin" + +# Add MariaDB repository and install client +$SUDO apt-get install -y -qq apt-transport-https curl +curl -fsSL https://mariadb.org/mariadb_release_signing_key.asc | $SUDO apt-key add - 2>/dev/null +echo "deb https://mirror.mariadb.org/repo/12.0/debian $(lsb_release -cs) main" | $SUDO tee /etc/apt/sources.list.d/mariadb.list >/dev/null +$SUDO apt-get update -qq -y +$SUDO apt-get install -y -qq mariadb-client + +# Create symlinks +ln -sf /usr/bin/mariadb "$MARIADB_DIR/bin/mariadb" +ln -sf /usr/bin/mariadb-dump "$MARIADB_DIR/bin/mariadb-dump" + +echo "MariaDB client tools installed successfully" +echo " mariadb: $MARIADB_DIR/bin/mariadb" +echo " mariadb-dump: $MARIADB_DIR/bin/mariadb-dump" +``` + +--- + +## Test Implementation + +### `backend/internal/features/tests/mariadb_backup_restore_test.go` + +```go +package tests + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/gin-gonic/gin" + _ "github.com/go-sql-driver/mysql" + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/assert" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mariadbtypes "postgresus-backend/internal/features/databases/databases/mariadb" + "postgresus-backend/internal/features/restores" + restores_enums "postgresus-backend/internal/features/restores/enums" + restores_models "postgresus-backend/internal/features/restores/models" + "postgresus-backend/internal/features/storages" + users_enums "postgresus-backend/internal/features/users/enums" + users_testing "postgresus-backend/internal/features/users/testing" + workspaces_testing "postgresus-backend/internal/features/workspaces/testing" + test_utils 
"postgresus-backend/internal/util/testing" + "postgresus-backend/internal/util/tools" +) + +const dropMariadbTestTableQuery = `DROP TABLE IF EXISTS test_data` + +const createMariadbTestTableQuery = ` +CREATE TABLE test_data ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) NOT NULL, + value INT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +)` + +const insertMariadbTestDataQuery = ` +INSERT INTO test_data (name, value) VALUES + ('test1', 100), + ('test2', 200), + ('test3', 300)` + +type MariadbContainer struct { + Host string + Port int + Username string + Password string + Database string + Version tools.MariadbVersion + DB *sqlx.DB +} + +type MariadbTestDataItem struct { + ID int `db:"id"` + Name string `db:"name"` + Value int `db:"value"` + CreatedAt time.Time `db:"created_at"` +} + +func Test_BackupAndRestoreMariadb_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MariadbVersion + port string + }{ + {"MariaDB 5.5", tools.MariadbVersion55, env.TestMariadb55Port}, + {"MariaDB 10.1", tools.MariadbVersion101, env.TestMariadb101Port}, + {"MariaDB 10.2", tools.MariadbVersion102, env.TestMariadb102Port}, + {"MariaDB 10.3", tools.MariadbVersion103, env.TestMariadb103Port}, + {"MariaDB 10.4", tools.MariadbVersion104, env.TestMariadb104Port}, + {"MariaDB 10.5", tools.MariadbVersion105, env.TestMariadb105Port}, + {"MariaDB 10.6", tools.MariadbVersion106, env.TestMariadb106Port}, + {"MariaDB 10.11", tools.MariadbVersion1011, env.TestMariadb1011Port}, + {"MariaDB 11.4", tools.MariadbVersion114, env.TestMariadb114Port}, + {"MariaDB 11.8", tools.MariadbVersion118, env.TestMariadb118Port}, + {"MariaDB 12.0", tools.MariadbVersion120, env.TestMariadb120Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMariadbBackupRestoreForVersion(t, tc.version, tc.port) + }) + } +} + +func 
Test_BackupAndRestoreMariadbWithEncryption_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MariadbVersion + port string + }{ + {"MariaDB 5.5", tools.MariadbVersion55, env.TestMariadb55Port}, + {"MariaDB 10.1", tools.MariadbVersion101, env.TestMariadb101Port}, + {"MariaDB 10.2", tools.MariadbVersion102, env.TestMariadb102Port}, + {"MariaDB 10.3", tools.MariadbVersion103, env.TestMariadb103Port}, + {"MariaDB 10.4", tools.MariadbVersion104, env.TestMariadb104Port}, + {"MariaDB 10.5", tools.MariadbVersion105, env.TestMariadb105Port}, + {"MariaDB 10.6", tools.MariadbVersion106, env.TestMariadb106Port}, + {"MariaDB 10.11", tools.MariadbVersion1011, env.TestMariadb1011Port}, + {"MariaDB 11.4", tools.MariadbVersion114, env.TestMariadb114Port}, + {"MariaDB 11.8", tools.MariadbVersion118, env.TestMariadb118Port}, + {"MariaDB 12.0", tools.MariadbVersion120, env.TestMariadb120Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMariadbBackupRestoreWithEncryptionForVersion(t, tc.version, tc.port) + }) + } +} + +func Test_BackupAndRestoreMariadb_WithReadOnlyUser_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MariadbVersion + port string + }{ + {"MariaDB 5.5", tools.MariadbVersion55, env.TestMariadb55Port}, + {"MariaDB 10.1", tools.MariadbVersion101, env.TestMariadb101Port}, + {"MariaDB 10.2", tools.MariadbVersion102, env.TestMariadb102Port}, + {"MariaDB 10.3", tools.MariadbVersion103, env.TestMariadb103Port}, + {"MariaDB 10.4", tools.MariadbVersion104, env.TestMariadb104Port}, + {"MariaDB 10.5", tools.MariadbVersion105, env.TestMariadb105Port}, + {"MariaDB 10.6", tools.MariadbVersion106, env.TestMariadb106Port}, + {"MariaDB 10.11", tools.MariadbVersion1011, env.TestMariadb1011Port}, + {"MariaDB 11.4", tools.MariadbVersion114, env.TestMariadb114Port}, + {"MariaDB 11.8", tools.MariadbVersion118, 
env.TestMariadb118Port}, + {"MariaDB 12.0", tools.MariadbVersion120, env.TestMariadb120Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMariadbBackupRestoreWithReadOnlyUserForVersion(t, tc.version, tc.port) + }) + } +} + +// Test implementations follow the same pattern as MySQL tests +// See mysql_backup_restore_test.go for reference + +func connectToMariadbContainer(version tools.MariadbVersion, port string) (*MariadbContainer, error) { + if port == "" { + return nil, fmt.Errorf("MariaDB %s port not configured", version) + } + + dbName := "testdb" + password := "rootpassword" + username := "root" + host := "127.0.0.1" + + portInt, err := strconv.Atoi(port) + if err != nil { + return nil, fmt.Errorf("failed to parse port: %w", err) + } + + dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true", + username, password, host, portInt, dbName) + + db, err := sqlx.Connect("mysql", dsn) + if err != nil { + return nil, fmt.Errorf("failed to connect to MariaDB database: %w", err) + } + + return &MariadbContainer{ + Host: host, + Port: portInt, + Username: username, + Password: password, + Database: dbName, + Version: version, + DB: db, + }, nil +} + +func createMariadbDatabaseViaAPI( + t *testing.T, + router *gin.Engine, + name string, + workspaceID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + version tools.MariadbVersion, + token string, +) *databases.Database { + request := databases.Database{ + Name: name, + WorkspaceID: &workspaceID, + Type: databases.DatabaseTypeMariadb, + Mariadb: &mariadbtypes.MariadbDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: &database, + Version: version, + }, + } + + w := workspaces_testing.MakeAPIRequest( + router, + "POST", + "/api/v1/databases/create", + "Bearer "+token, + request, + ) + + if w.Code != http.StatusCreated { + t.Fatalf("Failed to create MariaDB database. 
Status: %d, Body: %s", w.Code, w.Body.String()) + } + + var createdDatabase databases.Database + if err := json.Unmarshal(w.Body.Bytes(), &createdDatabase); err != nil { + t.Fatalf("Failed to unmarshal database response: %v", err) + } + + return &createdDatabase +} + +func createMariadbRestoreViaAPI( + t *testing.T, + router *gin.Engine, + backupID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + version tools.MariadbVersion, + token string, +) { + request := restores.RestoreBackupRequest{ + MariadbDatabase: &mariadbtypes.MariadbDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: &database, + Version: version, + }, + } + + test_utils.MakePostRequest( + t, + router, + fmt.Sprintf("/api/v1/restores/%s/restore", backupID.String()), + "Bearer "+token, + request, + http.StatusOK, + ) +} +``` + +--- + +## Implementation Order + +1. **Phase 1: Core Infrastructure** + - [ ] Create `MariadbVersion` enums and `mariadb.go` tools + - [ ] Create `mariadb/model.go` with `MariadbDatabase` struct + - [ ] Add migration for `mariadb_databases` table + - [ ] Update `databases/model.go` to include MariaDB + - [ ] Update `databases/enums.go` with `DatabaseTypeMariadb` + - [ ] Update `databases/repository.go` to preload MariaDB + +2. **Phase 2: Backup Implementation** + - [ ] Create `usecases/mariadb/create_backup_uc.go` + - [ ] Create `usecases/mariadb/di.go` + - [ ] Update `usecases/create_backup_uc.go` to route to MariaDB + - [ ] Update `usecases/di.go` to wire MariaDB use case + +3. **Phase 3: Restore Implementation** + - [ ] Create `restores/usecases/mariadb/restore_backup_uc.go` + - [ ] Create `restores/usecases/mariadb/di.go` + - [ ] Update `restores/usecases/restore_backup_uc.go` to route to MariaDB + - [ ] Update `restores/dto.go` for MariaDB restore DTO + - [ ] Update `restores/service.go` for MariaDB version validation + +4. 
**Phase 4: Service Layer Updates** + - [ ] Update `databases/service.go` - CopyDatabase, IsUserReadOnly, CreateReadOnlyUser + - [ ] Update `config/config.go` with MariaDB configuration + - [ ] Add MariaDB installation verification + +5. **Phase 5: Infrastructure** + - [ ] Update Dockerfile with MariaDB client installation + - [ ] Update docker-compose.yml.example with MariaDB test containers + - [ ] Update download_linux.sh with MariaDB client + - [ ] Update download_macos.sh with MariaDB client + - [ ] Update download_windows.bat with MariaDB client + - [ ] Update CI/CD workflow + +6. **Phase 6: Testing** + - [ ] Create `mariadb_backup_restore_test.go` + - [ ] Test all MariaDB versions (10.6, 10.11, 11.4, 11.8) + - [ ] Test encryption with MariaDB backups + - [ ] Test read-only user creation + +--- + +## Notes and Considerations + +### MariaDB vs MySQL Differences + +| Feature | MySQL | MariaDB | +| -------------------- | ------------------------------ | ------------------------------ | +| Client tools | `mysqldump`, `mysql` | `mariadb-dump`, `mariadb` | +| Client compatibility | Version-specific | Single client for all versions | +| VERSION() output | `8.0.35` | `10.11.6-MariaDB` | +| Default auth plugin | `caching_sha2_password` (8.0+) | `mysql_native_password` | +| Storage engines | InnoDB, MyISAM | InnoDB, Aria, ColumnStore | + +### Version Detection + +MariaDB's `SELECT VERSION()` returns strings containing "MariaDB": + +- `10.6.18-MariaDB` +- `10.11.6-MariaDB-1:10.11.6+maria~ubu2204` +- `11.4.2-MariaDB` +- `11.8.0-MariaDB` + +The detection logic checks for "MariaDB" substring to distinguish from MySQL. 
+ +### Client Backward Compatibility + +Unlike MySQL/PostgreSQL where version-specific clients are needed, MariaDB's client tools are backward compatible: + +- `mariadb-dump` from 12.0 can dump databases from 5.5, 10.1-10.6, 10.11, 11.4, 11.8 +- `mariadb` client from 12.0 can restore to any supported version + +This simplifies the implementation - only one client version is needed. + +### ARM64 Support + +All MariaDB versions (10.6, 10.11, 11.4, 11.8, 12.0) have full ARM64 support, unlike MySQL 5.7 which is x86_64 only. + +**Note:** MariaDB 5.5 Docker images are available for both amd64 and arm64, but arm64 support may be limited on some platforms. + +### Security Considerations + +1. **Password handling**: Uses `.my.cnf` temp file (same as MySQL) +2. **File permissions**: `.my.cnf` must be 0600 +3. **TLS**: Support SSL/TLS connections via `--ssl` flag + +### Cloud Database Compatibility + +- **AWS RDS MariaDB**: Works with standard mariadb-dump +- **Google Cloud SQL MariaDB**: Works with standard mariadb-dump +- **Azure Database for MariaDB**: Works with standard mariadb-dump +- **SkySQL (MariaDB Cloud)**: Works with standard mariadb-dump + +### Legacy Version Considerations (5.5, 10.1-10.3) + +These EOL versions are supported for legacy systems that cannot be upgraded: + +| Version | Environment Variables | Healthcheck | Character Set | +| ------- | --------------------- | -------------- | ------------- | +| 5.5 | `MYSQL_*` | mysqladmin | utf8 | +| 10.1 | `MYSQL_*` | mysqladmin | utf8mb4 | +| 10.2 | `MYSQL_*` | mysqladmin | utf8mb4 | +| 10.3 | `MYSQL_*` | mysqladmin | utf8mb4 | +| 10.4+ | `MARIADB_*` | healthcheck.sh | utf8mb4 | + +**Key differences:** + +1. **Environment variables**: Versions 5.5-10.3 use `MYSQL_*` env vars, 10.4+ use `MARIADB_*` +2. **Healthcheck**: Versions 5.5-10.3 use `mysqladmin ping`, 10.4+ use `healthcheck.sh` +3. **Character set**: 5.5 defaults to `utf8`, 10.1+ support full `utf8mb4` +4. 
**Client compatibility**: Modern mariadb-dump (12.0) can backup all legacy versions +5. **Compression**: `--compress` flag behavior varies across versions + +### MariaDB 12.0 Specific Features + +MariaDB 12.0 introduced several new features: + +1. **Passphrase-protected keys**: Enhanced security for key management +2. **SET SESSION AUTHORIZATION**: Execute actions as another user +3. **SHA2 support**: For `file_key_management.so` plugin +4. **New optimizer hints**: Improved query optimization diff --git a/backend/MONGODB_PLAN.MD b/backend/MONGODB_PLAN.MD new file mode 100644 index 0000000..c4f0623 --- /dev/null +++ b/backend/MONGODB_PLAN.MD @@ -0,0 +1,2949 @@ +# MongoDB Implementation Plan + +## Overview + +This document outlines the implementation plan for adding MongoDB backup and restore support to Postgresus. The implementation will follow the existing PostgreSQL, MySQL and MariaDB architecture patterns. + +## Supported MongoDB Versions + +| Version | Status | EOL Date | Support | +| --------------- | ------- | ---------- | ------- | +| **MongoDB 4.0** | EOL | April 2022 | ✅ Yes | +| **MongoDB 4.2** | EOL | April 2023 | ✅ Yes | +| **MongoDB 4.4** | EOL | Feb 2024 | ✅ Yes | +| **MongoDB 5.0** | EOL | Oct 2024 | ✅ Yes | +| **MongoDB 6.0** | Active | July 2025 | ✅ Yes | +| **MongoDB 7.0** | LTS | Aug 2026 | ✅ Yes | +| **MongoDB 8.0** | Current | TBD | ✅ Yes | + +**Notes:** + +- MongoDB 4.0-5.0 reached EOL (see table above) but are still supported for legacy systems that cannot be upgraded. +- MongoDB 7.0 is the current LTS (Long Term Support) version. +- MongoDB 8.0 is the latest stable release. 
+ +## Key Decisions + +| Aspect | Decision | +| ------------------ | -------------------------------------------------------------------- | +| **Architecture** | Separate `DatabaseTypeMongodb` | +| **Client tools** | Single latest version - backward compatible with all server versions | +| **Backup tool** | `mongodump` with `--archive` and `--gzip` flags | +| **Restore tool** | `mongorestore` with `--archive` and `--gzip` flags | +| **Compression** | Built-in gzip (`--gzip` flag) - no external compression needed | +| **Auto-detection** | Parse version from `db.version()` or `buildInfo` command | +| **Cross-restore** | Yes (newer mongorestore can restore older backups) | +| **ARM64** | Full support (all versions) | + +--- + +## File Structure + +### New Files to Create + +``` +backend/internal/features/databases/databases/mongodb/ +├── model.go # MongodbDatabase struct and methods + +backend/internal/features/backups/backups/usecases/mongodb/ +├── create_backup_uc.go # MongoDB backup use case +├── di.go # Dependency injection + +backend/internal/features/restores/usecases/mongodb/ +├── restore_backup_uc.go # MongoDB restore use case +├── di.go # Dependency injection + +backend/internal/util/tools/ +├── mongodb.go # MongoDB executable helpers and version enums + +backend/internal/features/tests/ +├── mongodb_backup_restore_test.go # Integration tests + +backend/internal/features/databases/databases/mongodb/ +├── readonly_user_test.go # Read-only user integration tests + +backend/migrations/ +├── XXXXXX_add_mongodb_databases_table.up.sql +├── XXXXXX_add_mongodb_databases_table.down.sql +``` + +### Files to Modify + +``` +backend/internal/features/databases/ +├── enums.go # Add DatabaseTypeMongodb +├── model.go # Add Mongodb field, update methods +├── service.go # Handle MongoDB in CopyDatabase, IsUserReadOnly, CreateReadOnlyUser +├── repository.go # Preload MongoDB relation +├── controller_test.go # Add MongoDB test cases for CRUD and sensitive data lifecycle + 
+backend/internal/features/backups/backups/usecases/ +├── create_backup_uc.go # Add MongoDB case +├── di.go # Wire MongoDB use case + +backend/internal/features/restores/usecases/ +├── restore_backup_uc.go # Add MongoDB case +├── di.go # Wire MongoDB use case + +backend/internal/features/restores/ +├── service.go # Add MongoDB validation, version compatibility +├── dto.go # Add MongodbDatabase field to RestoreBackupRequest + +backend/internal/features/healthcheck/attempt/ +├── check_database_health_uc.go # Add MongoDB case in validateDatabase() + +backend/internal/config/ +├── config.go # Add MongoDB test ports, verify MongoDB installation + +backend/tools/ +├── download_linux.sh # Add MongoDB tools download +├── download_macos.sh # Add MongoDB tools download +├── download_windows.bat # Add MongoDB tools download +├── readme.md # Update with MongoDB instructions + +Dockerfile # Add MongoDB tools installation +docker-compose.yml.example # Add MongoDB test containers +.github/workflows/ci-release.yml # Add MongoDB test setup +``` + +--- + +## Database Type Enum + +### `backend/internal/features/databases/enums.go` + +```go +const ( + DatabaseTypePostgres DatabaseType = "POSTGRES" + DatabaseTypeMysql DatabaseType = "MYSQL" + DatabaseTypeMariadb DatabaseType = "MARIADB" + DatabaseTypeMongodb DatabaseType = "MONGODB" +) +``` + +--- + +## MongoDB Version Enums + +### `backend/internal/util/tools/mongodb.go` + +```go +package tools + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + "runtime" + + env_utils "postgresus-backend/internal/util/env" +) + +type MongodbVersion string + +const ( + MongodbVersion40 MongodbVersion = "4.0" + MongodbVersion42 MongodbVersion = "4.2" + MongodbVersion44 MongodbVersion = "4.4" + MongodbVersion50 MongodbVersion = "5.0" + MongodbVersion60 MongodbVersion = "6.0" + MongodbVersion70 MongodbVersion = "7.0" + MongodbVersion80 MongodbVersion = "8.0" +) + +type MongodbExecutable string + +const ( + MongodbExecutableMongodump 
MongodbExecutable = "mongodump" + MongodbExecutableMongorestore MongodbExecutable = "mongorestore" +) + +// GetMongodbExecutable returns the full path to a MongoDB executable. +// MongoDB Database Tools use a single client version that is backward compatible +// with all server versions. +func GetMongodbExecutable( + executable MongodbExecutable, + envMode env_utils.EnvMode, + mongodbInstallDir string, +) string { + basePath := getMongodbBasePath(envMode, mongodbInstallDir) + executableName := string(executable) + + if runtime.GOOS == "windows" { + executableName += ".exe" + } + + return filepath.Join(basePath, executableName) +} + +// VerifyMongodbInstallation verifies that MongoDB Database Tools are installed. +// Unlike PostgreSQL (version-specific), MongoDB tools use a single version that +// supports all server versions (backward compatible). +func VerifyMongodbInstallation( + logger *slog.Logger, + envMode env_utils.EnvMode, + mongodbInstallDir string, +) { + binDir := getMongodbBasePath(envMode, mongodbInstallDir) + + logger.Info( + "Verifying MongoDB Database Tools installation", + "path", binDir, + ) + + if _, err := os.Stat(binDir); os.IsNotExist(err) { + if envMode == env_utils.EnvModeDevelopment { + logger.Warn( + "MongoDB bin directory not found. MongoDB support will be disabled. Read ./tools/readme.md for details", + "path", binDir, + ) + } else { + logger.Warn( + "MongoDB bin directory not found. MongoDB support will be disabled.", + "path", binDir, + ) + } + return + } + + requiredCommands := []MongodbExecutable{ + MongodbExecutableMongodump, + MongodbExecutableMongorestore, + } + + for _, cmd := range requiredCommands { + cmdPath := GetMongodbExecutable(cmd, envMode, mongodbInstallDir) + + logger.Info( + "Checking for MongoDB command", + "command", cmd, + "path", cmdPath, + ) + + if _, err := os.Stat(cmdPath); os.IsNotExist(err) { + if envMode == env_utils.EnvModeDevelopment { + logger.Warn( + "MongoDB command not found. 
MongoDB support will be disabled. Read ./tools/readme.md for details", + "command", cmd, + "path", cmdPath, + ) + } else { + logger.Warn( + "MongoDB command not found. MongoDB support will be disabled.", + "command", cmd, + "path", cmdPath, + ) + } + continue + } + + logger.Info("MongoDB command found", "command", cmd) + } + + logger.Info("MongoDB Database Tools verification completed!") +} + +// IsMongodbBackupVersionHigherThanRestoreVersion checks if backup was made with +// a newer MongoDB version than the restore target +func IsMongodbBackupVersionHigherThanRestoreVersion( + backupVersion, restoreVersion MongodbVersion, +) bool { + versionOrder := map[MongodbVersion]int{ + MongodbVersion40: 1, + MongodbVersion42: 2, + MongodbVersion44: 3, + MongodbVersion50: 4, + MongodbVersion60: 5, + MongodbVersion70: 6, + MongodbVersion80: 7, + } + return versionOrder[backupVersion] > versionOrder[restoreVersion] +} + +// GetMongodbVersionEnum converts a version string to MongodbVersion enum +func GetMongodbVersionEnum(version string) MongodbVersion { + switch version { + case "4.0": + return MongodbVersion40 + case "4.2": + return MongodbVersion42 + case "4.4": + return MongodbVersion44 + case "5.0": + return MongodbVersion50 + case "6.0": + return MongodbVersion60 + case "7.0": + return MongodbVersion70 + case "8.0": + return MongodbVersion80 + default: + panic(fmt.Sprintf("invalid mongodb version: %s", version)) + } +} + +func getMongodbBasePath( + envMode env_utils.EnvMode, + mongodbInstallDir string, +) string { + if envMode == env_utils.EnvModeDevelopment { + return filepath.Join(mongodbInstallDir, "bin") + } + // Production: single client version in /usr/local/mongodb-database-tools/bin + return "/usr/local/mongodb-database-tools/bin" +} +``` + +--- + +## Model Definition + +### `backend/internal/features/databases/databases/mongodb/model.go` + +```go +package mongodb + +import ( + "context" + "errors" + "fmt" + "log/slog" + "regexp" + "time" + + 
"postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" + + "github.com/google/uuid" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +type MongodbDatabase struct { + ID uuid.UUID `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"` + DatabaseID *uuid.UUID `json:"databaseId" gorm:"type:uuid;column:database_id"` + + Version tools.MongodbVersion `json:"version" gorm:"type:text;not null"` + + Host string `json:"host" gorm:"type:text;not null"` + Port int `json:"port" gorm:"type:int;not null"` + Username string `json:"username" gorm:"type:text;not null"` + Password string `json:"password" gorm:"type:text;not null"` + Database string `json:"database" gorm:"type:text;not null"` + AuthDatabase string `json:"authDatabase" gorm:"type:text;not null;default:'admin'"` + UseTLS bool `json:"useTls" gorm:"type:boolean;default:false"` +} + +func (m *MongodbDatabase) TableName() string { + return "mongodb_databases" +} + +func (m *MongodbDatabase) Validate() error { + if m.Host == "" { + return errors.New("host is required") + } + if m.Port == 0 { + return errors.New("port is required") + } + if m.Username == "" { + return errors.New("username is required") + } + if m.Password == "" { + return errors.New("password is required") + } + if m.Database == "" { + return errors.New("database is required") + } + return nil +} + +func (m *MongodbDatabase) TestConnection( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != 
nil { + return fmt.Errorf("failed to connect to MongoDB: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect from MongoDB", "error", disconnectErr) + } + }() + + if err := client.Ping(ctx, nil); err != nil { + return fmt.Errorf("failed to ping MongoDB database '%s': %w", m.Database, err) + } + + // Detect version + detectedVersion, err := detectMongodbVersion(ctx, client) + if err != nil { + return err + } + m.Version = detectedVersion + + return nil +} + +func (m *MongodbDatabase) HideSensitiveData() { + if m == nil { + return + } + m.Password = "" +} + +func (m *MongodbDatabase) Update(incoming *MongodbDatabase) { + m.Version = incoming.Version + m.Host = incoming.Host + m.Port = incoming.Port + m.Username = incoming.Username + m.Database = incoming.Database + m.AuthDatabase = incoming.AuthDatabase + m.UseTLS = incoming.UseTLS + + if incoming.Password != "" { + m.Password = incoming.Password + } +} + +func (m *MongodbDatabase) EncryptSensitiveFields( + databaseID uuid.UUID, + encryptor encryption.FieldEncryptor, +) error { + if m.Password != "" { + encrypted, err := encryptor.Encrypt(databaseID, m.Password) + if err != nil { + return err + } + m.Password = encrypted + } + return nil +} + +func (m *MongodbDatabase) PopulateVersionIfEmpty( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + if m.Version != "" { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if 
disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect", "error", disconnectErr) + } + }() + + detectedVersion, err := detectMongodbVersion(ctx, client) + if err != nil { + return err + } + + m.Version = detectedVersion + return nil +} + +func (m *MongodbDatabase) IsUserReadOnly( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (bool, error) { + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return false, fmt.Errorf("failed to decrypt password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return false, fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect", "error", disconnectErr) + } + }() + + // Check user roles using usersInfo command + adminDB := client.Database(m.AuthDatabase) + var result bson.M + err = adminDB.RunCommand(ctx, bson.D{ + {Key: "usersInfo", Value: bson.D{ + {Key: "user", Value: m.Username}, + {Key: "db", Value: m.AuthDatabase}, + }}, + }).Decode(&result) + if err != nil { + return false, fmt.Errorf("failed to get user info: %w", err) + } + + // Check if user has any write roles + writeRoles := []string{ + "readWrite", "readWriteAnyDatabase", "dbAdmin", "dbAdminAnyDatabase", + "userAdmin", "userAdminAnyDatabase", "clusterAdmin", "root", + "dbOwner", "backup", "restore", + } + + users, ok := result["users"].(bson.A) + if !ok || len(users) == 0 { + return true, nil // User not found, assume read-only + } + + user := users[0].(bson.M) + roles, ok := user["roles"].(bson.A) + if !ok { + return true, nil + } + + for _, roleDoc := range roles { + role := roleDoc.(bson.M) + roleName, _ := role["role"].(string) + for _, writeRole := range 
writeRoles { + if roleName == writeRole { + return false, nil // Has write access + } + } + } + + return true, nil +} + +func (m *MongodbDatabase) CreateReadOnlyUser( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, string, error) { + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return "", "", fmt.Errorf("failed to decrypt password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return "", "", fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect", "error", disconnectErr) + } + }() + + maxRetries := 3 + for attempt := range maxRetries { + newUsername := fmt.Sprintf("postgresus-%s", uuid.New().String()[:8]) + newPassword := uuid.New().String() + + // Create user with backup role (allows mongodump) + adminDB := client.Database(m.AuthDatabase) + err = adminDB.RunCommand(ctx, bson.D{ + {Key: "createUser", Value: newUsername}, + {Key: "pwd", Value: newPassword}, + {Key: "roles", Value: bson.A{ + bson.D{ + {Key: "role", Value: "backup"}, + {Key: "db", Value: "admin"}, + }, + bson.D{ + {Key: "role", Value: "read"}, + {Key: "db", Value: m.Database}, + }, + }}, + }).Err() + + if err != nil { + if attempt < maxRetries-1 { + continue + } + return "", "", fmt.Errorf("failed to create user: %w", err) + } + + logger.Info( + "Read-only MongoDB user created successfully", + "username", newUsername, + ) + return newUsername, newPassword, nil + } + + return "", "", errors.New("failed to generate unique username after 3 attempts") +} + +// buildConnectionURI builds a MongoDB connection URI +func (m *MongodbDatabase) buildConnectionURI(password string) string { + authDB := m.AuthDatabase + if authDB == "" { + 
authDB = "admin" + } + + tlsOption := "false" + if m.UseTLS { + tlsOption = "true" + } + + return fmt.Sprintf( + "mongodb://%s:%s@%s:%d/%s?authSource=%s&tls=%s&connectTimeoutMS=15000", + m.Username, + password, + m.Host, + m.Port, + m.Database, + authDB, + tlsOption, + ) +} + +// BuildMongodumpURI builds a URI suitable for mongodump (without database in path) +func (m *MongodbDatabase) BuildMongodumpURI(password string) string { + authDB := m.AuthDatabase + if authDB == "" { + authDB = "admin" + } + + tlsOption := "false" + if m.UseTLS { + tlsOption = "true" + } + + return fmt.Sprintf( + "mongodb://%s:%s@%s:%d/?authSource=%s&tls=%s&connectTimeoutMS=15000", + m.Username, + password, + m.Host, + m.Port, + authDB, + tlsOption, + ) +} + +// detectMongodbVersion gets MongoDB server version from buildInfo command +func detectMongodbVersion(ctx context.Context, client *mongo.Client) (tools.MongodbVersion, error) { + adminDB := client.Database("admin") + var result bson.M + err := adminDB.RunCommand(ctx, bson.D{{Key: "buildInfo", Value: 1}}).Decode(&result) + if err != nil { + return "", fmt.Errorf("failed to get MongoDB version: %w", err) + } + + versionStr, ok := result["version"].(string) + if !ok { + return "", errors.New("could not parse MongoDB version from buildInfo") + } + + // Parse version string (e.g., "7.0.14", "8.0.3", "4.4.29") + re := regexp.MustCompile(`^(\d+)\.(\d+)`) + matches := re.FindStringSubmatch(versionStr) + if len(matches) < 3 { + return "", fmt.Errorf("could not parse MongoDB version: %s", versionStr) + } + + major := matches[1] + minor := matches[2] + versionKey := fmt.Sprintf("%s.%s", major, minor) + + switch versionKey { + case "4.0": + return tools.MongodbVersion40, nil + case "4.2": + return tools.MongodbVersion42, nil + case "4.4": + return tools.MongodbVersion44, nil + case "5.0": + return tools.MongodbVersion50, nil + case "6.0": + return tools.MongodbVersion60, nil + case "7.0": + return tools.MongodbVersion70, nil + case "8.0": + return 
tools.MongodbVersion80, nil + default: + return "", fmt.Errorf("unsupported MongoDB version: %s (supported: 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, 8.0)", versionKey) + } +} + +func decryptPasswordIfNeeded( + password string, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, error) { + if encryptor == nil { + return password, nil + } + return encryptor.Decrypt(databaseID, password) +} +``` + +--- + +## Backup Implementation + +### `backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go` + +```go +package usecases_mongodb + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/google/uuid" + + "postgresus-backend/internal/config" + backup_encryption "postgresus-backend/internal/features/backups/backups/encryption" + usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mongodbtypes "postgresus-backend/internal/features/databases/databases/mongodb" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/storages" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" +) + +const ( + backupTimeout = 23 * time.Hour + shutdownCheckInterval = 1 * time.Second + copyBufferSize = 8 * 1024 * 1024 + progressReportIntervalMB = 1.0 +) + +type CreateMongodbBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService + fieldEncryptor encryption.FieldEncryptor +} + +func (uc *CreateMongodbBackupUsecase) Execute( + ctx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + db *databases.Database, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), +) (*usecases_common.BackupMetadata, error) { + 
uc.logger.Info( + "Creating MongoDB backup via mongodump", + "databaseId", db.ID, + "storageId", storage.ID, + ) + + if !backupConfig.IsBackupsEnabled { + return nil, fmt.Errorf("backups are not enabled for this database: \"%s\"", db.Name) + } + + mdb := db.Mongodb + if mdb == nil { + return nil, fmt.Errorf("mongodb database configuration is required") + } + + if mdb.Database == "" { + return nil, fmt.Errorf("database name is required for mongodump backups") + } + + decryptedPassword, err := uc.fieldEncryptor.Decrypt(db.ID, mdb.Password) + if err != nil { + return nil, fmt.Errorf("failed to decrypt database password: %w", err) + } + + args := uc.buildMongodumpArgs(mdb, decryptedPassword) + + return uc.streamToStorage( + ctx, + backupID, + backupConfig, + tools.GetMongodbExecutable( + tools.MongodbExecutableMongodump, + config.GetEnv().EnvMode, + config.GetEnv().MongodbInstallDir, + ), + args, + storage, + backupProgressListener, + ) +} + +func (uc *CreateMongodbBackupUsecase) buildMongodumpArgs( + mdb *mongodbtypes.MongodbDatabase, + password string, +) []string { + uri := mdb.BuildMongodumpURI(password) + + args := []string{ + "--uri=" + uri, + "--db=" + mdb.Database, + "--archive", // Output to stdout as archive + "--gzip", // Built-in gzip compression + } + + return args +} + +func (uc *CreateMongodbBackupUsecase) streamToStorage( + parentCtx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + mongodumpBin string, + args []string, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), +) (*usecases_common.BackupMetadata, error) { + uc.logger.Info("Streaming MongoDB backup to storage", "mongodumpBin", mongodumpBin) + + ctx, cancel := uc.createBackupContext(parentCtx) + defer cancel() + + cmd := exec.CommandContext(ctx, mongodumpBin, args...) 
+ + // Log command without password + safeArgs := make([]string, len(args)) + for i, arg := range args { + if len(arg) > 6 && arg[:6] == "--uri=" { + safeArgs[i] = "--uri=mongodb://***:***@***" + } else { + safeArgs[i] = arg + } + } + uc.logger.Info("Executing MongoDB backup command", "command", mongodumpBin, "args", safeArgs) + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, + "LC_ALL=C.UTF-8", + "LANG=C.UTF-8", + ) + + pgStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("stdout pipe: %w", err) + } + + pgStderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("stderr pipe: %w", err) + } + + stderrCh := make(chan []byte, 1) + go func() { + stderrOutput, _ := io.ReadAll(pgStderr) + stderrCh <- stderrOutput + }() + + storageReader, storageWriter := io.Pipe() + + // Setup encryption if enabled + // Note: mongodump --gzip already compresses, so we don't add another compression layer + finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption( + backupID, + backupConfig, + storageWriter, + ) + if err != nil { + return nil, err + } + + countingWriter := usecases_common.NewCountingWriter(finalWriter) + + saveErrCh := make(chan error, 1) + go func() { + saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader) + saveErrCh <- saveErr + }() + + if err = cmd.Start(); err != nil { + return nil, fmt.Errorf("start %s: %w", filepath.Base(mongodumpBin), err) + } + + copyResultCh := make(chan error, 1) + bytesWrittenCh := make(chan int64, 1) + go func() { + bytesWritten, copyErr := uc.copyWithShutdownCheck( + ctx, + countingWriter, + pgStdout, + backupProgressListener, + ) + bytesWrittenCh <- bytesWritten + copyResultCh <- copyErr + }() + + copyErr := <-copyResultCh + bytesWritten := <-bytesWrittenCh + waitErr := cmd.Wait() + + select { + case <-ctx.Done(): + uc.cleanupOnCancellation(encryptionWriter, storageWriter, saveErrCh) + return nil, uc.checkCancellationReason() + default: + } 
+ + if err := uc.closeWriters(encryptionWriter, storageWriter); err != nil { + <-saveErrCh + return nil, err + } + + saveErr := <-saveErrCh + stderrOutput := <-stderrCh + + if waitErr == nil && copyErr == nil && saveErr == nil && backupProgressListener != nil { + sizeMB := float64(bytesWritten) / (1024 * 1024) + backupProgressListener(sizeMB) + } + + switch { + case waitErr != nil: + return nil, uc.buildMongodumpErrorMessage(waitErr, stderrOutput, mongodumpBin) + case copyErr != nil: + return nil, fmt.Errorf("copy to storage: %w", copyErr) + case saveErr != nil: + return nil, fmt.Errorf("save to storage: %w", saveErr) + } + + return &backupMetadata, nil +} + +func (uc *CreateMongodbBackupUsecase) createBackupContext( + parentCtx context.Context, +) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithTimeout(parentCtx, backupTimeout) + + go func() { + ticker := time.NewTicker(shutdownCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if config.IsShouldShutdown() { + cancel() + return + } + } + } + }() + + return ctx, cancel +} + +func (uc *CreateMongodbBackupUsecase) setupBackupEncryption( + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + storageWriter io.WriteCloser, +) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) { + backupMetadata := usecases_common.BackupMetadata{ + Encryption: backups_config.BackupEncryptionNone, + } + + if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted { + return storageWriter, nil, backupMetadata, nil + } + + secretKey, err := uc.secretKeyService.GetOrCreateSecretKey(backupConfig.ProjectID) + if err != nil { + return nil, nil, backupMetadata, fmt.Errorf("failed to get secret key: %w", err) + } + + encryptionWriter, salt, nonce, err := backup_encryption.NewEncryptionWriter( + storageWriter, + secretKey, + ) + if err != nil { + return nil, nil, backupMetadata, fmt.Errorf("failed to create 
encryption writer: %w", err) + } + + saltBase64 := base64.StdEncoding.EncodeToString(salt) + nonceBase64 := base64.StdEncoding.EncodeToString(nonce) + + backupMetadata.Encryption = backups_config.BackupEncryptionEncrypted + backupMetadata.EncryptionSalt = &saltBase64 + backupMetadata.EncryptionIV = &nonceBase64 + + return encryptionWriter, encryptionWriter, backupMetadata, nil +} + +func (uc *CreateMongodbBackupUsecase) copyWithShutdownCheck( + ctx context.Context, + dst io.Writer, + src io.Reader, + backupProgressListener func(completedMBs float64), +) (int64, error) { + buf := make([]byte, copyBufferSize) + var totalWritten int64 + var lastReportedMB float64 + + for { + select { + case <-ctx.Done(): + return totalWritten, ctx.Err() + default: + } + + if config.IsShouldShutdown() { + return totalWritten, errors.New("shutdown requested") + } + + nr, readErr := src.Read(buf) + if nr > 0 { + nw, writeErr := dst.Write(buf[:nr]) + if nw > 0 { + totalWritten += int64(nw) + + if backupProgressListener != nil { + currentMB := float64(totalWritten) / (1024 * 1024) + if currentMB-lastReportedMB >= progressReportIntervalMB { + backupProgressListener(currentMB) + lastReportedMB = currentMB + } + } + } + if writeErr != nil { + return totalWritten, writeErr + } + if nr != nw { + return totalWritten, io.ErrShortWrite + } + } + if readErr != nil { + if readErr == io.EOF { + return totalWritten, nil + } + return totalWritten, readErr + } + } +} + +func (uc *CreateMongodbBackupUsecase) cleanupOnCancellation( + encryptionWriter *backup_encryption.EncryptionWriter, + storageWriter *io.PipeWriter, + saveErrCh chan error, +) { + if encryptionWriter != nil { + _ = encryptionWriter.Close() + } + _ = storageWriter.CloseWithError(errors.New("backup cancelled")) + <-saveErrCh +} + +func (uc *CreateMongodbBackupUsecase) closeWriters( + encryptionWriter *backup_encryption.EncryptionWriter, + storageWriter *io.PipeWriter, +) error { + if encryptionWriter != nil { + if err := 
encryptionWriter.Close(); err != nil { + uc.logger.Error("Failed to close encryption writer", "error", err) + return fmt.Errorf("failed to close encryption writer: %w", err) + } + } + if err := storageWriter.Close(); err != nil { + uc.logger.Error("Failed to close storage writer", "error", err) + return fmt.Errorf("failed to close storage writer: %w", err) + } + return nil +} + +func (uc *CreateMongodbBackupUsecase) checkCancellationReason() error { + if config.IsShouldShutdown() { + return errors.New("backup cancelled due to shutdown") + } + return errors.New("backup cancelled due to timeout") +} + +func (uc *CreateMongodbBackupUsecase) buildMongodumpErrorMessage( + waitErr error, + stderrOutput []byte, + mongodumpBin string, +) error { + stderrStr := string(stderrOutput) + + if len(stderrStr) > 0 { + return fmt.Errorf("%s failed: %w\nstderr: %s", filepath.Base(mongodumpBin), waitErr, stderrStr) + } + + return fmt.Errorf("%s failed: %w", filepath.Base(mongodumpBin), waitErr) +} +``` + +### `backend/internal/features/backups/backups/usecases/mongodb/di.go` + +```go +package usecases_mongodb + +import ( + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/logger" +) + +var createMongodbBackupUsecase *CreateMongodbBackupUsecase + +func init() { + createMongodbBackupUsecase = &CreateMongodbBackupUsecase{ + logger: logger.GetLogger(), + secretKeyService: encryption_secrets.GetSecretKeyService(), + fieldEncryptor: encryption.GetFieldEncryptor(), + } +} + +func GetCreateMongodbBackupUsecase() *CreateMongodbBackupUsecase { + return createMongodbBackupUsecase +} +``` + +--- + +## Restore Implementation + +### `backend/internal/features/restores/usecases/mongodb/restore_backup_uc.go` + +```go +package usecases_mongodb + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "time" + + 
"github.com/google/uuid" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + "postgresus-backend/internal/features/backups/backups/encryption" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mongodbtypes "postgresus-backend/internal/features/databases/databases/mongodb" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/restores/models" + "postgresus-backend/internal/features/storages" + util_encryption "postgresus-backend/internal/util/encryption" + files_utils "postgresus-backend/internal/util/files" + "postgresus-backend/internal/util/tools" +) + +const ( + restoreTimeout = 60 * time.Minute +) + +type RestoreMongodbBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService + fieldEncryptor util_encryption.FieldEncryptor +} + +func (uc *RestoreMongodbBackupUsecase) Execute( + originalDB *databases.Database, + restoringToDB *databases.Database, + backupConfig *backups_config.BackupConfig, + restore models.Restore, + backup *backups.Backup, + storage *storages.Storage, +) error { + if originalDB.Type != databases.DatabaseTypeMongodb { + return errors.New("database type not supported") + } + + uc.logger.Info( + "Restoring MongoDB backup via mongorestore", + "restoreId", restore.ID, + "backupId", backup.ID, + ) + + mdb := restoringToDB.Mongodb + if mdb == nil { + return fmt.Errorf("mongodb configuration is required for restore") + } + + if mdb.Database == "" { + return fmt.Errorf("target database name is required for mongorestore") + } + + decryptedPassword, err := uc.fieldEncryptor.Decrypt(restoringToDB.ID, mdb.Password) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + + args := uc.buildMongorestoreArgs(mdb, decryptedPassword, originalDB.Mongodb.Database) + + return uc.restoreFromStorage( + originalDB, 
+ tools.GetMongodbExecutable( + tools.MongodbExecutableMongorestore, + config.GetEnv().EnvMode, + config.GetEnv().MongodbInstallDir, + ), + args, + backup, + storage, + ) +} + +func (uc *RestoreMongodbBackupUsecase) buildMongorestoreArgs( + mdb *mongodbtypes.MongodbDatabase, + password string, + sourceDatabase string, +) []string { + uri := mdb.BuildMongodumpURI(password) + + args := []string{ + "--uri=" + uri, + "--archive", // Read from stdin as archive + "--gzip", // Input is gzip compressed + "--nsFrom=" + sourceDatabase + ".*", + "--nsTo=" + mdb.Database + ".*", + "--drop", // Drop existing collections before restore + } + + return args +} + +func (uc *RestoreMongodbBackupUsecase) restoreFromStorage( + database *databases.Database, + mongorestoreBin string, + args []string, + backup *backups.Backup, + storage *storages.Storage, +) error { + ctx, cancel := context.WithTimeout(context.Background(), restoreTimeout) + defer cancel() + + // Monitor for shutdown + go func() { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if config.IsShouldShutdown() { + cancel() + return + } + } + } + }() + + // Download backup to temp file + tempBackupFile, cleanupFunc, err := uc.downloadBackupToTempFile(ctx, backup, storage) + if err != nil { + return fmt.Errorf("failed to download backup: %w", err) + } + defer cleanupFunc() + + return uc.executeMongoRestore(ctx, mongorestoreBin, args, tempBackupFile, backup) +} + +func (uc *RestoreMongodbBackupUsecase) executeMongoRestore( + ctx context.Context, + mongorestoreBin string, + args []string, + backupFile string, + backup *backups.Backup, +) error { + cmd := exec.CommandContext(ctx, mongorestoreBin, args...) 
+ + // Log command without password + safeArgs := make([]string, len(args)) + for i, arg := range args { + if len(arg) > 6 && arg[:6] == "--uri=" { + safeArgs[i] = "--uri=mongodb://***:***@***" + } else { + safeArgs[i] = arg + } + } + uc.logger.Info("Executing MongoDB restore command", "command", mongorestoreBin, "args", safeArgs) + + // Open backup file + backupFileHandle, err := os.Open(backupFile) + if err != nil { + return fmt.Errorf("failed to open backup file: %w", err) + } + defer func() { _ = backupFileHandle.Close() }() + + var inputReader io.Reader = backupFileHandle + + // Decrypt if needed + if backup.Encryption == backups_config.BackupEncryptionEncrypted { + decryptReader, err := uc.setupDecryption(backupFileHandle, backup) + if err != nil { + return fmt.Errorf("failed to setup decryption: %w", err) + } + inputReader = decryptReader + } + + // Note: mongorestore expects gzip-compressed archive, which is already the format + // No additional decompression needed since --gzip flag handles it + + cmd.Stdin = inputReader + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LC_ALL=C.UTF-8", "LANG=C.UTF-8") + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return fmt.Errorf("stderr pipe: %w", err) + } + + stderrCh := make(chan []byte, 1) + go func() { + output, _ := io.ReadAll(stderrPipe) + stderrCh <- output + }() + + if err = cmd.Start(); err != nil { + return fmt.Errorf("start mongorestore: %w", err) + } + + waitErr := cmd.Wait() + stderrOutput := <-stderrCh + + if config.IsShouldShutdown() { + return fmt.Errorf("restore cancelled due to shutdown") + } + + if waitErr != nil { + return uc.handleMongoRestoreError(waitErr, stderrOutput, mongorestoreBin) + } + + return nil +} + +func (uc *RestoreMongodbBackupUsecase) downloadBackupToTempFile( + ctx context.Context, + backup *backups.Backup, + storage *storages.Storage, +) (string, func(), error) { + tempDir := config.GetEnv().TempFolder + tempFile := filepath.Join(tempDir, 
fmt.Sprintf("restore-%s.archive.gz", backup.ID.String())) + + reader, err := storage.GetFile(ctx, uc.fieldEncryptor, uc.logger, backup.ID) + if err != nil { + return "", nil, fmt.Errorf("failed to get backup from storage: %w", err) + } + defer func() { _ = reader.Close() }() + + file, err := os.Create(tempFile) + if err != nil { + return "", nil, fmt.Errorf("failed to create temp file: %w", err) + } + + _, err = io.Copy(file, reader) + if err != nil { + _ = file.Close() + _ = os.Remove(tempFile) + return "", nil, fmt.Errorf("failed to write backup to temp file: %w", err) + } + + if err := file.Close(); err != nil { + _ = os.Remove(tempFile) + return "", nil, fmt.Errorf("failed to close temp file: %w", err) + } + + cleanup := func() { + if err := os.Remove(tempFile); err != nil { + uc.logger.Warn("Failed to remove temp backup file", "file", tempFile, "error", err) + } + } + + return tempFile, cleanup, nil +} + +func (uc *RestoreMongodbBackupUsecase) setupDecryption( + reader io.Reader, + backup *backups.Backup, +) (io.Reader, error) { + if backup.EncryptionSalt == nil || backup.EncryptionIV == nil { + return nil, errors.New("encrypted backup missing salt or IV") + } + + salt, err := base64.StdEncoding.DecodeString(*backup.EncryptionSalt) + if err != nil { + return nil, fmt.Errorf("failed to decode encryption salt: %w", err) + } + + nonce, err := base64.StdEncoding.DecodeString(*backup.EncryptionIV) + if err != nil { + return nil, fmt.Errorf("failed to decode encryption IV: %w", err) + } + + secretKey, err := uc.secretKeyService.GetSecretKey(backup.ProjectID) + if err != nil { + return nil, fmt.Errorf("failed to get secret key: %w", err) + } + + decryptReader, err := encryption.NewDecryptionReader(reader, secretKey, salt, nonce) + if err != nil { + return nil, fmt.Errorf("failed to create decryption reader: %w", err) + } + + return decryptReader, nil +} + +func (uc *RestoreMongodbBackupUsecase) handleMongoRestoreError( + waitErr error, + stderrOutput []byte, + 
mongorestoreBin string, +) error { + stderrStr := string(stderrOutput) + + if len(stderrStr) > 0 { + return fmt.Errorf("%s failed: %w\nstderr: %s", filepath.Base(mongorestoreBin), waitErr, stderrStr) + } + + return fmt.Errorf("%s failed: %w", filepath.Base(mongorestoreBin), waitErr) +} +``` + +### `backend/internal/features/restores/usecases/mongodb/di.go` + +```go +package usecases_mongodb + +import ( + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/logger" +) + +var restoreMongodbBackupUsecase *RestoreMongodbBackupUsecase + +func init() { + restoreMongodbBackupUsecase = &RestoreMongodbBackupUsecase{ + logger: logger.GetLogger(), + secretKeyService: encryption_secrets.GetSecretKeyService(), + fieldEncryptor: encryption.GetFieldEncryptor(), + } +} + +func GetRestoreMongodbBackupUsecase() *RestoreMongodbBackupUsecase { + return restoreMongodbBackupUsecase +} +``` + +--- + +## Migration Script + +### `backend/migrations/XXXXXX_add_mongodb_databases_table.up.sql` + +```sql +CREATE TABLE mongodb_databases ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + database_id UUID REFERENCES databases(id) ON DELETE CASCADE, + version TEXT NOT NULL, + host TEXT NOT NULL, + port INT NOT NULL, + username TEXT NOT NULL, + password TEXT NOT NULL, + database TEXT NOT NULL, + auth_database TEXT NOT NULL DEFAULT 'admin', + use_tls BOOLEAN NOT NULL DEFAULT FALSE +); + +CREATE INDEX idx_mongodb_databases_database_id ON mongodb_databases(database_id); +``` + +### `backend/migrations/XXXXXX_add_mongodb_databases_table.down.sql` + +```sql +DROP INDEX IF EXISTS idx_mongodb_databases_database_id; +DROP TABLE IF EXISTS mongodb_databases; +``` + +--- + +## Model Updates + +### `backend/internal/features/databases/model.go` changes + +Add to Database struct: + +```go +type Database struct { + // ... existing fields ... 
+ + Postgresql *postgresql.PostgresqlDatabase `json:"postgresql,omitempty" gorm:"foreignKey:DatabaseID"` + Mysql *mysql.MysqlDatabase `json:"mysql,omitempty" gorm:"foreignKey:DatabaseID"` + Mariadb *mariadb.MariadbDatabase `json:"mariadb,omitempty" gorm:"foreignKey:DatabaseID"` + Mongodb *mongodb.MongodbDatabase `json:"mongodb,omitempty" gorm:"foreignKey:DatabaseID"` + + // ... rest of fields ... +} +``` + +Update methods to handle MongoDB case: + +```go +func (d *Database) Validate() error { + // ... existing code ... + case DatabaseTypeMongodb: + if d.Mongodb == nil { + return errors.New("mongodb database is required") + } + return d.Mongodb.Validate() + // ... +} + +func (d *Database) TestConnection(...) error { + // ... add Mongodb case +} + +func (d *Database) EncryptSensitiveFields(...) error { + // ... add Mongodb case +} + +func (d *Database) PopulateVersionIfEmpty(...) error { + // ... add Mongodb case +} + +func (d *Database) Update(incoming *Database) { + // ... add Mongodb case +} + +func (d *Database) HideSensitiveData() { + // ... 
add Mongodb case +} +``` + +--- + +## Service Updates + +### `backend/internal/features/databases/service.go` - CopyDatabase + +Add MongoDB case: + +```go +case DatabaseTypeMongodb: + if existingDatabase.Mongodb != nil { + newDatabase.Mongodb = &mongodb.MongodbDatabase{ + ID: uuid.Nil, + DatabaseID: nil, + Version: existingDatabase.Mongodb.Version, + Host: existingDatabase.Mongodb.Host, + Port: existingDatabase.Mongodb.Port, + Username: existingDatabase.Mongodb.Username, + Password: existingDatabase.Mongodb.Password, + Database: existingDatabase.Mongodb.Database, + AuthDatabase: existingDatabase.Mongodb.AuthDatabase, + UseTLS: existingDatabase.Mongodb.UseTLS, + } + } +``` + +### `backend/internal/features/databases/service.go` - IsUserReadOnly + +Add MongoDB case: + +```go +case DatabaseTypeMongodb: + return usingDatabase.Mongodb.IsUserReadOnly( + ctx, s.logger, s.fieldEncryptor, usingDatabase.ID, + ) +``` + +### `backend/internal/features/databases/service.go` - CreateReadOnlyUser + +Add MongoDB case: + +```go +case DatabaseTypeMongodb: + username, password, err = usingDatabase.Mongodb.CreateReadOnlyUser( + ctx, s.logger, s.fieldEncryptor, usingDatabase.ID, + ) +``` + +--- + +## Repository Updates + +### `backend/internal/features/databases/repository.go` + +Add MongoDB preload and handling: + +```go +func (r *DatabaseRepository) FindByID(id uuid.UUID) (*Database, error) { + // ... existing code ... + Preload("Mongodb"). + // ... +} + +func (r *DatabaseRepository) Save(database *Database) (*Database, error) { + // ... add case for DatabaseTypeMongodb + case DatabaseTypeMongodb: + if database.Mongodb == nil { + return errors.New("mongodb configuration is required for MongoDB database") + } + database.Mongodb.DatabaseID = &database.ID + // ... +} + +func (r *DatabaseRepository) Delete(id uuid.UUID) error { + // ... add case for DatabaseTypeMongodb + case DatabaseTypeMongodb: + if err := tx. + Where("database_id = ?", id). 
+ Delete(&mongodb.MongodbDatabase{}).Error; err != nil { + return err + } + // ... +} +``` + +--- + +## Restore Service Updates + +### `backend/internal/features/restores/dto.go` + +```go +type RestoreBackupRequest struct { + PostgresqlDatabase *postgresql.PostgresqlDatabase `json:"postgresql,omitempty"` + MysqlDatabase *mysql.MysqlDatabase `json:"mysql,omitempty"` + MariadbDatabase *mariadb.MariadbDatabase `json:"mariadb,omitempty"` + MongodbDatabase *mongodb.MongodbDatabase `json:"mongodb,omitempty"` +} +``` + +### `backend/internal/features/restores/service.go` + +Add version validation for MongoDB: + +```go +func (s *RestoreService) validateVersionCompatibility(...) error { + // ... existing cases ... + case databases.DatabaseTypeMongodb: + if requestDTO.MongodbDatabase == nil { + return errors.New("mongodb database configuration is required for restore") + } + if tools.IsMongodbBackupVersionHigherThanRestoreVersion( + backupDatabase.Mongodb.Version, + requestDTO.MongodbDatabase.Version, + ) { + return errors.New(`backup database version is higher than restore database version. ` + + `Should be restored to the same version as the backup database or higher. ` + + `For example, you can restore MongoDB 6.0 backup to MongoDB 6.0, 7.0 or 8.0. But cannot restore to 5.0`) + } + // ... +} +``` + +Add MongoDB restore handling: + +```go +func (s *RestoreService) RestoreBackup(...) error { + // ... existing code ... + case databases.DatabaseTypeMongodb: + if requestDTO.MongodbDatabase == nil { + return errors.New("mongodb database is required") + } + // ... 
+} +``` + +--- + +## Healthcheck Updates + +### `backend/internal/features/healthcheck/attempt/check_database_health_uc.go` + +Add MongoDB case to `validateDatabase()`: + +```go +func (uc *CheckDatabaseHealthUseCase) validateDatabase( + database *databases.Database, +) error { + switch database.Type { + case databases.DatabaseTypePostgres: + if database.Postgresql == nil { + return fmt.Errorf("database Postgresql config is not set") + } + case databases.DatabaseTypeMysql: + if database.Mysql == nil { + return fmt.Errorf("database MySQL config is not set") + } + case databases.DatabaseTypeMariadb: + if database.Mariadb == nil { + return fmt.Errorf("database MariaDB config is not set") + } + case databases.DatabaseTypeMongodb: + if database.Mongodb == nil { + return fmt.Errorf("database MongoDB config is not set") + } + default: + return fmt.Errorf("unsupported database type: %s", database.Type) + } + + return nil +} +``` + +--- + +## Config Updates + +### `backend/internal/config/config.go` + +```go +type EnvVariables struct { + // ... existing fields ... + + MongodbInstallDir string `env:"MONGODB_INSTALL_DIR"` + + // Testing MongoDB + TestMongodb40Port string `env:"TEST_MONGODB_40_PORT"` + TestMongodb42Port string `env:"TEST_MONGODB_42_PORT"` + TestMongodb44Port string `env:"TEST_MONGODB_44_PORT"` + TestMongodb50Port string `env:"TEST_MONGODB_50_PORT"` + TestMongodb60Port string `env:"TEST_MONGODB_60_PORT"` + TestMongodb70Port string `env:"TEST_MONGODB_70_PORT"` + TestMongodb80Port string `env:"TEST_MONGODB_80_PORT"` +} + +// In loadEnvVariables(): +env.MongodbInstallDir = filepath.Join(backendRoot, "tools", "mongodb") +tools.VerifyMongodbInstallation(log, env.EnvMode, env.MongodbInstallDir) + +if env.IsTesting { + // ... existing checks ... + if env.TestMongodb40Port == "" { + log.Error("TEST_MONGODB_40_PORT is empty") + os.Exit(1) + } + // ... 
similar for other MongoDB ports (4.2, 4.4, 5.0, 6.0, 7.0, 8.0) +} +``` + +--- + +## Docker Compose for Testing + +### `backend/docker-compose.yml.example` additions + +```yaml +services: + # ... existing services ... + + test-mongodb-40: + image: mongo:4.0 + container_name: test-mongodb-40 + ports: + - "${TEST_MONGODB_40_PORT:-27040}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-42: + image: mongo:4.2 + container_name: test-mongodb-42 + ports: + - "${TEST_MONGODB_42_PORT:-27042}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-44: + image: mongo:4.4 + container_name: test-mongodb-44 + ports: + - "${TEST_MONGODB_44_PORT:-27044}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-50: + image: mongo:5.0 + container_name: test-mongodb-50 + ports: + - "${TEST_MONGODB_50_PORT:-27050}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-60: + image: mongo:6.0 + container_name: test-mongodb-60 + ports: + - "${TEST_MONGODB_60_PORT:-27060}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + 
MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-70: + image: mongo:7.0 + container_name: test-mongodb-70 + ports: + - "${TEST_MONGODB_70_PORT:-27070}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-80: + image: mongo:8.0 + container_name: test-mongodb-80 + ports: + - "${TEST_MONGODB_80_PORT:-27080}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 +``` + +--- + +## Dockerfile Updates + +Add to the runtime stage: + +```dockerfile +# Install MongoDB Database Tools (single latest version - backward compatible) +# MongoDB Database Tools support all server versions (4.0-8.0) +RUN apt-get update && apt-get install -y --no-install-recommends \ + wget gnupg && \ + # Download MongoDB Database Tools + wget -q https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-x86_64-100.10.0.deb && \ + dpkg -i mongodb-database-tools-debian12-x86_64-100.10.0.deb && \ + rm mongodb-database-tools-debian12-x86_64-100.10.0.deb && \ + # Create symlinks to standard location + mkdir -p /usr/local/mongodb-database-tools/bin && \ + ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump && \ + ln -sf /usr/bin/mongorestore /usr/local/mongodb-database-tools/bin/mongorestore && \ + rm -rf /var/lib/apt/lists/* +``` + +--- + +## CI/CD Updates + +### `.github/workflows/ci-release.yml` additions + +```yaml +# In .env 
file creation: +# testing MongoDB +TEST_MONGODB_40_PORT=27040 +TEST_MONGODB_42_PORT=27042 +TEST_MONGODB_44_PORT=27044 +TEST_MONGODB_50_PORT=27050 +TEST_MONGODB_60_PORT=27060 +TEST_MONGODB_70_PORT=27070 +TEST_MONGODB_80_PORT=27080 + +# In "Wait for containers to be ready": +# Wait for MongoDB containers +echo "Waiting for MongoDB 4.0..." +timeout 120 bash -c 'until docker exec test-mongodb-40 mongo --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' +echo "Waiting for MongoDB 4.2..." +timeout 120 bash -c 'until docker exec test-mongodb-42 mongo --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' +echo "Waiting for MongoDB 4.4..." +timeout 120 bash -c 'until docker exec test-mongodb-44 mongo --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' +echo "Waiting for MongoDB 5.0..." +timeout 120 bash -c 'until docker exec test-mongodb-50 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' +echo "Waiting for MongoDB 6.0..." +timeout 120 bash -c 'until docker exec test-mongodb-60 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' +echo "Waiting for MongoDB 7.0..." +timeout 120 bash -c 'until docker exec test-mongodb-70 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' +echo "Waiting for MongoDB 8.0..." 
+timeout 120 bash -c 'until docker exec test-mongodb-80 mongosh --eval "db.adminCommand(\"ping\")" -u root -p rootpassword --authenticationDatabase admin 2>/dev/null; do sleep 2; done' + +# Cache MongoDB Database Tools +- name: Cache MongoDB Database Tools + id: cache-mongodb + uses: actions/cache@v4 + with: + path: backend/tools/mongodb + key: mongodb-database-tools-100.10.0-v1 +``` + +--- + +## Download Scripts Updates + +### `backend/tools/download_linux.sh` additions + +```bash +# ========== MongoDB Installation ========== +echo "========================================" +echo "Installing MongoDB Database Tools (single latest version)..." +echo "========================================" + +MONGODB_DIR="$(pwd)/mongodb" +mkdir -p "$MONGODB_DIR/bin" + +# MongoDB Database Tools are backward compatible - single version supports all servers +MONGODB_TOOLS_URL="https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-x86_64-100.10.0.deb" + +TEMP_DIR="/tmp/mongodb_install" +mkdir -p "$TEMP_DIR" +cd "$TEMP_DIR" + +echo "Downloading MongoDB Database Tools..." +wget -q "$MONGODB_TOOLS_URL" -O mongodb-database-tools.deb + +echo "Installing MongoDB Database Tools..." +$SUDO dpkg -i mongodb-database-tools.deb 2>/dev/null || $SUDO apt-get install -f -y + +# Create symlinks to tools directory +ln -sf /usr/bin/mongodump "$MONGODB_DIR/bin/mongodump" +ln -sf /usr/bin/mongorestore "$MONGODB_DIR/bin/mongorestore" + +cd - >/dev/null +rm -rf "$TEMP_DIR" + +echo "MongoDB Database Tools installed successfully" +echo " mongodump: $MONGODB_DIR/bin/mongodump" +echo " mongorestore: $MONGODB_DIR/bin/mongorestore" +``` + +### `backend/tools/download_macos.sh` additions + +```bash +# ========== MongoDB Installation ========== +echo "========================================" +echo "Installing MongoDB Database Tools..." 
+echo "========================================" + +MONGODB_DIR="$(pwd)/mongodb" +mkdir -p "$MONGODB_DIR/bin" + +# Install via Homebrew +echo "Installing MongoDB Database Tools via Homebrew..." +brew tap mongodb/brew 2>/dev/null || true +brew install mongodb-database-tools 2>/dev/null || { + echo "Warning: Could not install mongodb-database-tools via Homebrew" +} + +# Find Homebrew MongoDB tools path +BREW_MONGODB="" +if [ -f "/opt/homebrew/bin/mongodump" ]; then + BREW_MONGODB="/opt/homebrew/bin" +elif [ -f "/usr/local/bin/mongodump" ]; then + BREW_MONGODB="/usr/local/bin" +fi + +if [ -n "$BREW_MONGODB" ]; then + ln -sf "$BREW_MONGODB/mongodump" "$MONGODB_DIR/bin/mongodump" + ln -sf "$BREW_MONGODB/mongorestore" "$MONGODB_DIR/bin/mongorestore" + echo "MongoDB Database Tools linked from Homebrew" + + mongodump_ver=$("$MONGODB_DIR/bin/mongodump" --version 2>/dev/null | head -1) + echo " Verified: $mongodump_ver" +else + echo "Warning: Could not find MongoDB Database Tools binaries" + echo "Please install manually: brew tap mongodb/brew && brew install mongodb-database-tools" +fi +``` + +### `backend/tools/download_windows.bat` additions + +```batch +:: ========== MongoDB Installation ========== +echo ======================================== +echo Installing MongoDB Database Tools... +echo ======================================== +echo. + +:: MongoDB Database Tools download URL for Windows x64 +set "MONGODB_TOOLS_URL=https://fastdl.mongodb.org/tools/db/mongodb-database-tools-windows-x86_64-100.10.0.zip" + +set "mongodb_install_dir=%MONGODB_DIR%" + +:: Check if already installed +if exist "!mongodb_install_dir!\bin\mongodump.exe" ( + echo MongoDB Database Tools already installed, skipping... +) else ( + set "mongodb_filename=mongodb-database-tools.zip" + + if not exist "!mongodb_filename!" ( + echo Downloading MongoDB Database Tools... + curl -L -o "!mongodb_filename!" -A "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" "!MONGODB_TOOLS_URL!" 
+ if !errorlevel! neq 0 ( + echo ERROR: Download request failed + goto :skip_mongodb + ) + ) else ( + echo MongoDB Database Tools already downloaded + ) + + :: Extract MongoDB Database Tools + echo Extracting MongoDB Database Tools... + mkdir "!mongodb_install_dir!" 2>nul + mkdir "!mongodb_install_dir!\bin" 2>nul + + powershell -Command "Expand-Archive -Path '!mongodb_filename!' -DestinationPath '!mongodb_install_dir!_temp' -Force" + + :: Move files from nested directory to install_dir + for /d %%d in ("!mongodb_install_dir!_temp\mongodb-database-tools-*") do ( + if exist "%%d\bin\mongodump.exe" ( + copy "%%d\bin\mongodump.exe" "!mongodb_install_dir!\bin\" >nul 2>&1 + copy "%%d\bin\mongorestore.exe" "!mongodb_install_dir!\bin\" >nul 2>&1 + ) + ) + + :: Cleanup temp directory + rmdir /s /q "!mongodb_install_dir!_temp" 2>nul + + :: Verify installation + if exist "!mongodb_install_dir!\bin\mongodump.exe" ( + echo MongoDB Database Tools installed successfully + ) else ( + echo Failed to install MongoDB Database Tools - mongodump.exe not found + ) +) + +:skip_mongodb +echo. 
+``` + +--- + +## Test Implementation + +### `backend/internal/features/tests/mongodb_backup_restore_test.go` + +```go +package tests + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mongodbtypes "postgresus-backend/internal/features/databases/databases/mongodb" + "postgresus-backend/internal/features/restores" + restores_enums "postgresus-backend/internal/features/restores/enums" + "postgresus-backend/internal/features/storages" + users_enums "postgresus-backend/internal/features/users/enums" + users_testing "postgresus-backend/internal/features/users/testing" + workspaces_testing "postgresus-backend/internal/features/workspaces/testing" + test_utils "postgresus-backend/internal/util/testing" + "postgresus-backend/internal/util/tools" +) + +type MongodbContainer struct { + Host string + Port int + Username string + Password string + Database string + AuthDatabase string + Version tools.MongodbVersion + Client *mongo.Client +} + +type MongodbTestDataItem struct { + ID string `bson:"_id"` + Name string `bson:"name"` + Value int `bson:"value"` + CreatedAt time.Time `bson:"created_at"` +} + +func Test_BackupAndRestoreMongodb_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + 
{"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMongodbBackupRestoreForVersion(t, tc.version, tc.port) + }) + } +} + +func Test_BackupAndRestoreMongodbWithEncryption_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMongodbBackupRestoreWithEncryptionForVersion(t, tc.version, tc.port) + }) + } +} + +func testMongodbBackupRestoreForVersion(t *testing.T, mongodbVersion tools.MongodbVersion, port string) { + container, err := connectToMongodbContainer(mongodbVersion, port) + assert.NoError(t, err) + defer func() { _ = container.Client.Disconnect(context.Background()) }() + + // Create test data + ctx := context.Background() + collection := container.Client.Database(container.Database).Collection("test_data") + + // Drop existing collection + _ = collection.Drop(ctx) + + // Insert test data + testDocs := []interface{}{ + MongodbTestDataItem{ID: "1", Name: "test1", Value: 100, CreatedAt: time.Now()}, + MongodbTestDataItem{ID: "2", Name: "test2", Value: 200, CreatedAt: time.Now()}, + MongodbTestDataItem{ID: "3", Name: "test3", 
Value: 300, CreatedAt: time.Now()}, + } + _, err = collection.InsertMany(ctx, testDocs) + assert.NoError(t, err) + + router := createMongodbTestRouter() + user := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router) + + storage := storages.CreateTestStorage(workspace.ID) + + database := createMongodbDatabaseViaAPI( + t, router, "Test MongoDB Database", workspace.ID, + container.Host, container.Port, + container.Username, container.Password, + container.Database, container.AuthDatabase, + mongodbVersion, user.Token, + ) + + enableBackupsViaAPI( + t, router, database.ID, storage.ID, + backups_config.BackupEncryptionNone, user.Token, + ) + + createBackupViaAPI(t, router, database.ID, user.Token) + + backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute) + assert.Equal(t, backups.BackupStatusCompleted, backup.Status) + + // Create new database for restore + restoreDBName := "restoreddb_" + uuid.New().String()[:8] + + createMongodbRestoreViaAPI( + t, router, backup.ID, + container.Host, container.Port, + container.Username, container.Password, + restoreDBName, container.AuthDatabase, + mongodbVersion, user.Token, + ) + + restore := waitForRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute) + assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status) + + // Verify data integrity + restoredCollection := container.Client.Database(restoreDBName).Collection("test_data") + count, err := restoredCollection.CountDocuments(ctx, bson.M{}) + assert.NoError(t, err) + assert.Equal(t, int64(3), count) + + // Cleanup + err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String())) + if err != nil { + t.Logf("Warning: Failed to delete backup file: %v", err) + } + + // Drop restored database + _ = container.Client.Database(restoreDBName).Drop(ctx) + + test_utils.MakeDeleteRequest( + t, router, + 
"/api/v1/databases/"+database.ID.String(), + "Bearer "+user.Token, + http.StatusNoContent, + ) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func testMongodbBackupRestoreWithEncryptionForVersion(t *testing.T, mongodbVersion tools.MongodbVersion, port string) { + // Similar to testMongodbBackupRestoreForVersion but with encryption enabled + // Implementation follows the same pattern as MySQL/MariaDB encrypted tests +} + +func connectToMongodbContainer(version tools.MongodbVersion, port string) (*MongodbContainer, error) { + if port == "" { + return nil, fmt.Errorf("MongoDB %s port not configured", version) + } + + dbName := "testdb" + password := "rootpassword" + username := "root" + authDatabase := "admin" + host := "127.0.0.1" + + portInt, err := strconv.Atoi(port) + if err != nil { + return nil, fmt.Errorf("failed to parse port: %w", err) + } + + uri := fmt.Sprintf("mongodb://%s:%s@%s:%d/?authSource=%s", + username, password, host, portInt, authDatabase) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to connect to MongoDB: %w", err) + } + + if err = client.Ping(ctx, nil); err != nil { + return nil, fmt.Errorf("failed to ping MongoDB: %w", err) + } + + return &MongodbContainer{ + Host: host, + Port: portInt, + Username: username, + Password: password, + Database: dbName, + AuthDatabase: authDatabase, + Version: version, + Client: client, + }, nil +} + +func createMongodbDatabaseViaAPI( + t *testing.T, + router *gin.Engine, + name string, + workspaceID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + authDatabase string, + version tools.MongodbVersion, + token string, +) *databases.Database { + request := databases.Database{ + Name: name, + WorkspaceID: 
&workspaceID, + Type: databases.DatabaseTypeMongodb, + Mongodb: &mongodbtypes.MongodbDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: database, + AuthDatabase: authDatabase, + Version: version, + }, + } + + w := workspaces_testing.MakeAPIRequest( + router, + "POST", + "/api/v1/databases/create", + "Bearer "+token, + request, + ) + + if w.Code != http.StatusCreated { + t.Fatalf("Failed to create MongoDB database. Status: %d, Body: %s", w.Code, w.Body.String()) + } + + var createdDatabase databases.Database + if err := json.Unmarshal(w.Body.Bytes(), &createdDatabase); err != nil { + t.Fatalf("Failed to unmarshal database response: %v", err) + } + + return &createdDatabase +} + +func createMongodbRestoreViaAPI( + t *testing.T, + router *gin.Engine, + backupID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + authDatabase string, + version tools.MongodbVersion, + token string, +) { + request := restores.RestoreBackupRequest{ + MongodbDatabase: &mongodbtypes.MongodbDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: database, + AuthDatabase: authDatabase, + Version: version, + }, + } + + test_utils.MakePostRequest( + t, + router, + fmt.Sprintf("/api/v1/restores/%s/restore", backupID.String()), + "Bearer "+token, + request, + http.StatusOK, + ) +} + +func createMongodbTestRouter() *gin.Engine { + // Same pattern as MySQL/MariaDB test router creation + gin.SetMode(gin.TestMode) + router := gin.New() + // ... 
setup routes + return router +} +``` + +--- + +## Controller Test Updates + +### `backend/internal/features/databases/controller_test.go` + +Add MongoDB test case to `Test_DatabaseSensitiveDataLifecycle_AllTypes`: + +```go +func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) { + testCases := []struct { + name string + databaseType DatabaseType + createDatabase func(workspaceID uuid.UUID) *Database + updateDatabase func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database + verifySensitiveData func(t *testing.T, database *Database) + verifyHiddenData func(t *testing.T, database *Database) + }{ + // ... existing PostgreSQL and MariaDB cases ... + + { + name: "MongoDB Database", + databaseType: DatabaseTypeMongodb, + createDatabase: func(workspaceID uuid.UUID) *Database { + return &Database{ + WorkspaceID: &workspaceID, + Name: "Test MongoDB Database", + Type: DatabaseTypeMongodb, + Mongodb: &mongodb.MongodbDatabase{ + Version: tools.MongodbVersion70, + Host: "localhost", + Port: 27017, + Username: "root", + Password: "original-password-secret", + Database: "testdb", + AuthDatabase: "admin", + }, + } + }, + updateDatabase: func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database { + return &Database{ + ID: databaseID, + WorkspaceID: &workspaceID, + Name: "Updated MongoDB Database", + Type: DatabaseTypeMongodb, + Mongodb: &mongodb.MongodbDatabase{ + Version: tools.MongodbVersion80, + Host: "updated-host", + Port: 27018, + Username: "updated_user", + Password: "", + Database: "updated_testdb", + AuthDatabase: "admin", + }, + } + }, + verifySensitiveData: func(t *testing.T, database *Database) { + assert.True(t, strings.HasPrefix(database.Mongodb.Password, "enc:"), + "Password should be encrypted in database") + + encryptor := encryption.GetFieldEncryptor() + decrypted, err := encryptor.Decrypt(database.ID, database.Mongodb.Password) + assert.NoError(t, err) + assert.Equal(t, "original-password-secret", decrypted) + }, + verifyHiddenData: func(t 
*testing.T, database *Database) { + assert.Equal(t, "", database.Mongodb.Password) + }, + }, + } + + // ... rest of the test function ... +} +``` + +--- + +## Read-Only User Tests + +### `backend/internal/features/databases/databases/mongodb/readonly_user_test.go` + +```go +package mongodb + +import ( + "context" + "fmt" + "log/slog" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/util/tools" +) + +func Test_IsUserReadOnly_AdminUser_ReturnsFalse(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + container := connectToMongodbContainer(t, tc.port, tc.version) + defer func() { _ = container.Client.Disconnect(context.Background()) }() + + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + ctx := context.Background() + + isReadOnly, err := mongodbModel.IsUserReadOnly(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + assert.False(t, isReadOnly, "Root user should not be read-only") + }) + } +} + +func Test_CreateReadOnlyUser_UserCanReadButNotWrite(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version 
tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + container := connectToMongodbContainer(t, tc.port, tc.version) + defer func() { _ = container.Client.Disconnect(context.Background()) }() + + // Create test collection and data + ctx := context.Background() + collection := container.Client.Database(container.Database).Collection("readonly_test") + _ = collection.Drop(ctx) + + _, err := collection.InsertMany(ctx, []interface{}{ + bson.M{"data": "test1"}, + bson.M{"data": "test2"}, + }) + assert.NoError(t, err) + + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + + username, password, err := mongodbModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + assert.NotEmpty(t, username) + assert.NotEmpty(t, password) + assert.True(t, strings.HasPrefix(username, "postgresus-")) + + // Connect as read-only user + readOnlyModel := &MongodbDatabase{ + Version: mongodbModel.Version, + Host: mongodbModel.Host, + Port: mongodbModel.Port, + Username: username, + Password: password, + Database: mongodbModel.Database, + AuthDatabase: mongodbModel.AuthDatabase, + } + + isReadOnly, err := readOnlyModel.IsUserReadOnly(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + assert.True(t, isReadOnly, "Created user should be read-only") + + // Connect as read-only user to test operations + readOnlyURI := fmt.Sprintf("mongodb://%s:%s@%s:%d/?authSource=%s", + username, 
password, container.Host, container.Port, container.AuthDatabase) + readOnlyClient, err := mongo.Connect(ctx, options.Client().ApplyURI(readOnlyURI)) + assert.NoError(t, err) + defer func() { _ = readOnlyClient.Disconnect(ctx) }() + + readOnlyCollection := readOnlyClient.Database(container.Database).Collection("readonly_test") + + // Test read - should succeed + count, err := readOnlyCollection.CountDocuments(ctx, bson.M{}) + assert.NoError(t, err) + assert.Equal(t, int64(2), count) + + // Test write operations - should fail + _, err = readOnlyCollection.InsertOne(ctx, bson.M{"data": "should-fail"}) + assert.Error(t, err) + + _, err = readOnlyCollection.UpdateOne(ctx, bson.M{"data": "test1"}, bson.M{"$set": bson.M{"data": "hacked"}}) + assert.Error(t, err) + + _, err = readOnlyCollection.DeleteOne(ctx, bson.M{"data": "test1"}) + assert.Error(t, err) + + // Cleanup - drop user + adminDB := container.Client.Database("admin") + _ = adminDB.RunCommand(ctx, bson.D{{Key: "dropUser", Value: username}}) + }) + } +} + +func Test_ReadOnlyUser_CannotCreateOrDropCollections(t *testing.T) { + env := config.GetEnv() + container := connectToMongodbContainer(t, env.TestMongodb70Port, tools.MongodbVersion70) + defer func() { _ = container.Client.Disconnect(context.Background()) }() + + ctx := context.Background() + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + + username, password, err := mongodbModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + + // Connect as read-only user + readOnlyURI := fmt.Sprintf("mongodb://%s:%s@%s:%d/?authSource=%s", + username, password, container.Host, container.Port, container.AuthDatabase) + readOnlyClient, err := mongo.Connect(ctx, options.Client().ApplyURI(readOnlyURI)) + assert.NoError(t, err) + defer func() { _ = readOnlyClient.Disconnect(ctx) }() + + // Test create collection - should fail + err = 
readOnlyClient.Database(container.Database).CreateCollection(ctx, "hack_collection") + assert.Error(t, err) + + // Test drop collection - should fail + err = readOnlyClient.Database(container.Database).Collection("readonly_test").Drop(ctx) + assert.Error(t, err) + + // Cleanup + adminDB := container.Client.Database("admin") + _ = adminDB.RunCommand(ctx, bson.D{{Key: "dropUser", Value: username}}) +} + +func Test_CreateReadOnlyUser_DatabaseNameWithSpecialChars_Success(t *testing.T) { + env := config.GetEnv() + container := connectToMongodbContainer(t, env.TestMongodb70Port, tools.MongodbVersion70) + defer func() { _ = container.Client.Disconnect(context.Background()) }() + + ctx := context.Background() + + // Create database with special characters (dash) + specialDbName := "test-db-with-dash" + + // Create a collection in the special database to ensure it exists + specialCollection := container.Client.Database(specialDbName).Collection("test_collection") + _ = specialCollection.Drop(ctx) + _, err := specialCollection.InsertOne(ctx, bson.M{"data": "test"}) + assert.NoError(t, err) + + defer func() { + _ = container.Client.Database(specialDbName).Drop(ctx) + }() + + mongodbModel := &MongodbDatabase{ + Version: tools.MongodbVersion70, + Host: container.Host, + Port: container.Port, + Username: container.Username, + Password: container.Password, + Database: specialDbName, + AuthDatabase: container.AuthDatabase, + } + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + + username, password, err := mongodbModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + assert.NotEmpty(t, username) + assert.NotEmpty(t, password) + assert.True(t, strings.HasPrefix(username, "postgresus-")) + + // Verify read-only user can read from special database + readOnlyURI := fmt.Sprintf("mongodb://%s:%s@%s:%d/?authSource=%s", + username, password, container.Host, container.Port, container.AuthDatabase) + readOnlyClient, err := mongo.Connect(ctx, 
options.Client().ApplyURI(readOnlyURI)) + assert.NoError(t, err) + defer func() { _ = readOnlyClient.Disconnect(ctx) }() + + count, err := readOnlyClient.Database(specialDbName).Collection("test_collection").CountDocuments(ctx, bson.M{}) + assert.NoError(t, err) + assert.Equal(t, int64(1), count) + + // Verify write fails + _, err = readOnlyClient.Database(specialDbName).Collection("test_collection").InsertOne(ctx, bson.M{"data": "fail"}) + assert.Error(t, err) + + // Cleanup + adminDB := container.Client.Database("admin") + _ = adminDB.RunCommand(ctx, bson.D{{Key: "dropUser", Value: username}}) +} + +type MongodbContainer struct { + Host string + Port int + Username string + Password string + Database string + AuthDatabase string + Version tools.MongodbVersion + Client *mongo.Client +} + +func connectToMongodbContainer( + t *testing.T, + port string, + version tools.MongodbVersion, +) *MongodbContainer { + if port == "" { + t.Skipf("MongoDB port not configured for version %s", version) + } + + dbName := "testdb" + host := "127.0.0.1" + username := "root" + password := "rootpassword" + authDatabase := "admin" + + portInt, err := strconv.Atoi(port) + assert.NoError(t, err) + + uri := fmt.Sprintf("mongodb://%s:%s@%s:%d/?authSource=%s", + username, password, host, portInt, authDatabase) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri)) + if err != nil { + t.Skipf("Failed to connect to MongoDB %s: %v", version, err) + } + + if err = client.Ping(ctx, nil); err != nil { + t.Skipf("Failed to ping MongoDB %s: %v", version, err) + } + + return &MongodbContainer{ + Host: host, + Port: portInt, + Username: username, + Password: password, + Database: dbName, + AuthDatabase: authDatabase, + Version: version, + Client: client, + } +} + +func createMongodbModel(container *MongodbContainer) *MongodbDatabase { + return &MongodbDatabase{ + Version: container.Version, + 
Host: container.Host, + Port: container.Port, + Username: container.Username, + Password: container.Password, + Database: container.Database, + AuthDatabase: container.AuthDatabase, + } +} +``` + +--- + +## Implementation Order + +1. **Phase 1: Core Infrastructure** + - [ ] Create `MongodbVersion` enums and `mongodb.go` tools + - [ ] Create `mongodb/model.go` with `MongodbDatabase` struct + - [ ] Add migration for `mongodb_databases` table + - [ ] Update `databases/model.go` to include MongoDB + - [ ] Update `databases/enums.go` with `DatabaseTypeMongodb` + - [ ] Update `databases/repository.go` to preload MongoDB + +2. **Phase 2: Backup Implementation** + - [ ] Create `usecases/mongodb/create_backup_uc.go` + - [ ] Create `usecases/mongodb/di.go` + - [ ] Update `usecases/create_backup_uc.go` to route to MongoDB + - [ ] Update `usecases/di.go` to wire MongoDB use case + +3. **Phase 3: Restore Implementation** + - [ ] Create `restores/usecases/mongodb/restore_backup_uc.go` + - [ ] Create `restores/usecases/mongodb/di.go` + - [ ] Update `restores/usecases/restore_backup_uc.go` to route to MongoDB + - [ ] Update `restores/dto.go` for MongoDB restore DTO + - [ ] Update `restores/service.go` for MongoDB version validation + +4. **Phase 4: Service Layer Updates** + - [ ] Update `databases/service.go` - CopyDatabase, IsUserReadOnly, CreateReadOnlyUser + - [ ] Update `config/config.go` with MongoDB configuration + - [ ] Add MongoDB installation verification + +5. **Phase 5: Healthcheck Updates** + - [ ] Update `healthcheck/attempt/check_database_health_uc.go` - Add MongoDB case to `validateDatabase()` + +6. **Phase 6: Infrastructure** + - [ ] Update Dockerfile with MongoDB Database Tools installation + - [ ] Update docker-compose.yml.example with MongoDB test containers + - [ ] Update download_linux.sh with MongoDB tools + - [ ] Update download_macos.sh with MongoDB tools + - [ ] Update download_windows.bat with MongoDB tools + - [ ] Update CI/CD workflow + +7. 
**Phase 7: Testing** + - [ ] Create `mongodb_backup_restore_test.go` - Integration tests for backup/restore + - [ ] Create `mongodb/readonly_user_test.go` - Read-only user tests for all MongoDB versions + - [ ] Update `databases/controller_test.go` - Add MongoDB case to `Test_DatabaseSensitiveDataLifecycle_AllTypes` + - [ ] Test all MongoDB versions (4.0, 4.2, 4.4, 5.0, 6.0, 7.0, 8.0) + - [ ] Test encryption with MongoDB backups + - [ ] Test read-only user creation and permissions + +--- + +## Notes and Considerations + +### MongoDB vs MySQL/MariaDB Differences + +| Feature | MySQL/MariaDB | MongoDB | +| -------------------- | --------------------------- | -------------------------------- | +| Backup tool | `mysqldump`/`mariadb-dump` | `mongodump` | +| Restore tool | `mysql`/`mariadb` | `mongorestore` | +| Output format | SQL text + zstd | Archive + gzip (built-in) | +| Password handling | `.my.cnf` temp file | URI-based (in connection string) | +| Default port | 3306 | 27017 | +| Auth database | N/A | Usually "admin" | +| Compression | External (zstd in pipeline) | Built-in (`--gzip` flag) | +| Client compatibility | Version-specific (MySQL) | Single version (backward compat) | + +### Backup Pipeline + +``` +mongodump --archive --gzip → [encryption] → storage +``` + +Note: Unlike MySQL/MariaDB where we add zstd compression externally, MongoDB's `--gzip` flag provides built-in compression. No additional compression layer is needed. + +### Restore Pipeline + +``` +storage → [decryption] → mongorestore --archive --gzip +``` + +### Version Detection + +MongoDB's `buildInfo` command returns version strings like: + +- `7.0.14` +- `8.0.3` +- `4.4.29` + +The detection logic parses major.minor version to map to `MongodbVersion` enum. 
+ +### Client Backward Compatibility + +MongoDB Database Tools are backward compatible: + +- Latest `mongodump` (100.x) can backup MongoDB 4.0-8.0 servers +- Latest `mongorestore` (100.x) can restore to MongoDB 4.0-8.0 servers +- This simplifies installation - only one client version needed + +### Security Considerations + +1. **Password handling**: Passed via URI in connection string (standard MongoDB practice) +2. **TLS**: Support via `tls=true` parameter in URI +3. **Auth database**: Configurable (default: "admin") + +### Cloud Database Compatibility + +- **MongoDB Atlas**: Works with standard mongodump/mongorestore +- **AWS DocumentDB**: Compatible (with some limitations) +- **Azure Cosmos DB for MongoDB**: Compatible +- **Google Cloud MongoDB (via Atlas)**: Works with standard tools + +### ARM64 Support + +All MongoDB versions (4.0-8.0) have full ARM64 support for both server and client tools. + +### Healthcheck Differences by Version + +| MongoDB Version | Shell Command | Healthcheck | +| --------------- | ------------- | ------------------------------------------ | +| 4.0 - 4.4 | `mongo` | `mongo --eval "db.adminCommand('ping')"` | +| 5.0+ | `mongosh` | `mongosh --eval "db.adminCommand('ping')"` | + +The `mongo` shell was replaced by `mongosh` starting from MongoDB 5.0. + +### Go Driver Dependency + +The MongoDB model uses the official Go driver: + +```go +import "go.mongodb.org/mongo-driver/mongo" +``` + +Add to `go.mod`: + +``` +go.mongodb.org/mongo-driver v1.17.1 +``` diff --git a/backend/MYSQL_PLAN.MD b/backend/MYSQL_PLAN.MD new file mode 100644 index 0000000..ccd5c50 --- /dev/null +++ b/backend/MYSQL_PLAN.MD @@ -0,0 +1,1777 @@ +# MySQL Implementation Plan + +## Overview + +This document outlines the implementation plan for adding MySQL backup and restore support to Postgresus. The implementation will follow the existing PostgreSQL architecture patterns. 
+ +## Supported MySQL Versions + +- MySQL 5.7 +- MySQL 8.0 +- MySQL 8.4 + +## Architecture + +### Database Type Enum + +Add new database type to `backend/internal/features/databases/enums.go`: + +```go +const ( + DatabaseTypePostgres DatabaseType = "POSTGRES" + DatabaseTypeMysql DatabaseType = "MYSQL" +) +``` + +--- + +## File Structure + +### New Files to Create + +``` +backend/internal/features/databases/databases/mysql/ +├── model.go # MysqlDatabase struct and methods + +backend/internal/features/backups/backups/usecases/mysql/ +├── create_backup_uc.go # MySQL backup use case +├── di.go # Dependency injection +├── dto.go # DTOs for backup metadata +├── interfaces.go # Interfaces + +backend/internal/features/restores/usecases/mysql/ +├── restore_backup_uc.go # MySQL restore use case +├── di.go # Dependency injection + +backend/internal/util/tools/ +├── mysql.go # MySQL executable helpers and version enums + +backend/internal/features/tests/ +├── mysql_backup_restore_test.go # Integration tests + +backend/migrations/ +├── XXXXXX_add_mysql_databases_table.up.sql +├── XXXXXX_add_mysql_databases_table.down.sql +``` + +### Files to Modify + +``` +backend/internal/features/databases/ +├── enums.go # Add DatabaseTypeMysql +├── model.go # Add Mysql field, update methods +├── service.go # Handle MySQL in CopyDatabase +├── repository.go # Preload MySQL relation + +backend/internal/features/backups/backups/usecases/ +├── create_backup_uc.go # Add MySQL case +├── di.go # Wire MySQL use case + +backend/internal/features/restores/usecases/ +├── restore_backup_uc.go # Add MySQL case +├── di.go # Wire MySQL use case + +backend/internal/config/ +├── config.go # Add MySQL test ports, verify MySQL installation + +backend/internal/util/tools/ +├── enums.go # Add MySQL version enums (or create separate file) +``` + +--- + +## Model Definition + +### `backend/internal/features/databases/databases/mysql/model.go` + +```go +package mysql + +import ( + "context" + "database/sql" + 
"errors" + "fmt" + "log/slog" + "regexp" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/google/uuid" + + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" +) + +type MysqlDatabase struct { + ID uuid.UUID `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"` + DatabaseID *uuid.UUID `json:"databaseId" gorm:"type:uuid;column:database_id"` + + Version tools.MysqlVersion `json:"version" gorm:"type:text;not null"` + + // Connection data + Host string `json:"host" gorm:"type:text;not null"` + Port int `json:"port" gorm:"type:int;not null"` + Username string `json:"username" gorm:"type:text;not null"` + Password string `json:"password" gorm:"type:text;not null"` + Database *string `json:"database" gorm:"type:text"` // Target database name (single DB per config) + UseTLS bool `json:"useTls" gorm:"type:boolean;default:false"` +} + +func (m *MysqlDatabase) TableName() string { + return "mysql_databases" +} + +func (m *MysqlDatabase) Validate() error { + if m.Host == "" { + return errors.New("host is required") + } + if m.Port == 0 { + return errors.New("port is required") + } + if m.Username == "" { + return errors.New("username is required") + } + if m.Password == "" { + return errors.New("password is required") + } + return nil +} + +func (m *MysqlDatabase) TestConnection( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + // Implementation: connect via go-sql-driver/mysql + // Decrypt password, build DSN, test connection + // Auto-detect version via SELECT VERSION() +} + +func (m *MysqlDatabase) HideSensitiveData() { + if m == nil { + return + } + m.Password = "" +} + +func (m *MysqlDatabase) Update(incoming *MysqlDatabase) { + m.Version = incoming.Version + m.Host = incoming.Host + m.Port = incoming.Port + m.Username = incoming.Username + m.Database = incoming.Database + m.UseTLS = incoming.UseTLS + + if incoming.Password != "" { + m.Password = incoming.Password 
+ } +} + +func (m *MysqlDatabase) EncryptSensitiveFields( + databaseID uuid.UUID, + encryptor encryption.FieldEncryptor, +) error { + if m.Password != "" { + encrypted, err := encryptor.Encrypt(databaseID, m.Password) + if err != nil { + return err + } + m.Password = encrypted + } + return nil +} + +func (m *MysqlDatabase) PopulateVersionIfEmpty( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + // Connect and run SELECT VERSION() + // Parse version string like "8.0.35" or "5.7.44-log" + // Map to MysqlVersion enum +} + +func (m *MysqlDatabase) IsUserReadOnly( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (bool, error) { + // Check MySQL privileges: + // - SHOW GRANTS FOR CURRENT_USER() + // - Look for SELECT only (no INSERT, UPDATE, DELETE, CREATE, DROP, ALTER, etc.) +} + +func (m *MysqlDatabase) CreateReadOnlyUser( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, string, error) { + // CREATE USER 'postgresus-xxxxx'@'%' IDENTIFIED BY 'password'; + // GRANT SELECT ON database.* TO 'postgresus-xxxxx'@'%'; + // FLUSH PRIVILEGES; +} + +// Helper: build MySQL DSN +func (m *MysqlDatabase) buildDSN(password string, database string) string { + cfg := mysql.Config{ + User: m.Username, + Passwd: password, + Net: "tcp", + Addr: fmt.Sprintf("%s:%d", m.Host, m.Port), + DBName: database, + AllowNativePasswords: true, + ParseTime: true, + Timeout: 15 * time.Second, + } + + if m.UseTLS { + cfg.TLSConfig = "true" // or custom TLS config name + } + + return cfg.FormatDSN() +} + +// detectMysqlVersion parses VERSION() output +func detectMysqlVersion(versionStr string) (tools.MysqlVersion, error) { + // Parse "8.0.35", "5.7.44-log", "8.4.0" etc. 
+ re := regexp.MustCompile(`^(\d+)\.(\d+)`) + matches := re.FindStringSubmatch(versionStr) + if len(matches) < 3 { + return "", fmt.Errorf("could not parse version: %s", versionStr) + } + + major := matches[1] + minor := matches[2] + + switch { + case major == "5" && minor == "7": + return tools.MysqlVersion57, nil + case major == "8" && minor == "0": + return tools.MysqlVersion80, nil + case major == "8" && minor == "4": + return tools.MysqlVersion84, nil + default: + return "", fmt.Errorf("unsupported MySQL version: %s.%s", major, minor) + } +} +``` + +--- + +## MySQL Version Enums + +### `backend/internal/util/tools/mysql.go` + +```go +package tools + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + + env_utils "postgresus-backend/internal/util/env" +) + +type MysqlVersion string + +const ( + MysqlVersion57 MysqlVersion = "5.7" + MysqlVersion80 MysqlVersion = "8.0" + MysqlVersion84 MysqlVersion = "8.4" +) + +type MysqlExecutable string + +const ( + MysqlExecutableMysqldump MysqlExecutable = "mysqldump" + MysqlExecutableMysql MysqlExecutable = "mysql" +) + +func GetMysqlExecutable( + version MysqlVersion, + executable MysqlExecutable, + envMode env_utils.EnvMode, + mysqlInstallDir string, +) string { + basePath := getMysqlBasePath(version, envMode, mysqlInstallDir) + executableName := string(executable) + + if runtime.GOOS == "windows" { + executableName += ".exe" + } + + return filepath.Join(basePath, executableName) +} + +func getMysqlBasePath( + version MysqlVersion, + envMode env_utils.EnvMode, + mysqlInstallDir string, +) string { + if envMode == env_utils.EnvModeDevelopment { + return filepath.Join( + mysqlInstallDir, + fmt.Sprintf("mysql-%s", string(version)), + "bin", + ) + } + // Production: /usr/bin or /usr/local/mysql-X.Y/bin + return fmt.Sprintf("/usr/local/mysql-%s/bin", string(version)) +} + +func IsMysqlBackupVersionHigherThanRestoreVersion( + backupVersion, restoreVersion MysqlVersion, +) bool { + // Compare versions: 5.7 < 8.0 < 8.4 
+ versionOrder := map[MysqlVersion]int{ + MysqlVersion57: 1, + MysqlVersion80: 2, + MysqlVersion84: 3, + } + return versionOrder[backupVersion] > versionOrder[restoreVersion] +} + +// EscapeMysqlPassword escapes special characters for MySQL command line +// Used when creating .my.cnf temporary file +func EscapeMysqlPassword(password string) string { + // In .my.cnf, passwords with special chars should be quoted + // Escape backslash and quote characters + password = strings.ReplaceAll(password, "\\", "\\\\") + password = strings.ReplaceAll(password, "\"", "\\\"") + return password +} +``` + +--- + +## Backup Implementation + +### `backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go` + +Key implementation details: + +```go +package usecases_mysql + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/klauspost/compress/zstd" + + "postgresus-backend/internal/config" + backup_encryption "postgresus-backend/internal/features/backups/backups/encryption" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mysqltypes "postgresus-backend/internal/features/databases/databases/mysql" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/storages" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" +) + +const ( + backupTimeout = 23 * time.Hour + shutdownCheckInterval = 1 * time.Second + copyBufferSize = 8 * 1024 * 1024 + progressReportMB = 1.0 + mysqlConnectTimeout = 30 + zstdStorageCompressionLevel = 3 // Medium level (1-19, 3 is default/balanced) +) + +type CreateMysqlBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService + fieldEncryptor encryption.FieldEncryptor +} + +type BackupMetadata struct { + 
Encryption backups_config.BackupEncryption + EncryptionSalt *string + EncryptionIV *string +} + +func (uc *CreateMysqlBackupUsecase) Execute( + ctx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + db *databases.Database, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), +) (*BackupMetadata, error) { + uc.logger.Info( + "Creating MySQL backup via mysqldump", + "databaseId", db.ID, + "storageId", storage.ID, + ) + + if !backupConfig.IsBackupsEnabled { + return nil, fmt.Errorf("backups are not enabled for this database: \"%s\"", db.Name) + } + + my := db.Mysql + if my == nil { + return nil, fmt.Errorf("mysql database configuration is required") + } + + // Decrypt password + decryptedPassword, err := uc.fieldEncryptor.Decrypt(db.ID, my.Password) + if err != nil { + return nil, fmt.Errorf("failed to decrypt database password: %w", err) + } + + args := uc.buildMysqldumpArgs(my) + + return uc.streamToStorage( + ctx, + backupID, + backupConfig, + tools.GetMysqlExecutable( + my.Version, + tools.MysqlExecutableMysqldump, + config.GetEnv().EnvMode, + config.GetEnv().MysqlInstallDir, + ), + args, + decryptedPassword, + storage, + db, + backupProgressListener, + my, + ) +} + +func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatabase) []string { + args := []string{ + "--host=" + my.Host, + "--port=" + strconv.Itoa(my.Port), + "--user=" + my.Username, + "--single-transaction", // Consistent snapshot for InnoDB + "--routines", // Include stored procedures and functions + "--triggers", // Include triggers + "--events", // Include scheduled events + "--set-gtid-purged=OFF", // Avoid GTID issues on restore + "--quick", // Don't buffer result set in memory + "--verbose", + } + + // Network compression: use zstd for MySQL 8.0.18+, zlib for older versions + // This compresses data during transfer between MySQL server and mysqldump client + args = append(args, 
uc.getNetworkCompressionArgs(my.Version)...) + + // MySQL 8.0+ client connecting to 5.7 server needs this flag + if my.Version == tools.MysqlVersion57 { + args = append(args, "--column-statistics=0") + } + + // SSL/TLS + if my.UseTLS { + args = append(args, "--ssl-mode=REQUIRED") + } + + // Single database backup + if my.Database != nil && *my.Database != "" { + args = append(args, *my.Database) + } + + return args +} + +// getNetworkCompressionArgs returns compression arguments based on MySQL version +// MySQL 8.0.18+ supports zstd compression algorithm (faster, better ratio) +// MySQL 5.7 and early 8.0 only support zlib via --compress flag +func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.MysqlVersion) []string { + const zstdCompressionLevel = 3 // Medium level (1-19, default 3) + + switch version { + case tools.MysqlVersion80, tools.MysqlVersion84: + // MySQL 8.0.18+ supports zstd with compression level + return []string{ + "--compression-algorithms=zstd", + fmt.Sprintf("--zstd-compression-level=%d", zstdCompressionLevel), + } + case tools.MysqlVersion57: + // MySQL 5.7 only supports zlib via --compress (no level control) + return []string{"--compress"} + default: + // Fallback to basic compression + return []string{"--compress"} + } +} + +func (uc *CreateMysqlBackupUsecase) streamToStorage( + parentCtx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + mysqlBin string, + args []string, + password string, + storage *storages.Storage, + db *databases.Database, + backupProgressListener func(completedMBs float64), + myConfig *mysqltypes.MysqlDatabase, +) (*BackupMetadata, error) { + uc.logger.Info("Streaming MySQL backup to storage", "mysqlBin", mysqlBin) + + ctx, cancel := uc.createBackupContext(parentCtx) + defer cancel() + + // Create temporary .my.cnf file for password authentication + // This avoids password appearing in process list + myCnfFile, err := uc.createTempMyCnfFile(myConfig, password) + 
if err != nil { + return nil, fmt.Errorf("failed to create .my.cnf: %w", err) + } + defer os.RemoveAll(filepath.Dir(myCnfFile)) + + // Add defaults-file as FIRST argument (required by mysqldump) + fullArgs := append([]string{"--defaults-file=" + myCnfFile}, args...) + + cmd := exec.CommandContext(ctx, mysqlBin, fullArgs...) + uc.logger.Info("Executing MySQL backup command", "command", cmd.String()) + + // Setup environment + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, + "MYSQL_PWD=", // Clear any existing password env var + "LC_ALL=C.UTF-8", + "LANG=C.UTF-8", + ) + + pgStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("stdout pipe: %w", err) + } + + pgStderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("stderr pipe: %w", err) + } + + // Capture stderr + stderrCh := make(chan []byte, 1) + go func() { + stderrOutput, _ := io.ReadAll(pgStderr) + stderrCh <- stderrOutput + }() + + // Setup storage writer with optional encryption + storageReader, storageWriter := io.Pipe() + + finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption( + backupID, + backupConfig, + storageWriter, + ) + if err != nil { + return nil, err + } + + // Wrap with zstd compression (mysqldump outputs plain SQL) + // zstd level 3 (medium) - 10x faster than gzip with similar ratio + zstdWriter, err := zstd.NewWriter(finalWriter, + zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(zstdStorageCompressionLevel))) + if err != nil { + return nil, fmt.Errorf("failed to create zstd writer: %w", err) + } + countingWriter := &CountingWriter{writer: zstdWriter} + + // Start storage save goroutine + saveErrCh := make(chan error, 1) + go func() { + saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader) + saveErrCh <- saveErr + }() + + // Start mysqldump + if err = cmd.Start(); err != nil { + return nil, fmt.Errorf("start %s: %w", filepath.Base(mysqlBin), err) + } + + // Copy with shutdown check (same 
pattern as PostgreSQL) + copyResultCh := make(chan error, 1) + bytesWrittenCh := make(chan int64, 1) + go func() { + bytesWritten, err := uc.copyWithShutdownCheck( + ctx, + countingWriter, + pgStdout, + backupProgressListener, + ) + bytesWrittenCh <- bytesWritten + copyResultCh <- err + }() + + copyErr := <-copyResultCh + bytesWritten := <-bytesWrittenCh + waitErr := cmd.Wait() + + // Check for cancellation + select { + case <-ctx.Done(): + uc.cleanupOnCancellation(zstdWriter, encryptionWriter, storageWriter, saveErrCh) + return nil, uc.checkCancellationReason() + default: + } + + // Close writers in order: zstd -> encryption -> pipe + if err := zstdWriter.Close(); err != nil { + uc.logger.Error("Failed to close zstd writer", "error", err) + } + if err := uc.closeWriters(encryptionWriter, storageWriter); err != nil { + <-saveErrCh + return nil, err + } + + saveErr := <-saveErrCh + stderrOutput := <-stderrCh + + // Report final size + if waitErr == nil && copyErr == nil && saveErr == nil && backupProgressListener != nil { + sizeMB := float64(bytesWritten) / (1024 * 1024) + backupProgressListener(sizeMB) + } + + // Handle errors + switch { + case waitErr != nil: + return nil, uc.buildMysqldumpErrorMessage(waitErr, stderrOutput, mysqlBin) + case copyErr != nil: + return nil, fmt.Errorf("copy to storage: %w", copyErr) + case saveErr != nil: + return nil, fmt.Errorf("save to storage: %w", saveErr) + } + + return &backupMetadata, nil +} + +// createTempMyCnfFile creates a temporary MySQL config file with credentials +// This is the secure way to pass password to mysqldump +func (uc *CreateMysqlBackupUsecase) createTempMyCnfFile( + myConfig *mysqltypes.MysqlDatabase, + password string, +) (string, error) { + tempDir, err := os.MkdirTemp("", "mycnf") + if err != nil { + return "", fmt.Errorf("failed to create temp directory: %w", err) + } + + myCnfFile := filepath.Join(tempDir, ".my.cnf") + + content := fmt.Sprintf(`[client] +user=%s +password="%s" +host=%s +port=%d +`, 
myConfig.Username, tools.EscapeMysqlPassword(password), myConfig.Host, myConfig.Port)
+
+	if myConfig.UseTLS {
+		content += "ssl-mode=REQUIRED\n"
+	}
+
+	err = os.WriteFile(myCnfFile, []byte(content), 0600)
+	if err != nil {
+		return "", fmt.Errorf("failed to write .my.cnf: %w", err)
+	}
+
+	return myCnfFile, nil
+}
+
+// copyWithShutdownCheck - same pattern as PostgreSQL implementation
+// Supports cancellation and progress reporting
+func (uc *CreateMysqlBackupUsecase) copyWithShutdownCheck(
+	ctx context.Context,
+	dst io.Writer,
+	src io.Reader,
+	backupProgressListener func(completedMBs float64),
+) (int64, error) {
+	// Same implementation as PostgreSQL
+	// Uses copyBufferSize, checks ctx.Done() and config.IsShouldShutdown()
+}
+
+// setupBackupEncryption - same pattern as PostgreSQL
+func (uc *CreateMysqlBackupUsecase) setupBackupEncryption(
+	backupID uuid.UUID,
+	backupConfig *backups_config.BackupConfig,
+	storageWriter io.WriteCloser,
+) (io.Writer, *backup_encryption.EncryptionWriter, BackupMetadata, error) {
+	// Same implementation as PostgreSQL
+	// AES-256-GCM encryption with generated salt and nonce
+}
+```
+
+### Key Differences from PostgreSQL:
+
+1. **Output Format**: mysqldump outputs plain SQL (not custom format like pg_dump -Fc)
+2. **Compression**: We add zstd compression in the pipeline since mysqldump doesn't have built-in custom format
+3. **Password File**: Uses `.my.cnf` instead of `.pgpass`
+4. **Arguments**: `--defaults-file` must be the FIRST argument
+5. 
**Parallelism**: mysqldump doesn't support parallel dump (mysqlpump does, but has issues)
+
+### Backup Pipeline:
+
+```
+mysqldump stdout -> zstd -> [encryption] -> storage
+```
+
+---
+
+## Restore Implementation
+
+### `backend/internal/features/restores/usecases/mysql/restore_backup_uc.go`
+
+```go
+package usecases_mysql
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"log/slog"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/klauspost/compress/zstd"
+
+	"postgresus-backend/internal/config"
+	"postgresus-backend/internal/features/backups/backups"
+	backup_encryption "postgresus-backend/internal/features/backups/backups/encryption"
+	backups_config "postgresus-backend/internal/features/backups/config"
+	"postgresus-backend/internal/features/databases"
+	mysqltypes "postgresus-backend/internal/features/databases/databases/mysql"
+	encryption_secrets "postgresus-backend/internal/features/encryption/secrets"
+	"postgresus-backend/internal/features/restores/models"
+	"postgresus-backend/internal/features/storages"
+	"postgresus-backend/internal/util/encryption"
+	"postgresus-backend/internal/util/tools"
+)
+
+type RestoreMysqlBackupUsecase struct {
+	logger           *slog.Logger
+	fieldEncryptor   encryption.FieldEncryptor
+	secretKeyService *encryption_secrets.SecretKeyService
+}
+
+func (uc *RestoreMysqlBackupUsecase) Execute(
+	originalDB *databases.Database,
+	restoringToDB *databases.Database,
+	backupConfig *backups_config.BackupConfig,
+	restore models.Restore,
+	backup *backups.Backup,
+	storage *storages.Storage,
+) error {
+	if originalDB.Type != databases.DatabaseTypeMysql {
+		return fmt.Errorf("database type not supported")
+	}
+
+	uc.logger.Info(
+		"Restoring MySQL backup via mysql client",
+		"restoreId", restore.ID,
+		"backupId", backup.ID,
+	)
+
+	my := restoringToDB.Mysql
+	if my == nil {
+		return fmt.Errorf("mysql configuration is required for restore")
+	}
+
+	// Build mysql client args
+	args := []string{
+		
"--host=" + my.Host, + "--port=" + strconv.Itoa(my.Port), + "--user=" + my.Username, + "--verbose", + } + + if my.UseTLS { + args = append(args, "--ssl-mode=REQUIRED") + } + + // Optionally specify target database + if my.Database != nil && *my.Database != "" { + args = append(args, *my.Database) + } + + return uc.restoreFromStorage( + originalDB, + tools.GetMysqlExecutable( + my.Version, + tools.MysqlExecutableMysql, + config.GetEnv().EnvMode, + config.GetEnv().MysqlInstallDir, + ), + args, + my.Password, + backup, + storage, + my, + ) +} + +func (uc *RestoreMysqlBackupUsecase) restoreFromStorage( + database *databases.Database, + mysqlBin string, + args []string, + password string, + backup *backups.Backup, + storage *storages.Storage, + myConfig *mysqltypes.MysqlDatabase, +) error { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer cancel() + + // Monitor for shutdown + go func() { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if config.IsShouldShutdown() { + cancel() + return + } + } + } + }() + + // Create temp .my.cnf + myCnfFile, err := uc.createTempMyCnfFile(myConfig, password) + if err != nil { + return fmt.Errorf("failed to create .my.cnf: %w", err) + } + defer os.RemoveAll(filepath.Dir(myCnfFile)) + + // Download backup to temp file + tempBackupFile, cleanupFunc, err := uc.downloadBackupToTempFile(ctx, backup, storage) + if err != nil { + return fmt.Errorf("failed to download backup: %w", err) + } + defer cleanupFunc() + + // Decompress and pipe to mysql + return uc.executeMysqlRestore(ctx, database, mysqlBin, args, myCnfFile, tempBackupFile, backup) +} + +func (uc *RestoreMysqlBackupUsecase) executeMysqlRestore( + ctx context.Context, + database *databases.Database, + mysqlBin string, + args []string, + myCnfFile string, + backupFile string, + backup *backups.Backup, +) error { + fullArgs := append([]string{"--defaults-file=" + 
myCnfFile}, args...) + + cmd := exec.CommandContext(ctx, mysqlBin, fullArgs...) + uc.logger.Info("Executing MySQL restore command", "command", cmd.String()) + + // Setup stdin pipeline: file -> [decryption] -> gunzip -> mysql stdin + backupFileHandle, err := os.Open(backupFile) + if err != nil { + return fmt.Errorf("failed to open backup file: %w", err) + } + defer backupFileHandle.Close() + + var inputReader io.Reader = backupFileHandle + + // Decrypt if needed + if backup.Encryption == backups_config.BackupEncryptionEncrypted { + // Setup decryption reader (same as PostgreSQL) + decryptReader, err := uc.setupDecryption(backupFileHandle, backup) + if err != nil { + return fmt.Errorf("failed to setup decryption: %w", err) + } + inputReader = decryptReader + } + + // Decompress (zstd) + zstdReader, err := zstd.NewReader(inputReader) + if err != nil { + return fmt.Errorf("failed to create zstd reader: %w", err) + } + defer zstdReader.Close() + + cmd.Stdin = zstdReader + + // Setup environment + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "MYSQL_PWD=", "LC_ALL=C.UTF-8") + + // Get stderr for error messages + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return fmt.Errorf("stderr pipe: %w", err) + } + + stderrCh := make(chan []byte, 1) + go func() { + output, _ := io.ReadAll(stderrPipe) + stderrCh <- output + }() + + if err = cmd.Start(); err != nil { + return fmt.Errorf("start mysql: %w", err) + } + + waitErr := cmd.Wait() + stderrOutput := <-stderrCh + + if config.IsShouldShutdown() { + return fmt.Errorf("restore cancelled due to shutdown") + } + + if waitErr != nil { + return uc.handleMysqlRestoreError(waitErr, stderrOutput, mysqlBin) + } + + return nil +} + +func (uc *RestoreMysqlBackupUsecase) downloadBackupToTempFile( + ctx context.Context, + backup *backups.Backup, + storage *storages.Storage, +) (string, func(), error) { + // Same implementation as PostgreSQL + // Download from storage to temp file +} + +func (uc *RestoreMysqlBackupUsecase) 
createTempMyCnfFile(
+	myConfig *mysqltypes.MysqlDatabase,
+	password string,
+) (string, error) {
+	// Same as backup implementation
+}
+```
+
+### Restore Pipeline:
+
+```
+storage -> [decryption] -> zstd decompress -> mysql client stdin
+```
+
+---
+
+## Migration Script
+
+### `backend/migrations/XXXXXX_add_mysql_databases_table.up.sql`
+
+```sql
+CREATE TABLE mysql_databases (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    database_id UUID REFERENCES databases(id) ON DELETE CASCADE,
+    version TEXT NOT NULL,
+    host TEXT NOT NULL,
+    port INT NOT NULL,
+    username TEXT NOT NULL,
+    password TEXT NOT NULL,
+    database TEXT,
+    use_tls BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+CREATE INDEX idx_mysql_databases_database_id ON mysql_databases(database_id);
+```
+
+### `backend/migrations/XXXXXX_add_mysql_databases_table.down.sql`
+
+```sql
+DROP INDEX IF EXISTS idx_mysql_databases_database_id;
+DROP TABLE IF EXISTS mysql_databases;
+```
+
+---
+
+## Model Updates
+
+### `backend/internal/features/databases/model.go` changes
+
+```go
+type Database struct {
+	// ... existing fields ...
+
+	Postgresql *postgresql.PostgresqlDatabase `json:"postgresql,omitempty" gorm:"foreignKey:DatabaseID"`
+	Mysql      *mysql.MysqlDatabase           `json:"mysql,omitempty" gorm:"foreignKey:DatabaseID"`
+
+	// ... rest of fields ... 
+} + +func (d *Database) Validate() error { + if d.Name == "" { + return errors.New("name is required") + } + + switch d.Type { + case DatabaseTypePostgres: + if d.Postgresql == nil { + return errors.New("postgresql database is required") + } + return d.Postgresql.Validate() + case DatabaseTypeMysql: + if d.Mysql == nil { + return errors.New("mysql database is required") + } + return d.Mysql.Validate() + default: + return errors.New("invalid database type: " + string(d.Type)) + } +} + +func (d *Database) TestConnection( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, +) error { + return d.getSpecificDatabase().TestConnection(logger, encryptor, d.ID) +} + +func (d *Database) EncryptSensitiveFields(encryptor encryption.FieldEncryptor) error { + if d.Postgresql != nil { + return d.Postgresql.EncryptSensitiveFields(d.ID, encryptor) + } + if d.Mysql != nil { + return d.Mysql.EncryptSensitiveFields(d.ID, encryptor) + } + return nil +} + +func (d *Database) PopulateVersionIfEmpty( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, +) error { + if d.Postgresql != nil { + return d.Postgresql.PopulateVersionIfEmpty(logger, encryptor, d.ID) + } + if d.Mysql != nil { + return d.Mysql.PopulateVersionIfEmpty(logger, encryptor, d.ID) + } + return nil +} + +func (d *Database) Update(incoming *Database) { + d.Name = incoming.Name + d.Type = incoming.Type + d.Notifiers = incoming.Notifiers + + switch d.Type { + case DatabaseTypePostgres: + if d.Postgresql != nil && incoming.Postgresql != nil { + d.Postgresql.Update(incoming.Postgresql) + } + case DatabaseTypeMysql: + if d.Mysql != nil && incoming.Mysql != nil { + d.Mysql.Update(incoming.Mysql) + } + } +} + +func (d *Database) getSpecificDatabase() DatabaseConnector { + switch d.Type { + case DatabaseTypePostgres: + return d.Postgresql + case DatabaseTypeMysql: + return d.Mysql + } + panic("invalid database type: " + string(d.Type)) +} +``` + +--- + +## Service Updates + +### 
`backend/internal/features/databases/service.go` - CopyDatabase + +```go +func (s *DatabaseService) CopyDatabase( + user *users_models.User, + databaseID uuid.UUID, +) (*Database, error) { + existingDatabase, err := s.dbRepository.FindByID(databaseID) + if err != nil { + return nil, err + } + + // ... permission checks ... + + newDatabase := &Database{ + ID: uuid.Nil, + WorkspaceID: existingDatabase.WorkspaceID, + Name: existingDatabase.Name + " (Copy)", + Type: existingDatabase.Type, + Notifiers: existingDatabase.Notifiers, + LastBackupTime: nil, + LastBackupErrorMessage: nil, + HealthStatus: existingDatabase.HealthStatus, + } + + switch existingDatabase.Type { + case DatabaseTypePostgres: + if existingDatabase.Postgresql != nil { + newDatabase.Postgresql = &postgresql.PostgresqlDatabase{ + // ... copy fields ... + } + } + case DatabaseTypeMysql: + if existingDatabase.Mysql != nil { + newDatabase.Mysql = &mysql.MysqlDatabase{ + ID: uuid.Nil, + DatabaseID: nil, + Version: existingDatabase.Mysql.Version, + Host: existingDatabase.Mysql.Host, + Port: existingDatabase.Mysql.Port, + Username: existingDatabase.Mysql.Username, + Password: existingDatabase.Mysql.Password, + Database: existingDatabase.Mysql.Database, + UseTLS: existingDatabase.Mysql.UseTLS, + } + } + } + + // ... rest of method ... +} +``` + +--- + +## Config Updates + +### `backend/internal/config/config.go` additions + +```go +type EnvVariables struct { + // ... existing fields ... + + MysqlInstallDir string `env:"MYSQL_INSTALL_DIR"` + + // Testing MySQL + TestMysql57Port string `env:"TEST_MYSQL_57_PORT"` + TestMysql80Port string `env:"TEST_MYSQL_80_PORT"` + TestMysql84Port string `env:"TEST_MYSQL_84_PORT"` +} + +// In loadEnvVariables(): +env.MysqlInstallDir = filepath.Join(backendRoot, "tools", "mysql") +tools.VerifyMysqlInstallation(log, env.EnvMode, env.MysqlInstallDir) + +if env.IsTesting { + // ... existing checks ... 
+ + if env.TestMysql57Port == "" { + log.Error("TEST_MYSQL_57_PORT is empty") + os.Exit(1) + } + if env.TestMysql80Port == "" { + log.Error("TEST_MYSQL_80_PORT is empty") + os.Exit(1) + } + if env.TestMysql84Port == "" { + log.Error("TEST_MYSQL_84_PORT is empty") + os.Exit(1) + } +} +``` + +--- + +## Test Implementation + +### `backend/internal/features/tests/mysql_backup_restore_test.go` + +```go +package tests + +import ( + "database/sql" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/gin-gonic/gin" + _ "github.com/go-sql-driver/mysql" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mysqltypes "postgresus-backend/internal/features/databases/databases/mysql" + "postgresus-backend/internal/features/restores" + restores_enums "postgresus-backend/internal/features/restores/enums" + restores_models "postgresus-backend/internal/features/restores/models" + "postgresus-backend/internal/features/storages" + users_enums "postgresus-backend/internal/features/users/enums" + users_testing "postgresus-backend/internal/features/users/testing" + workspaces_controllers "postgresus-backend/internal/features/workspaces/controllers" + workspaces_testing "postgresus-backend/internal/features/workspaces/testing" + test_utils "postgresus-backend/internal/util/testing" +) + +const createMysqlTestDataQuery = ` +DROP TABLE IF EXISTS test_data; + +CREATE TABLE test_data ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) NOT NULL, + value INT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +INSERT INTO test_data (name, value) VALUES + ('test1', 100), + ('test2', 200), + ('test3', 300); +` + +type MysqlContainer struct { + Host string + Port int + Username string + Password 
string + Database string + Version string + DB *sql.DB +} + +func Test_BackupAndRestoreMysql_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version string + port string + }{ + {"MySQL 5.7", "5.7", env.TestMysql57Port}, + {"MySQL 8.0", "8.0", env.TestMysql80Port}, + {"MySQL 8.4", "8.4", env.TestMysql84Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMysqlBackupRestoreForVersion(t, tc.version, tc.port) + }) + } +} + +func Test_BackupAndRestoreMysqlWithEncryption_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version string + port string + }{ + {"MySQL 5.7", "5.7", env.TestMysql57Port}, + {"MySQL 8.0", "8.0", env.TestMysql80Port}, + {"MySQL 8.4", "8.4", env.TestMysql84Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMysqlBackupRestoreWithEncryptionForVersion(t, tc.version, tc.port) + }) + } +} + +func testMysqlBackupRestoreForVersion(t *testing.T, mysqlVersion string, port string) { + container, err := connectToMysqlContainer(mysqlVersion, port) + assert.NoError(t, err) + defer container.DB.Close() + + _, err = container.DB.Exec(createMysqlTestDataQuery) + assert.NoError(t, err) + + router := createMysqlTestRouter() + user := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("Test Workspace", user, router) + + storage := storages.CreateTestStorage(workspace.ID) + + database := createMysqlDatabaseViaAPI( + t, router, "Test MySQL Database", workspace.ID, + container.Host, container.Port, + container.Username, container.Password, container.Database, + user.Token, + ) + + enableBackupsViaAPI( + t, router, database.ID, storage.ID, + backups_config.BackupEncryptionNone, user.Token, + ) + + createBackupViaAPI(t, router, database.ID, user.Token) + + backup := waitForBackupCompletion(t, router, database.ID, 
user.Token, 5*time.Minute) + assert.Equal(t, backups.BackupStatusCompleted, backup.Status) + + // Create new database for restore + newDBName := "restoreddb" + _, err = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", newDBName)) + assert.NoError(t, err) + _, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE %s;", newDBName)) + assert.NoError(t, err) + + createMysqlRestoreViaAPI( + t, router, backup.ID, + container.Host, container.Port, + container.Username, container.Password, newDBName, + user.Token, + ) + + restore := waitForRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute) + assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status) + + // Verify data integrity + newDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", + container.Username, container.Password, container.Host, container.Port, newDBName) + newDB, err := sql.Open("mysql", newDSN) + assert.NoError(t, err) + defer newDB.Close() + + var count int + err = newDB.QueryRow("SELECT COUNT(*) FROM test_data").Scan(&count) + assert.NoError(t, err) + assert.Equal(t, 3, count) + + // Cleanup + err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String())) + if err != nil { + t.Logf("Warning: Failed to delete backup file: %v", err) + } + + test_utils.MakeDeleteRequest( + t, router, + "/api/v1/databases/"+database.ID.String(), + "Bearer "+user.Token, + http.StatusNoContent, + ) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func connectToMysqlContainer(version string, port string) (*MysqlContainer, error) { + dbName := "testdb" + password := "testpassword" + username := "testuser" + host := "localhost" + + portInt, err := strconv.Atoi(port) + if err != nil { + return nil, fmt.Errorf("failed to parse port: %w", err) + } + + dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=true", + username, password, host, portInt, dbName) + + db, err := sql.Open("mysql", dsn) + if err != nil { + return nil, 
fmt.Errorf("failed to connect to database: %w", err) + } + + if err = db.Ping(); err != nil { + return nil, fmt.Errorf("failed to ping database: %w", err) + } + + return &MysqlContainer{ + Host: host, + Port: portInt, + Username: username, + Password: password, + Database: dbName, + DB: db, + }, nil +} + +func createMysqlDatabaseViaAPI( + t *testing.T, + router *gin.Engine, + name string, + workspaceID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + token string, +) *databases.Database { + request := databases.Database{ + Name: name, + WorkspaceID: &workspaceID, + Type: databases.DatabaseTypeMysql, + Mysql: &mysqltypes.MysqlDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: &database, + }, + } + + w := workspaces_testing.MakeAPIRequest( + router, + "POST", + "/api/v1/databases/create", + "Bearer "+token, + request, + ) + + if w.Code != http.StatusCreated { + t.Fatalf("Failed to create database. Status: %d, Body: %s", w.Code, w.Body.String()) + } + + var createdDatabase databases.Database + if err := json.Unmarshal(w.Body.Bytes(), &createdDatabase); err != nil { + t.Fatalf("Failed to unmarshal database response: %v", err) + } + + return &createdDatabase +} + +func createMysqlRestoreViaAPI( + t *testing.T, + router *gin.Engine, + backupID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + token string, +) { + request := restores.RestoreBackupRequest{ + MysqlDatabase: &mysqltypes.MysqlDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: &database, + }, + } + + test_utils.MakePostRequest( + t, router, + fmt.Sprintf("/api/v1/restores/%s/restore", backupID.String()), + "Bearer "+token, + request, + http.StatusOK, + ) +} +``` + +--- + +## Docker Compose for Testing + +### `backend/docker-compose.test.yml` additions + +```yaml +services: + # ... existing PostgreSQL services ... 
+ + mysql57: + image: mysql:5.7 + container_name: postgresus-test-mysql57 + ports: + - "${TEST_MYSQL_57_PORT:-33057}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: + [ + "CMD", + "mysqladmin", + "ping", + "-h", + "localhost", + "-u", + "root", + "-prootpassword", + ] + interval: 5s + timeout: 5s + retries: 10 + + mysql80: + image: mysql:8.0 + container_name: postgresus-test-mysql80 + ports: + - "${TEST_MYSQL_80_PORT:-33080}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --default-authentication-plugin=mysql_native_password + healthcheck: + test: + [ + "CMD", + "mysqladmin", + "ping", + "-h", + "localhost", + "-u", + "root", + "-prootpassword", + ] + interval: 5s + timeout: 5s + retries: 10 + + mysql84: + image: mysql:8.4 + container_name: postgresus-test-mysql84 + ports: + - "${TEST_MYSQL_84_PORT:-33084}:3306" + environment: + MYSQL_ROOT_PASSWORD: rootpassword + MYSQL_DATABASE: testdb + MYSQL_USER: testuser + MYSQL_PASSWORD: testpassword + command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci + healthcheck: + test: + [ + "CMD", + "mysqladmin", + "ping", + "-h", + "localhost", + "-u", + "root", + "-prootpassword", + ] + interval: 5s + timeout: 5s + retries: 10 +``` + +--- + +## MySQL Client Tools Installation + +### For Docker (Production) + +Add to Dockerfile: + +```dockerfile +# Install MySQL client tools for all supported versions +RUN apt-get update && apt-get install -y \ + mysql-client-5.7 \ + mysql-client-8.0 \ + && rm -rf /var/lib/apt/lists/* + +# Or use official MySQL APT repository for specific versions +RUN wget https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb \ + && 
dpkg -i mysql-apt-config_0.8.29-1_all.deb \ + && apt-get update \ + && apt-get install -y mysql-client +``` + +### For Development (Windows/macOS) + +Create `backend/tools/mysql/readme.md`: + +````markdown +# MySQL Client Tools Setup + +## Windows + +1. Download MySQL Community Server (client only) from: + - MySQL 5.7: https://dev.mysql.com/downloads/mysql/5.7.html + - MySQL 8.0: https://dev.mysql.com/downloads/mysql/8.0.html + - MySQL 8.4: https://dev.mysql.com/downloads/mysql/8.4.html + +2. Extract to: + - `backend/tools/mysql/mysql-5.7/bin/` + - `backend/tools/mysql/mysql-8.0/bin/` + - `backend/tools/mysql/mysql-8.4/bin/` + +3. Required executables: + - `mysqldump.exe` + - `mysql.exe` + +## macOS (Homebrew) + +```bash +brew install mysql-client@5.7 +brew install mysql-client@8.0 +brew install mysql-client@8.4 +``` +```` + +Then symlink or copy to tools directory. + +``` + +--- + +## Frontend Changes (Overview) + +The frontend will need updates to: + +1. **Database Type Selection** - Add MySQL option in database creation form +2. **MySQL Connection Form** - Similar to PostgreSQL but with: + - Host, Port, Username, Password + - Database name (single database per config) + - Use TLS checkbox (instead of IsHttps) +3. **Version Display** - Show MySQL version in database details +4. **Restore Form** - Add MySQL target configuration option + +--- + +## Implementation Order + +1. **Phase 1: Core Infrastructure** + - [ ] Create `MysqlVersion` enums and `mysql.go` tools + - [ ] Create `mysql/model.go` with `MysqlDatabase` struct + - [ ] Add migration for `mysql_databases` table + - [ ] Update `databases/model.go` to include MySQL + - [ ] Update `databases/enums.go` with `DatabaseTypeMysql` + - [ ] Update `databases/repository.go` to preload MySQL + +2. 
**Phase 2: Backup Implementation** + - [ ] Create `usecases/mysql/create_backup_uc.go` + - [ ] Create `usecases/mysql/di.go` + - [ ] Update `usecases/create_backup_uc.go` to route to MySQL + - [ ] Update `usecases/di.go` to wire MySQL use case + +3. **Phase 3: Restore Implementation** + - [ ] Create `restores/usecases/mysql/restore_backup_uc.go` + - [ ] Create `restores/usecases/mysql/di.go` + - [ ] Update `restores/usecases/restore_backup_uc.go` to route to MySQL + - [ ] Update `restores/service.go` for MySQL restore DTO + +4. **Phase 4: Service Layer Updates** + - [ ] Update `databases/service.go` - CopyDatabase, IsUserReadOnly, CreateReadOnlyUser + - [ ] Update `config/config.go` with MySQL configuration + - [ ] Add MySQL installation verification + +5. **Phase 5: Testing** + - [ ] Add Docker Compose services for MySQL containers + - [ ] Create `mysql_backup_restore_test.go` + - [ ] Test all MySQL versions (5.7, 8.0, 8.4) + - [ ] Test encryption with MySQL backups + +6. **Phase 6: Frontend** + - [ ] Add MySQL database type option + - [ ] Create MySQL connection form + - [ ] Update restore dialog for MySQL + +--- + +## Notes and Considerations + +### MySQL vs PostgreSQL Differences + +| Feature | PostgreSQL | MySQL | +|---------|------------|-------| +| Backup format | Custom (-Fc) with built-in compression | Plain SQL + zstd compression | +| Network compression | N/A (local compression only) | zstd (8.0+) or zlib (5.7) via protocol | +| Parallel dump | Supported (--jobs) | Not in mysqldump (consider mydumper) | +| Schemas | Multiple schemas per database | One schema = one database | +| Password file | .pgpass | .my.cnf | +| SSL flag | IsHttps / PGSSLMODE | UseTLS / --ssl-mode | +| Version detection | SELECT version() | SELECT VERSION() | + +### Security Considerations + +1. **Password handling**: Use `.my.cnf` temp file, not `MYSQL_PWD` env var (deprecated and insecure) +2. **File permissions**: `.my.cnf` must be 0600 +3. 
**TLS**: Support SSL/TLS connections for cloud databases + +### Cloud Database Compatibility + +- **AWS RDS**: Works with standard mysqldump +- **Google Cloud SQL**: Works with standard mysqldump +- **Azure Database for MySQL**: Works with standard mysqldump +- **PlanetScale**: May require `--ssl-mode=VERIFY_IDENTITY` and specific TLS configuration + +### Compression Strategy + +MySQL backup uses **two-layer compression** to minimize both network and storage usage: + +#### Layer 1: Network Compression (MySQL Protocol) + +Compresses data during transfer between MySQL server and mysqldump client: + +| MySQL Version | Algorithm | Flag | Level | +|---------------|-----------|------|-------| +| 5.7 | zlib | `--compress` | N/A (fixed) | +| 8.0+ | zstd | `--compression-algorithms=zstd` | 5 (1-22 range) | +| 8.0+ fallback | zlib | `--compress` | N/A | + +**Why zstd for 8.0+?** +- ~30% faster compression than zlib +- ~10% better compression ratio +- Lower CPU usage + +#### Layer 2: Storage Compression (zstd) + +Compresses mysqldump SQL output before storage: + +``` + +MySQL Server --[zstd/zlib network]--> mysqldump --[zstd level 5]--> [encryption] --> storage + +``` + +| Setting | Value | Notes | +|---------|-------|-------| +| Algorithm | zstd | `github.com/klauspost/compress/zstd` (pure Go) | +| Level | 5 | Matches `zstdStorageCompressionLevel` in the MySQL/MariaDB backup usecases (zstd default is 3) | + +**Why zstd over gzip?** + +| Metric | gzip (level 6) | zstd (level 3) | Improvement | +|--------|---------------|----------------|-------------| +| Compression speed | ~25 MB/s | ~300 MB/s | **12x faster** | +| Decompression speed | ~250 MB/s | ~800 MB/s | **3x faster** | +| Compression ratio | ~75% | ~75% | Same | +| CPU usage | High | Low | **Much lower** | + +#### Compression Ratio Expectations + +| Data Type | Network Compression | Storage Compression | Total Ratio | +|-----------|--------------------|--------------------|-------------| +| Text-heavy tables | 60-70% | 80-90% | 90-95% | +| Mixed data | 40-50% | 
70-80% | 85-90% | +| Binary/BLOB data | 10-20% | 20-30% | 30-40% | + +#### Consistent Algorithm Benefit + +Using zstd for both layers provides: +- Consistent behavior and tuning +- Single dependency (`github.com/klauspost/compress/zstd`) +- Optimal performance for parallel backup scenarios +``` diff --git a/backend/Makefile b/backend/Makefile index cf5535d..284a13e 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -2,7 +2,7 @@ run: go run cmd/main.go test: - go test -p=1 -count=1 -failfast .\internal\... + go test -p=1 -count=1 -failfast -timeout 10m .\internal\... lint: golangci-lint fmt && golangci-lint run diff --git a/backend/docker-compose.yml.example b/backend/docker-compose.yml.example index e4b03bd..31ea1a1 100644 --- a/backend/docker-compose.yml.example +++ b/backend/docker-compose.yml.example @@ -422,3 +422,116 @@ services: interval: 5s timeout: 5s retries: 10 + + # Test MongoDB containers + test-mongodb-40: + image: mongo:4.0 + container_name: test-mongodb-40 + ports: + - "${TEST_MONGODB_40_PORT:-27040}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-42: + image: mongo:4.2 + container_name: test-mongodb-42 + ports: + - "${TEST_MONGODB_42_PORT:-27042}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-44: + image: mongo:4.4 + container_name: test-mongodb-44 + ports: + - "${TEST_MONGODB_44_PORT:-27044}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: 
+ test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-50: + image: mongo:5.0 + container_name: test-mongodb-50 + ports: + - "${TEST_MONGODB_50_PORT:-27050}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-60: + image: mongo:6.0 + container_name: test-mongodb-60 + ports: + - "${TEST_MONGODB_60_PORT:-27060}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-70: + image: mongo:7.0 + container_name: test-mongodb-70 + ports: + - "${TEST_MONGODB_70_PORT:-27070}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 + + test-mongodb-80: + image: mongo:8.0 + container_name: test-mongodb-80 + ports: + - "${TEST_MONGODB_80_PORT:-27080}:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: root + MONGO_INITDB_ROOT_PASSWORD: rootpassword + MONGO_INITDB_DATABASE: testdb + command: mongod --auth + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 5s + timeout: 5s + retries: 10 diff --git a/backend/go.mod b/backend/go.mod index d2ad648..2b1c9c8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -25,6 +25,7 @@ require ( github.com/swaggo/files v1.0.1 github.com/swaggo/gin-swagger v1.6.0 github.com/swaggo/swag v1.16.4 + go.mongodb.org/mongo-driver v1.17.6 golang.org/x/crypto 
v0.46.0 golang.org/x/time v0.14.0 gorm.io/driver/postgres v1.5.11 @@ -109,6 +110,7 @@ require ( github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/gorilla/schema v1.4.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -137,6 +139,7 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncw/swift/v2 v2.0.5 // indirect github.com/oklog/ulid v1.3.1 // indirect @@ -170,13 +173,15 @@ require ( github.com/ulikunitz/xz v0.5.15 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect github.com/zeebo/blake3 v0.2.4 // indirect github.com/zeebo/errs v1.4.0 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.etcd.io/bbolt v1.4.3 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/term v0.38.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index 90dbc57..892ee34 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -344,6 +344,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -522,6 +524,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncw/swift/v2 v2.0.5 h1:9o5Gsd7bInAFEqsGPcaUdsboMbqf8lnNtxqWKFT9iz8= @@ -660,6 +664,12 @@ github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/ github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 
h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -905,6 +915,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 73f8781..5ac53b0 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -27,6 +27,7 @@ type EnvVariables struct { PostgresesInstallDir string `env:"POSTGRES_INSTALL_DIR"` MysqlInstallDir string `env:"MYSQL_INSTALL_DIR"` MariadbInstallDir string `env:"MARIADB_INSTALL_DIR"` + MongodbInstallDir string `env:"MONGODB_INSTALL_DIR"` DataFolder string TempFolder string @@ -69,6 +70,14 @@ type EnvVariables struct { TestMariadb118Port string `env:"TEST_MARIADB_118_PORT"` TestMariadb120Port string `env:"TEST_MARIADB_120_PORT"` + TestMongodb40Port string `env:"TEST_MONGODB_40_PORT"` + 
TestMongodb42Port string `env:"TEST_MONGODB_42_PORT"` + TestMongodb44Port string `env:"TEST_MONGODB_44_PORT"` + TestMongodb50Port string `env:"TEST_MONGODB_50_PORT"` + TestMongodb60Port string `env:"TEST_MONGODB_60_PORT"` + TestMongodb70Port string `env:"TEST_MONGODB_70_PORT"` + TestMongodb80Port string `env:"TEST_MONGODB_80_PORT"` + // oauth GitHubClientID string `env:"GITHUB_CLIENT_ID"` GitHubClientSecret string `env:"GITHUB_CLIENT_SECRET"` @@ -176,6 +185,9 @@ func loadEnvVariables() { env.MariadbInstallDir = filepath.Join(backendRoot, "tools", "mariadb") tools.VerifyMariadbInstallation(log, env.EnvMode, env.MariadbInstallDir) + env.MongodbInstallDir = filepath.Join(backendRoot, "tools", "mongodb") + tools.VerifyMongodbInstallation(log, env.EnvMode, env.MongodbInstallDir) + // Store the data and temp folders one level below the root // (projectRoot/postgresus-data -> /postgresus-data) env.DataFolder = filepath.Join(filepath.Dir(backendRoot), "postgresus-data", "backups") diff --git a/backend/internal/features/backups/backups/usecases/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/create_backup_uc.go index c8c3bf1..7cd3d52 100644 --- a/backend/internal/features/backups/backups/usecases/create_backup_uc.go +++ b/backend/internal/features/backups/backups/usecases/create_backup_uc.go @@ -6,6 +6,7 @@ import ( usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common" usecases_mariadb "postgresus-backend/internal/features/backups/backups/usecases/mariadb" + usecases_mongodb "postgresus-backend/internal/features/backups/backups/usecases/mongodb" usecases_mysql "postgresus-backend/internal/features/backups/backups/usecases/mysql" usecases_postgresql "postgresus-backend/internal/features/backups/backups/usecases/postgresql" backups_config "postgresus-backend/internal/features/backups/config" @@ -19,6 +20,7 @@ type CreateBackupUsecase struct { CreatePostgresqlBackupUsecase 
*usecases_postgresql.CreatePostgresqlBackupUsecase CreateMysqlBackupUsecase *usecases_mysql.CreateMysqlBackupUsecase CreateMariadbBackupUsecase *usecases_mariadb.CreateMariadbBackupUsecase + CreateMongodbBackupUsecase *usecases_mongodb.CreateMongodbBackupUsecase } func (uc *CreateBackupUsecase) Execute( @@ -60,6 +62,16 @@ func (uc *CreateBackupUsecase) Execute( backupProgressListener, ) + case databases.DatabaseTypeMongodb: + return uc.CreateMongodbBackupUsecase.Execute( + ctx, + backupID, + backupConfig, + database, + storage, + backupProgressListener, + ) + default: return nil, errors.New("database type not supported") } diff --git a/backend/internal/features/backups/backups/usecases/di.go b/backend/internal/features/backups/backups/usecases/di.go index cd678e1..7aa87db 100644 --- a/backend/internal/features/backups/backups/usecases/di.go +++ b/backend/internal/features/backups/backups/usecases/di.go @@ -2,6 +2,7 @@ package usecases import ( usecases_mariadb "postgresus-backend/internal/features/backups/backups/usecases/mariadb" + usecases_mongodb "postgresus-backend/internal/features/backups/backups/usecases/mongodb" usecases_mysql "postgresus-backend/internal/features/backups/backups/usecases/mysql" usecases_postgresql "postgresus-backend/internal/features/backups/backups/usecases/postgresql" ) @@ -10,6 +11,7 @@ var createBackupUsecase = &CreateBackupUsecase{ usecases_postgresql.GetCreatePostgresqlBackupUsecase(), usecases_mysql.GetCreateMysqlBackupUsecase(), usecases_mariadb.GetCreateMariadbBackupUsecase(), + usecases_mongodb.GetCreateMongodbBackupUsecase(), } func GetCreateBackupUsecase() *CreateBackupUsecase { diff --git a/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go index 31b3e40..6afd294 100644 --- a/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go +++ 
b/backend/internal/features/backups/backups/usecases/mariadb/create_backup_uc.go @@ -34,7 +34,7 @@ const ( shutdownCheckInterval = 1 * time.Second copyBufferSize = 8 * 1024 * 1024 progressReportIntervalMB = 1.0 - zstdStorageCompressionLevel = 3 + zstdStorageCompressionLevel = 5 exitCodeGenericError = 1 exitCodeConnectionError = 2 ) diff --git a/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go new file mode 100644 index 0000000..8d989ca --- /dev/null +++ b/backend/internal/features/backups/backups/usecases/mongodb/create_backup_uc.go @@ -0,0 +1,430 @@ +package usecases_mongodb + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/google/uuid" + + "postgresus-backend/internal/config" + backup_encryption "postgresus-backend/internal/features/backups/backups/encryption" + usecases_common "postgresus-backend/internal/features/backups/backups/usecases/common" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mongodbtypes "postgresus-backend/internal/features/databases/databases/mongodb" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/storages" + "postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/tools" +) + +const ( + backupTimeout = 23 * time.Hour + shutdownCheckInterval = 1 * time.Second + copyBufferSize = 8 * 1024 * 1024 + progressReportIntervalMB = 1.0 +) + +type CreateMongodbBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService + fieldEncryptor encryption.FieldEncryptor +} + +type writeResult struct { + bytesWritten int + writeErr error +} + +func (uc *CreateMongodbBackupUsecase) Execute( + ctx context.Context, + backupID uuid.UUID, + backupConfig 
*backups_config.BackupConfig, + db *databases.Database, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), +) (*usecases_common.BackupMetadata, error) { + uc.logger.Info( + "Creating MongoDB backup via mongodump", + "databaseId", db.ID, + "storageId", storage.ID, + ) + + if !backupConfig.IsBackupsEnabled { + return nil, fmt.Errorf("backups are not enabled for this database: \"%s\"", db.Name) + } + + mdb := db.Mongodb + if mdb == nil { + return nil, fmt.Errorf("mongodb database configuration is required") + } + + if mdb.Database == "" { + return nil, fmt.Errorf("database name is required for mongodump backups") + } + + decryptedPassword, err := uc.fieldEncryptor.Decrypt(db.ID, mdb.Password) + if err != nil { + return nil, fmt.Errorf("failed to decrypt database password: %w", err) + } + + args := uc.buildMongodumpArgs(mdb, decryptedPassword) + + return uc.streamToStorage( + ctx, + backupID, + backupConfig, + tools.GetMongodbExecutable( + tools.MongodbExecutableMongodump, + config.GetEnv().EnvMode, + config.GetEnv().MongodbInstallDir, + ), + args, + storage, + backupProgressListener, + ) +} + +func (uc *CreateMongodbBackupUsecase) buildMongodumpArgs( + mdb *mongodbtypes.MongodbDatabase, + password string, +) []string { + uri := mdb.BuildMongodumpURI(password) + + args := []string{ + "--uri=" + uri, + "--db=" + mdb.Database, + "--archive", + "--gzip", + } + + return args +} + +func (uc *CreateMongodbBackupUsecase) streamToStorage( + parentCtx context.Context, + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + mongodumpBin string, + args []string, + storage *storages.Storage, + backupProgressListener func(completedMBs float64), +) (*usecases_common.BackupMetadata, error) { + uc.logger.Info("Streaming MongoDB backup to storage", "mongodumpBin", mongodumpBin) + + ctx, cancel := uc.createBackupContext(parentCtx) + defer cancel() + + cmd := exec.CommandContext(ctx, mongodumpBin, args...) 
+ + safeArgs := make([]string, len(args)) + for i, arg := range args { + if len(arg) > 6 && arg[:6] == "--uri=" { + safeArgs[i] = "--uri=mongodb://***:***@***" + } else { + safeArgs[i] = arg + } + } + uc.logger.Info("Executing MongoDB backup command", "command", mongodumpBin, "args", safeArgs) + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, + "LC_ALL=C.UTF-8", + "LANG=C.UTF-8", + ) + + pgStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("stdout pipe: %w", err) + } + + pgStderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("stderr pipe: %w", err) + } + + stderrCh := make(chan []byte, 1) + go func() { + stderrOutput, _ := io.ReadAll(pgStderr) + stderrCh <- stderrOutput + }() + + storageReader, storageWriter := io.Pipe() + + finalWriter, encryptionWriter, backupMetadata, err := uc.setupBackupEncryption( + backupID, + backupConfig, + storageWriter, + ) + if err != nil { + return nil, err + } + + countingWriter := usecases_common.NewCountingWriter(finalWriter) + + saveErrCh := make(chan error, 1) + go func() { + saveErr := storage.SaveFile(ctx, uc.fieldEncryptor, uc.logger, backupID, storageReader) + saveErrCh <- saveErr + }() + + if err = cmd.Start(); err != nil { + return nil, fmt.Errorf("start %s: %w", filepath.Base(mongodumpBin), err) + } + + copyResultCh := make(chan error, 1) + bytesWrittenCh := make(chan int64, 1) + go func() { + bytesWritten, copyErr := uc.copyWithShutdownCheck( + ctx, + countingWriter, + pgStdout, + backupProgressListener, + ) + bytesWrittenCh <- bytesWritten + copyResultCh <- copyErr + }() + + copyErr := <-copyResultCh + bytesWritten := <-bytesWrittenCh + waitErr := cmd.Wait() + + select { + case <-ctx.Done(): + uc.cleanupOnCancellation(encryptionWriter, storageWriter, saveErrCh) + return nil, uc.checkCancellationReason() + default: + } + + if err := uc.closeWriters(encryptionWriter, storageWriter); err != nil { + <-saveErrCh + return nil, err + } + + saveErr := <-saveErrCh + stderrOutput 
:= <-stderrCh + + if waitErr == nil && copyErr == nil && saveErr == nil && backupProgressListener != nil { + sizeMB := float64(bytesWritten) / (1024 * 1024) + backupProgressListener(sizeMB) + } + + switch { + case waitErr != nil: + return nil, uc.buildMongodumpErrorMessage(waitErr, stderrOutput, mongodumpBin) + case copyErr != nil: + return nil, fmt.Errorf("copy to storage: %w", copyErr) + case saveErr != nil: + return nil, fmt.Errorf("save to storage: %w", saveErr) + } + + return &backupMetadata, nil +} + +func (uc *CreateMongodbBackupUsecase) createBackupContext( + parentCtx context.Context, +) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithTimeout(parentCtx, backupTimeout) + + go func() { + ticker := time.NewTicker(shutdownCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if config.IsShouldShutdown() { + cancel() + return + } + } + } + }() + + return ctx, cancel +} + +func (uc *CreateMongodbBackupUsecase) setupBackupEncryption( + backupID uuid.UUID, + backupConfig *backups_config.BackupConfig, + storageWriter io.WriteCloser, +) (io.Writer, *backup_encryption.EncryptionWriter, usecases_common.BackupMetadata, error) { + backupMetadata := usecases_common.BackupMetadata{ + Encryption: backups_config.BackupEncryptionNone, + } + + if backupConfig.Encryption != backups_config.BackupEncryptionEncrypted { + return storageWriter, nil, backupMetadata, nil + } + + salt, err := backup_encryption.GenerateSalt() + if err != nil { + return nil, nil, backupMetadata, fmt.Errorf("failed to generate salt: %w", err) + } + + nonce, err := backup_encryption.GenerateNonce() + if err != nil { + return nil, nil, backupMetadata, fmt.Errorf("failed to generate nonce: %w", err) + } + + masterKey, err := uc.secretKeyService.GetSecretKey() + if err != nil { + return nil, nil, backupMetadata, fmt.Errorf("failed to get master key: %w", err) + } + + encryptionWriter, err := backup_encryption.NewEncryptionWriter( + 
storageWriter, + masterKey, + backupID, + salt, + nonce, + ) + if err != nil { + return nil, nil, backupMetadata, fmt.Errorf("failed to create encryption writer: %w", err) + } + + saltBase64 := base64.StdEncoding.EncodeToString(salt) + nonceBase64 := base64.StdEncoding.EncodeToString(nonce) + + backupMetadata.Encryption = backups_config.BackupEncryptionEncrypted + backupMetadata.EncryptionSalt = &saltBase64 + backupMetadata.EncryptionIV = &nonceBase64 + + return encryptionWriter, encryptionWriter, backupMetadata, nil +} + +func (uc *CreateMongodbBackupUsecase) copyWithShutdownCheck( + ctx context.Context, + dst io.Writer, + src io.Reader, + backupProgressListener func(completedMBs float64), +) (int64, error) { + buf := make([]byte, copyBufferSize) + var totalWritten int64 + var lastReportedMB float64 + + for { + select { + case <-ctx.Done(): + return totalWritten, ctx.Err() + default: + } + + if config.IsShouldShutdown() { + return totalWritten, errors.New("shutdown requested") + } + + nr, readErr := src.Read(buf) + if nr > 0 { + writeResultCh := make(chan writeResult, 1) + go func() { + nw, writeErr := dst.Write(buf[:nr]) + writeResultCh <- writeResult{nw, writeErr} + }() + + var nw int + var writeErr error + + select { + case <-ctx.Done(): + return totalWritten, fmt.Errorf("copy cancelled during write: %w", ctx.Err()) + case result := <-writeResultCh: + nw = result.bytesWritten + writeErr = result.writeErr + } + + if nw < 0 || nr < nw { + nw = 0 + if writeErr == nil { + writeErr = fmt.Errorf("invalid write result") + } + } + + if writeErr != nil { + return totalWritten, writeErr + } + if nr != nw { + return totalWritten, io.ErrShortWrite + } + totalWritten += int64(nw) + + if backupProgressListener != nil { + currentMB := float64(totalWritten) / (1024 * 1024) + if currentMB-lastReportedMB >= progressReportIntervalMB { + backupProgressListener(currentMB) + lastReportedMB = currentMB + } + } + } + if readErr != nil { + if readErr == io.EOF { + return totalWritten, 
nil + } + return totalWritten, readErr + } + } +} + +func (uc *CreateMongodbBackupUsecase) cleanupOnCancellation( + encryptionWriter *backup_encryption.EncryptionWriter, + storageWriter *io.PipeWriter, + saveErrCh chan error, +) { + if encryptionWriter != nil { + _ = encryptionWriter.Close() + } + _ = storageWriter.CloseWithError(errors.New("backup cancelled")) + <-saveErrCh +} + +func (uc *CreateMongodbBackupUsecase) closeWriters( + encryptionWriter *backup_encryption.EncryptionWriter, + storageWriter *io.PipeWriter, +) error { + if encryptionWriter != nil { + if err := encryptionWriter.Close(); err != nil { + uc.logger.Error("Failed to close encryption writer", "error", err) + return fmt.Errorf("failed to close encryption writer: %w", err) + } + } + if err := storageWriter.Close(); err != nil { + uc.logger.Error("Failed to close storage writer", "error", err) + return fmt.Errorf("failed to close storage writer: %w", err) + } + return nil +} + +func (uc *CreateMongodbBackupUsecase) checkCancellationReason() error { + if config.IsShouldShutdown() { + return errors.New("backup cancelled due to shutdown") + } + return errors.New("backup cancelled due to timeout") +} + +func (uc *CreateMongodbBackupUsecase) buildMongodumpErrorMessage( + waitErr error, + stderrOutput []byte, + mongodumpBin string, +) error { + stderrStr := string(stderrOutput) + + if len(stderrStr) > 0 { + return fmt.Errorf( + "%s failed: %w\nstderr: %s", + filepath.Base(mongodumpBin), + waitErr, + stderrStr, + ) + } + + return fmt.Errorf("%s failed: %w", filepath.Base(mongodumpBin), waitErr) +} diff --git a/backend/internal/features/backups/backups/usecases/mongodb/di.go b/backend/internal/features/backups/backups/usecases/mongodb/di.go new file mode 100644 index 0000000..497a609 --- /dev/null +++ b/backend/internal/features/backups/backups/usecases/mongodb/di.go @@ -0,0 +1,17 @@ +package usecases_mongodb + +import ( + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + 
"postgresus-backend/internal/util/encryption" + "postgresus-backend/internal/util/logger" +) + +var createMongodbBackupUsecase = &CreateMongodbBackupUsecase{ + logger.GetLogger(), + encryption_secrets.GetSecretKeyService(), + encryption.GetFieldEncryptor(), +} + +func GetCreateMongodbBackupUsecase() *CreateMongodbBackupUsecase { + return createMongodbBackupUsecase +} diff --git a/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go b/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go index 9836415..dc80943 100644 --- a/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go +++ b/backend/internal/features/backups/backups/usecases/mysql/create_backup_uc.go @@ -34,7 +34,7 @@ const ( shutdownCheckInterval = 1 * time.Second copyBufferSize = 8 * 1024 * 1024 progressReportIntervalMB = 1.0 - zstdStorageCompressionLevel = 3 + zstdStorageCompressionLevel = 5 exitCodeGenericError = 1 exitCodeConnectionError = 2 ) @@ -130,7 +130,7 @@ func (uc *CreateMysqlBackupUsecase) buildMysqldumpArgs(my *mysqltypes.MysqlDatab } func (uc *CreateMysqlBackupUsecase) getNetworkCompressionArgs(version tools.MysqlVersion) []string { - const zstdCompressionLevel = 3 + const zstdCompressionLevel = 5 switch version { case tools.MysqlVersion80, tools.MysqlVersion84: diff --git a/backend/internal/features/databases/controller_test.go b/backend/internal/features/databases/controller_test.go index 5b7c207..18df232 100644 --- a/backend/internal/features/databases/controller_test.go +++ b/backend/internal/features/databases/controller_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "postgresus-backend/internal/features/databases/databases/mariadb" + "postgresus-backend/internal/features/databases/databases/mongodb" "postgresus-backend/internal/features/databases/databases/postgresql" users_enums "postgresus-backend/internal/features/users/enums" users_testing 
"postgresus-backend/internal/features/users/testing" @@ -943,6 +944,57 @@ func Test_DatabaseSensitiveDataLifecycle_AllTypes(t *testing.T) { assert.Equal(t, "", database.Mariadb.Password) }, }, + { + name: "MongoDB Database", + databaseType: DatabaseTypeMongodb, + createDatabase: func(workspaceID uuid.UUID) *Database { + return &Database{ + WorkspaceID: &workspaceID, + Name: "Test MongoDB Database", + Type: DatabaseTypeMongodb, + Mongodb: &mongodb.MongodbDatabase{ + Version: tools.MongodbVersion70, + Host: "localhost", + Port: 27017, + Username: "root", + Password: "original-password-secret", + Database: "test_db", + AuthDatabase: "admin", + IsHttps: false, + }, + } + }, + updateDatabase: func(workspaceID uuid.UUID, databaseID uuid.UUID) *Database { + return &Database{ + ID: databaseID, + WorkspaceID: &workspaceID, + Name: "Updated MongoDB Database", + Type: DatabaseTypeMongodb, + Mongodb: &mongodb.MongodbDatabase{ + Version: tools.MongodbVersion80, + Host: "updated-host", + Port: 27018, + Username: "updated_user", + Password: "", + Database: "updated_test_db", + AuthDatabase: "admin", + IsHttps: false, + }, + } + }, + verifySensitiveData: func(t *testing.T, database *Database) { + assert.True(t, strings.HasPrefix(database.Mongodb.Password, "enc:"), + "Password should be encrypted in database") + + encryptor := encryption.GetFieldEncryptor() + decrypted, err := encryptor.Decrypt(database.ID, database.Mongodb.Password) + assert.NoError(t, err) + assert.Equal(t, "original-password-secret", decrypted) + }, + verifyHiddenData: func(t *testing.T, database *Database) { + assert.Equal(t, "", database.Mongodb.Password) + }, + }, } for _, tc := range testCases { diff --git a/backend/internal/features/databases/databases/mongodb/model.go b/backend/internal/features/databases/databases/mongodb/model.go new file mode 100644 index 0000000..cf29817 --- /dev/null +++ b/backend/internal/features/databases/databases/mongodb/model.go @@ -0,0 +1,426 @@ +package mongodb + +import ( + 
"context"
+	"errors"
+	"fmt"
+	"log/slog"
+	"net/url"
+	"regexp"
+	"time"
+
+	"postgresus-backend/internal/util/encryption"
+	"postgresus-backend/internal/util/tools"
+
+	"github.com/google/uuid"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+type MongodbDatabase struct {
+	ID         uuid.UUID  `json:"id" gorm:"primaryKey;type:uuid;default:gen_random_uuid()"`
+	DatabaseID *uuid.UUID `json:"databaseId" gorm:"type:uuid;column:database_id"`
+
+	Version tools.MongodbVersion `json:"version" gorm:"type:text;not null"`
+
+	Host         string `json:"host" gorm:"type:text;not null"`
+	Port         int    `json:"port" gorm:"type:int;not null"`
+	Username     string `json:"username" gorm:"type:text;not null"`
+	Password     string `json:"password" gorm:"type:text;not null"`
+	Database     string `json:"database" gorm:"type:text;not null"`
+	AuthDatabase string `json:"authDatabase" gorm:"type:text;not null;default:'admin'"`
+	IsHttps      bool   `json:"isHttps" gorm:"type:boolean;default:false"`
+}
+
+func (m *MongodbDatabase) TableName() string {
+	return "mongodb_databases"
+}
+
+func (m *MongodbDatabase) Validate() error {
+	if m.Host == "" {
+		return errors.New("host is required")
+	}
+	if m.Port == 0 {
+		return errors.New("port is required")
+	}
+	if m.Username == "" {
+		return errors.New("username is required")
+	}
+	if m.Password == "" {
+		return errors.New("password is required")
+	}
+	if m.Database == "" {
+		return errors.New("database is required")
+	}
+	return nil
+}
+
+func (m *MongodbDatabase) TestConnection(
+	logger *slog.Logger,
+	encryptor encryption.FieldEncryptor,
+	databaseID uuid.UUID,
+) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+
+	password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID)
+	if err != nil {
+		return fmt.Errorf("failed to decrypt password: %w", err)
+	}
+
+	uri := m.buildConnectionURI(password)
+
+	clientOptions := 
options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return fmt.Errorf("failed to connect to MongoDB: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect from MongoDB", "error", disconnectErr) + } + }() + + if err := client.Ping(ctx, nil); err != nil { + return fmt.Errorf("failed to ping MongoDB database '%s': %w", m.Database, err) + } + + detectedVersion, err := detectMongodbVersion(ctx, client) + if err != nil { + return err + } + m.Version = detectedVersion + + return nil +} + +func (m *MongodbDatabase) HideSensitiveData() { + if m == nil { + return + } + m.Password = "" +} + +func (m *MongodbDatabase) Update(incoming *MongodbDatabase) { + m.Version = incoming.Version + m.Host = incoming.Host + m.Port = incoming.Port + m.Username = incoming.Username + m.Database = incoming.Database + m.AuthDatabase = incoming.AuthDatabase + m.IsHttps = incoming.IsHttps + + if incoming.Password != "" { + m.Password = incoming.Password + } +} + +func (m *MongodbDatabase) EncryptSensitiveFields( + databaseID uuid.UUID, + encryptor encryption.FieldEncryptor, +) error { + if m.Password != "" { + encrypted, err := encryptor.Encrypt(databaseID, m.Password) + if err != nil { + return err + } + m.Password = encrypted + } + return nil +} + +func (m *MongodbDatabase) PopulateVersionIfEmpty( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + if m.Version != "" { + return nil + } + return m.PopulateVersion(logger, encryptor, databaseID) +} + +func (m *MongodbDatabase) PopulateVersion( + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return fmt.Errorf("failed to decrypt 
password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect", "error", disconnectErr) + } + }() + + detectedVersion, err := detectMongodbVersion(ctx, client) + if err != nil { + return err + } + + m.Version = detectedVersion + return nil +} + +func (m *MongodbDatabase) IsUserReadOnly( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (bool, error) { + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return false, fmt.Errorf("failed to decrypt password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return false, fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect", "error", disconnectErr) + } + }() + + authDB := m.AuthDatabase + if authDB == "" { + authDB = "admin" + } + + adminDB := client.Database(authDB) + var result bson.M + err = adminDB.RunCommand(ctx, bson.D{ + {Key: "usersInfo", Value: bson.D{ + {Key: "user", Value: m.Username}, + {Key: "db", Value: authDB}, + }}, + }).Decode(&result) + if err != nil { + return false, fmt.Errorf("failed to get user info: %w", err) + } + + writeRoles := []string{ + "readWrite", "readWriteAnyDatabase", "dbAdmin", "dbAdminAnyDatabase", + "userAdmin", "userAdminAnyDatabase", "clusterAdmin", "root", + "dbOwner", "backup", "restore", + } + + users, ok := result["users"].(bson.A) + if !ok || len(users) == 0 { + return true, nil + } + + user, ok := 
users[0].(bson.M) + if !ok { + return true, nil + } + + roles, ok := user["roles"].(bson.A) + if !ok { + return true, nil + } + + for _, roleDoc := range roles { + role, ok := roleDoc.(bson.M) + if !ok { + continue + } + roleName, _ := role["role"].(string) + for _, writeRole := range writeRoles { + if roleName == writeRole { + return false, nil + } + } + } + + return true, nil +} + +func (m *MongodbDatabase) CreateReadOnlyUser( + ctx context.Context, + logger *slog.Logger, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, string, error) { + password, err := decryptPasswordIfNeeded(m.Password, encryptor, databaseID) + if err != nil { + return "", "", fmt.Errorf("failed to decrypt password: %w", err) + } + + uri := m.buildConnectionURI(password) + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return "", "", fmt.Errorf("failed to connect to database: %w", err) + } + defer func() { + if disconnectErr := client.Disconnect(ctx); disconnectErr != nil { + logger.Error("Failed to disconnect", "error", disconnectErr) + } + }() + + authDB := m.AuthDatabase + if authDB == "" { + authDB = "admin" + } + + maxRetries := 3 + for attempt := range maxRetries { + newUsername := fmt.Sprintf("postgresus-%s", uuid.New().String()[:8]) + newPassword := uuid.New().String() + + adminDB := client.Database(authDB) + err = adminDB.RunCommand(ctx, bson.D{ + {Key: "createUser", Value: newUsername}, + {Key: "pwd", Value: newPassword}, + {Key: "roles", Value: bson.A{ + bson.D{ + {Key: "role", Value: "backup"}, + {Key: "db", Value: "admin"}, + }, + bson.D{ + {Key: "role", Value: "read"}, + {Key: "db", Value: m.Database}, + }, + }}, + }).Err() + + if err != nil { + if attempt < maxRetries-1 { + continue + } + return "", "", fmt.Errorf("failed to create user: %w", err) + } + + logger.Info( + "Read-only MongoDB user created successfully", + "username", newUsername, + ) + return newUsername, 
newPassword, nil + } + + return "", "", errors.New("failed to generate unique username after 3 attempts") +} + +// buildConnectionURI builds a MongoDB connection URI +func (m *MongodbDatabase) buildConnectionURI(password string) string { + authDB := m.AuthDatabase + if authDB == "" { + authDB = "admin" + } + + tlsOption := "false" + if m.IsHttps { + tlsOption = "true" + } + + return fmt.Sprintf( + "mongodb://%s:%s@%s:%d/%s?authSource=%s&tls=%s&connectTimeoutMS=15000", + m.Username, + password, + m.Host, + m.Port, + m.Database, + authDB, + tlsOption, + ) +} + +// BuildMongodumpURI builds a URI suitable for mongodump (without database in path) +func (m *MongodbDatabase) BuildMongodumpURI(password string) string { + authDB := m.AuthDatabase + if authDB == "" { + authDB = "admin" + } + + tlsOption := "false" + if m.IsHttps { + tlsOption = "true" + } + + return fmt.Sprintf( + "mongodb://%s:%s@%s:%d/?authSource=%s&tls=%s&connectTimeoutMS=15000", + m.Username, + password, + m.Host, + m.Port, + authDB, + tlsOption, + ) +} + +// detectMongodbVersion gets MongoDB server version from buildInfo command +func detectMongodbVersion(ctx context.Context, client *mongo.Client) (tools.MongodbVersion, error) { + adminDB := client.Database("admin") + var result bson.M + err := adminDB.RunCommand(ctx, bson.D{{Key: "buildInfo", Value: 1}}).Decode(&result) + if err != nil { + return "", fmt.Errorf("failed to get MongoDB version: %w", err) + } + + versionStr, ok := result["version"].(string) + if !ok { + return "", errors.New("could not parse MongoDB version from buildInfo") + } + + re := regexp.MustCompile(`^(\d+)\.(\d+)`) + matches := re.FindStringSubmatch(versionStr) + if len(matches) < 3 { + return "", fmt.Errorf("could not parse MongoDB version: %s", versionStr) + } + + major := matches[1] + minor := matches[2] + versionKey := fmt.Sprintf("%s.%s", major, minor) + + switch versionKey { + case "4.0": + return tools.MongodbVersion40, nil + case "4.2": + return tools.MongodbVersion42, nil 
+ case "4.4": + return tools.MongodbVersion44, nil + case "5.0": + return tools.MongodbVersion50, nil + case "6.0": + return tools.MongodbVersion60, nil + case "7.0": + return tools.MongodbVersion70, nil + case "8.0": + return tools.MongodbVersion80, nil + default: + return "", fmt.Errorf( + "unsupported MongoDB version: %s (supported: 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, 8.0)", + versionKey, + ) + } +} + +func decryptPasswordIfNeeded( + password string, + encryptor encryption.FieldEncryptor, + databaseID uuid.UUID, +) (string, error) { + if encryptor == nil { + return password, nil + } + return encryptor.Decrypt(databaseID, password) +} diff --git a/backend/internal/features/databases/databases/mongodb/readonly_user_test.go b/backend/internal/features/databases/databases/mongodb/readonly_user_test.go new file mode 100644 index 0000000..f8b4d16 --- /dev/null +++ b/backend/internal/features/databases/databases/mongodb/readonly_user_test.go @@ -0,0 +1,309 @@ +package mongodb + +import ( + "context" + "fmt" + "log/slog" + "os" + "strconv" + "strings" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/util/tools" +) + +func Test_IsUserReadOnly_AdminUser_ReturnsFalse(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, 
tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + container := connectToMongodbContainer(t, tc.port, tc.version) + defer container.Client.Disconnect(context.Background()) + + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + ctx := context.Background() + + isReadOnly, err := mongodbModel.IsUserReadOnly(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + assert.False(t, isReadOnly, "Root user should not be read-only") + }) + } +} + +func Test_CreateReadOnlyUser_UserCanReadButNotWrite(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + container := connectToMongodbContainer(t, tc.port, tc.version) + defer container.Client.Disconnect(context.Background()) + + ctx := context.Background() + db := container.Client.Database(container.Database) + + _ = db.Collection("readonly_test").Drop(ctx) + _ = db.Collection("hack_collection").Drop(ctx) + + _, err := db.Collection("readonly_test").InsertMany(ctx, []interface{}{ + bson.M{"data": "test1"}, + bson.M{"data": "test2"}, + }) + assert.NoError(t, err) + + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + + username, password, err := mongodbModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + assert.NotEmpty(t, username) + assert.NotEmpty(t, 
password) + assert.True(t, strings.HasPrefix(username, "postgresus-")) + + if err != nil { + return + } + + readOnlyClient := connectWithCredentials(t, container, username, password) + defer readOnlyClient.Disconnect(ctx) + + readOnlyDB := readOnlyClient.Database(container.Database) + + var count int64 + count, err = readOnlyDB.Collection("readonly_test").CountDocuments(ctx, bson.M{}) + assert.NoError(t, err) + assert.Equal(t, int64(2), count) + + _, err = readOnlyDB.Collection("readonly_test"). + InsertOne(ctx, bson.M{"data": "should-fail"}) + assert.Error(t, err) + assertWriteDenied(t, err) + + _, err = readOnlyDB.Collection("readonly_test").UpdateOne( + ctx, + bson.M{"data": "test1"}, + bson.M{"$set": bson.M{"data": "hacked"}}, + ) + assert.Error(t, err) + assertWriteDenied(t, err) + + _, err = readOnlyDB.Collection("readonly_test").DeleteOne(ctx, bson.M{"data": "test1"}) + assert.Error(t, err) + assertWriteDenied(t, err) + + err = readOnlyDB.CreateCollection(ctx, "hack_collection") + assert.Error(t, err) + assertWriteDenied(t, err) + + dropUserSafe(container.Client, username, container.AuthDatabase) + }) + } +} + +func Test_ReadOnlyUser_FutureCollections_CanSelect(t *testing.T) { + env := config.GetEnv() + container := connectToMongodbContainer(t, env.TestMongodb70Port, tools.MongodbVersion70) + defer container.Client.Disconnect(context.Background()) + + ctx := context.Background() + db := container.Client.Database(container.Database) + + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + + username, password, err := mongodbModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + + _ = db.Collection("future_collection").Drop(ctx) + _, err = db.Collection("future_collection").InsertOne(ctx, bson.M{"data": "future_data"}) + assert.NoError(t, err) + + readOnlyClient := connectWithCredentials(t, container, username, password) + defer readOnlyClient.Disconnect(ctx) + + readOnlyDB := 
readOnlyClient.Database(container.Database) + + var result bson.M + err = readOnlyDB.Collection("future_collection").FindOne(ctx, bson.M{}).Decode(&result) + assert.NoError(t, err) + assert.Equal(t, "future_data", result["data"]) + + dropUserSafe(container.Client, username, container.AuthDatabase) +} + +func Test_ReadOnlyUser_CannotDropOrModifyCollections(t *testing.T) { + env := config.GetEnv() + container := connectToMongodbContainer(t, env.TestMongodb70Port, tools.MongodbVersion70) + defer container.Client.Disconnect(context.Background()) + + ctx := context.Background() + db := container.Client.Database(container.Database) + + _ = db.Collection("drop_test").Drop(ctx) + _, err := db.Collection("drop_test").InsertOne(ctx, bson.M{"data": "test1"}) + assert.NoError(t, err) + + mongodbModel := createMongodbModel(container) + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + + username, password, err := mongodbModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New()) + assert.NoError(t, err) + + readOnlyClient := connectWithCredentials(t, container, username, password) + defer readOnlyClient.Disconnect(ctx) + + readOnlyDB := readOnlyClient.Database(container.Database) + + err = readOnlyDB.Collection("drop_test").Drop(ctx) + assert.Error(t, err) + assertWriteDenied(t, err) + + _, err = readOnlyDB.Collection("drop_test").Indexes().CreateOne(ctx, mongo.IndexModel{ + Keys: bson.D{{Key: "data", Value: 1}}, + }) + assert.Error(t, err) + assertWriteDenied(t, err) + + dropUserSafe(container.Client, username, container.AuthDatabase) +} + +type MongodbContainer struct { + Host string + Port int + Username string + Password string + Database string + AuthDatabase string + Version tools.MongodbVersion + Client *mongo.Client +} + +func connectToMongodbContainer( + t *testing.T, + port string, + version tools.MongodbVersion, +) *MongodbContainer { + if port == "" { + t.Skipf("MongoDB port not configured for version %s", version) + } + + dbName := "testdb" + host := "127.0.0.1" 
+ username := "root" + password := "rootpassword" + authDatabase := "admin" + + portInt, err := strconv.Atoi(port) + assert.NoError(t, err) + + uri := fmt.Sprintf( + "mongodb://%s:%s@%s:%d/%s?authSource=%s", + username, password, host, portInt, dbName, authDatabase, + ) + + ctx := context.Background() + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + t.Skipf("Failed to connect to MongoDB %s: %v", version, err) + } + + if err := client.Ping(ctx, nil); err != nil { + t.Skipf("Failed to ping MongoDB %s: %v", version, err) + } + + return &MongodbContainer{ + Host: host, + Port: portInt, + Username: username, + Password: password, + Database: dbName, + AuthDatabase: authDatabase, + Version: version, + Client: client, + } +} + +func createMongodbModel(container *MongodbContainer) *MongodbDatabase { + return &MongodbDatabase{ + Version: container.Version, + Host: container.Host, + Port: container.Port, + Username: container.Username, + Password: container.Password, + Database: container.Database, + AuthDatabase: container.AuthDatabase, + IsHttps: false, + } +} + +func connectWithCredentials( + t *testing.T, + container *MongodbContainer, + username, password string, +) *mongo.Client { + uri := fmt.Sprintf( + "mongodb://%s:%s@%s:%d/%s?authSource=%s", + username, password, container.Host, container.Port, + container.Database, container.AuthDatabase, + ) + + ctx := context.Background() + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + assert.NoError(t, err) + + return client +} + +func dropUserSafe(client *mongo.Client, username, authDatabase string) { + ctx := context.Background() + adminDB := client.Database(authDatabase) + _ = adminDB.RunCommand(ctx, bson.D{{Key: "dropUser", Value: username}}) +} + +func assertWriteDenied(t *testing.T, err error) { + errStr := strings.ToLower(err.Error()) + assert.True(t, + strings.Contains(errStr, "not authorized") 
|| + strings.Contains(errStr, "unauthorized") || + strings.Contains(errStr, "permission denied"), + "Expected authorization error, got: %v", err) +} diff --git a/backend/internal/features/databases/enums.go b/backend/internal/features/databases/enums.go index 291de1a..83f15e6 100644 --- a/backend/internal/features/databases/enums.go +++ b/backend/internal/features/databases/enums.go @@ -6,6 +6,7 @@ const ( DatabaseTypePostgres DatabaseType = "POSTGRES" DatabaseTypeMysql DatabaseType = "MYSQL" DatabaseTypeMariadb DatabaseType = "MARIADB" + DatabaseTypeMongodb DatabaseType = "MONGODB" ) type HealthStatus string diff --git a/backend/internal/features/databases/model.go b/backend/internal/features/databases/model.go index fc0a3c9..5262320 100644 --- a/backend/internal/features/databases/model.go +++ b/backend/internal/features/databases/model.go @@ -4,6 +4,7 @@ import ( "errors" "log/slog" "postgresus-backend/internal/features/databases/databases/mariadb" + "postgresus-backend/internal/features/databases/databases/mongodb" "postgresus-backend/internal/features/databases/databases/mysql" "postgresus-backend/internal/features/databases/databases/postgresql" "postgresus-backend/internal/features/notifiers" @@ -25,6 +26,7 @@ type Database struct { Postgresql *postgresql.PostgresqlDatabase `json:"postgresql,omitempty" gorm:"foreignKey:DatabaseID"` Mysql *mysql.MysqlDatabase `json:"mysql,omitempty" gorm:"foreignKey:DatabaseID"` Mariadb *mariadb.MariadbDatabase `json:"mariadb,omitempty" gorm:"foreignKey:DatabaseID"` + Mongodb *mongodb.MongodbDatabase `json:"mongodb,omitempty" gorm:"foreignKey:DatabaseID"` Notifiers []notifiers.Notifier `json:"notifiers" gorm:"many2many:database_notifiers;"` @@ -57,6 +59,11 @@ func (d *Database) Validate() error { return errors.New("mariadb database is required") } return d.Mariadb.Validate() + case DatabaseTypeMongodb: + if d.Mongodb == nil { + return errors.New("mongodb database is required") + } + return d.Mongodb.Validate() default: return 
errors.New("invalid database type: " + string(d.Type)) } @@ -91,6 +98,9 @@ func (d *Database) EncryptSensitiveFields(encryptor encryption.FieldEncryptor) e if d.Mariadb != nil { return d.Mariadb.EncryptSensitiveFields(d.ID, encryptor) } + if d.Mongodb != nil { + return d.Mongodb.EncryptSensitiveFields(d.ID, encryptor) + } return nil } @@ -107,6 +117,9 @@ func (d *Database) PopulateVersionIfEmpty( if d.Mariadb != nil { return d.Mariadb.PopulateVersionIfEmpty(logger, encryptor, d.ID) } + if d.Mongodb != nil { + return d.Mongodb.PopulateVersionIfEmpty(logger, encryptor, d.ID) + } return nil } @@ -128,6 +141,10 @@ func (d *Database) Update(incoming *Database) { if d.Mariadb != nil && incoming.Mariadb != nil { d.Mariadb.Update(incoming.Mariadb) } + case DatabaseTypeMongodb: + if d.Mongodb != nil && incoming.Mongodb != nil { + d.Mongodb.Update(incoming.Mongodb) + } } } @@ -139,6 +156,8 @@ func (d *Database) getSpecificDatabase() DatabaseConnector { return d.Mysql case DatabaseTypeMariadb: return d.Mariadb + case DatabaseTypeMongodb: + return d.Mongodb } panic("invalid database type: " + string(d.Type)) diff --git a/backend/internal/features/databases/repository.go b/backend/internal/features/databases/repository.go index 6268ac6..1ceadcf 100644 --- a/backend/internal/features/databases/repository.go +++ b/backend/internal/features/databases/repository.go @@ -3,6 +3,7 @@ package databases import ( "errors" "postgresus-backend/internal/features/databases/databases/mariadb" + "postgresus-backend/internal/features/databases/databases/mongodb" "postgresus-backend/internal/features/databases/databases/mysql" "postgresus-backend/internal/features/databases/databases/postgresql" "postgresus-backend/internal/storage" @@ -38,17 +39,22 @@ func (r *DatabaseRepository) Save(database *Database) (*Database, error) { return errors.New("mariadb configuration is required for MariaDB database") } database.Mariadb.DatabaseID = &database.ID + case DatabaseTypeMongodb: + if database.Mongodb 
== nil { + return errors.New("mongodb configuration is required for MongoDB database") + } + database.Mongodb.DatabaseID = &database.ID } if isNew { if err := tx.Create(database). - Omit("Postgresql", "Mysql", "Mariadb", "Notifiers"). + Omit("Postgresql", "Mysql", "Mariadb", "Mongodb", "Notifiers"). Error; err != nil { return err } } else { if err := tx.Save(database). - Omit("Postgresql", "Mysql", "Mariadb", "Notifiers"). + Omit("Postgresql", "Mysql", "Mariadb", "Mongodb", "Notifiers"). Error; err != nil { return err } @@ -91,6 +97,18 @@ func (r *DatabaseRepository) Save(database *Database) (*Database, error) { return err } } + case DatabaseTypeMongodb: + database.Mongodb.DatabaseID = &database.ID + if database.Mongodb.ID == uuid.Nil { + database.Mongodb.ID = uuid.New() + if err := tx.Create(database.Mongodb).Error; err != nil { + return err + } + } else { + if err := tx.Save(database.Mongodb).Error; err != nil { + return err + } + } } if err := tx. @@ -118,6 +136,7 @@ func (r *DatabaseRepository) FindByID(id uuid.UUID) (*Database, error) { Preload("Postgresql"). Preload("Mysql"). Preload("Mariadb"). + Preload("Mongodb"). Preload("Notifiers"). Where("id = ?", id). First(&database).Error; err != nil { @@ -135,6 +154,7 @@ func (r *DatabaseRepository) FindByWorkspaceID(workspaceID uuid.UUID) ([]*Databa Preload("Postgresql"). Preload("Mysql"). Preload("Mariadb"). + Preload("Mongodb"). Preload("Notifiers"). Where("workspace_id = ?", workspaceID). Order("CASE WHEN health_status = 'UNAVAILABLE' THEN 1 WHEN health_status = 'AVAILABLE' THEN 2 WHEN health_status IS NULL THEN 3 ELSE 4 END, name ASC"). @@ -177,6 +197,12 @@ func (r *DatabaseRepository) Delete(id uuid.UUID) error { Delete(&mariadb.MariadbDatabase{}).Error; err != nil { return err } + case DatabaseTypeMongodb: + if err := tx. + Where("database_id = ?", id). 
+ Delete(&mongodb.MongodbDatabase{}).Error; err != nil { + return err + } } if err := tx.Delete(&Database{}, id).Error; err != nil { @@ -209,6 +235,7 @@ func (r *DatabaseRepository) GetAllDatabases() ([]*Database, error) { Preload("Postgresql"). Preload("Mysql"). Preload("Mariadb"). + Preload("Mongodb"). Preload("Notifiers"). Find(&databases).Error; err != nil { return nil, err diff --git a/backend/internal/features/databases/service.go b/backend/internal/features/databases/service.go index d65757d..ec3d562 100644 --- a/backend/internal/features/databases/service.go +++ b/backend/internal/features/databases/service.go @@ -9,6 +9,7 @@ import ( audit_logs "postgresus-backend/internal/features/audit_logs" "postgresus-backend/internal/features/databases/databases/mariadb" + "postgresus-backend/internal/features/databases/databases/mongodb" "postgresus-backend/internal/features/databases/databases/mysql" "postgresus-backend/internal/features/databases/databases/postgresql" "postgresus-backend/internal/features/notifiers" @@ -434,6 +435,21 @@ func (s *DatabaseService) CopyDatabase( IsHttps: existingDatabase.Mariadb.IsHttps, } } + case DatabaseTypeMongodb: + if existingDatabase.Mongodb != nil { + newDatabase.Mongodb = &mongodb.MongodbDatabase{ + ID: uuid.Nil, + DatabaseID: nil, + Version: existingDatabase.Mongodb.Version, + Host: existingDatabase.Mongodb.Host, + Port: existingDatabase.Mongodb.Port, + Username: existingDatabase.Mongodb.Username, + Password: existingDatabase.Mongodb.Password, + Database: existingDatabase.Mongodb.Database, + AuthDatabase: existingDatabase.Mongodb.AuthDatabase, + IsHttps: existingDatabase.Mongodb.IsHttps, + } + } } if err := newDatabase.Validate(); err != nil { @@ -573,6 +589,13 @@ func (s *DatabaseService) IsUserReadOnly( s.fieldEncryptor, usingDatabase.ID, ) + case DatabaseTypeMongodb: + return usingDatabase.Mongodb.IsUserReadOnly( + ctx, + s.logger, + s.fieldEncryptor, + usingDatabase.ID, + ) default: return false, errors.New("read-only 
check not supported for this database type") } @@ -646,6 +669,10 @@ func (s *DatabaseService) CreateReadOnlyUser( username, password, err = usingDatabase.Mariadb.CreateReadOnlyUser( ctx, s.logger, s.fieldEncryptor, usingDatabase.ID, ) + case DatabaseTypeMongodb: + username, password, err = usingDatabase.Mongodb.CreateReadOnlyUser( + ctx, s.logger, s.fieldEncryptor, usingDatabase.ID, + ) default: return "", "", errors.New("read-only user creation not supported for this database type") } diff --git a/backend/internal/features/healthcheck/attempt/check_database_health_uc.go b/backend/internal/features/healthcheck/attempt/check_database_health_uc.go index 953dfff..e9a132f 100644 --- a/backend/internal/features/healthcheck/attempt/check_database_health_uc.go +++ b/backend/internal/features/healthcheck/attempt/check_database_health_uc.go @@ -191,6 +191,10 @@ func (uc *CheckDatabaseHealthUseCase) validateDatabase( if database.Mariadb == nil { return fmt.Errorf("database MariaDB config is not set") } + case databases.DatabaseTypeMongodb: + if database.Mongodb == nil { + return fmt.Errorf("database MongoDB config is not set") + } default: return fmt.Errorf("unsupported database type: %s", database.Type) } diff --git a/backend/internal/features/restores/dto.go b/backend/internal/features/restores/dto.go index caf4383..0cc5a7a 100644 --- a/backend/internal/features/restores/dto.go +++ b/backend/internal/features/restores/dto.go @@ -2,6 +2,7 @@ package restores import ( "postgresus-backend/internal/features/databases/databases/mariadb" + "postgresus-backend/internal/features/databases/databases/mongodb" "postgresus-backend/internal/features/databases/databases/mysql" "postgresus-backend/internal/features/databases/databases/postgresql" ) @@ -10,4 +11,5 @@ type RestoreBackupRequest struct { PostgresqlDatabase *postgresql.PostgresqlDatabase `json:"postgresqlDatabase"` MysqlDatabase *mysql.MysqlDatabase `json:"mysqlDatabase"` MariadbDatabase *mariadb.MariadbDatabase 
`json:"mariadbDatabase"` + MongodbDatabase *mongodb.MongodbDatabase `json:"mongodbDatabase"` } diff --git a/backend/internal/features/restores/service.go b/backend/internal/features/restores/service.go index 85f03ae..3eb6653 100644 --- a/backend/internal/features/restores/service.go +++ b/backend/internal/features/restores/service.go @@ -171,6 +171,10 @@ func (s *RestoreService) RestoreBackup( if requestDTO.MariadbDatabase == nil { return errors.New("mariadb database is required") } + case databases.DatabaseTypeMongodb: + if requestDTO.MongodbDatabase == nil { + return errors.New("mongodb database is required") + } } restore := models.Restore{ @@ -215,6 +219,7 @@ func (s *RestoreService) RestoreBackup( Postgresql: requestDTO.PostgresqlDatabase, Mysql: requestDTO.MysqlDatabase, Mariadb: requestDTO.MariadbDatabase, + Mongodb: requestDTO.MongodbDatabase, } if err := restoringToDB.PopulateVersionIfEmpty(s.logger, s.fieldEncryptor); err != nil { @@ -293,6 +298,16 @@ func (s *RestoreService) validateVersionCompatibility( return err } } + if requestDTO.MongodbDatabase != nil { + err := requestDTO.MongodbDatabase.PopulateVersion( + s.logger, + s.fieldEncryptor, + backupDatabase.ID, + ) + if err != nil { + return err + } + } switch backupDatabase.Type { case databases.DatabaseTypePostgres: @@ -331,6 +346,18 @@ func (s *RestoreService) validateVersionCompatibility( `Should be restored to the same version as the backup database or higher. ` + `For example, you can restore MariaDB 10.11 backup to MariaDB 10.11, 11.4 or higher. But cannot restore to 10.6`) } + case databases.DatabaseTypeMongodb: + if requestDTO.MongodbDatabase == nil { + return errors.New("mongodb database configuration is required for restore") + } + if tools.IsMongodbBackupVersionHigherThanRestoreVersion( + backupDatabase.Mongodb.Version, + requestDTO.MongodbDatabase.Version, + ) { + return errors.New(`backup database version is higher than restore database version. 
` + + `Should be restored to the same version as the backup database or higher. ` + + `For example, you can restore MongoDB 6.0 backup to MongoDB 6.0, 7.0 or higher. But cannot restore to 5.0`) + } } return nil } diff --git a/backend/internal/features/restores/usecases/di.go b/backend/internal/features/restores/usecases/di.go index 2851ab2..526f01b 100644 --- a/backend/internal/features/restores/usecases/di.go +++ b/backend/internal/features/restores/usecases/di.go @@ -2,6 +2,7 @@ package usecases import ( usecases_mariadb "postgresus-backend/internal/features/restores/usecases/mariadb" + usecases_mongodb "postgresus-backend/internal/features/restores/usecases/mongodb" usecases_mysql "postgresus-backend/internal/features/restores/usecases/mysql" usecases_postgresql "postgresus-backend/internal/features/restores/usecases/postgresql" ) @@ -10,6 +11,7 @@ var restoreBackupUsecase = &RestoreBackupUsecase{ usecases_postgresql.GetRestorePostgresqlBackupUsecase(), usecases_mysql.GetRestoreMysqlBackupUsecase(), usecases_mariadb.GetRestoreMariadbBackupUsecase(), + usecases_mongodb.GetRestoreMongodbBackupUsecase(), } func GetRestoreBackupUsecase() *RestoreBackupUsecase { diff --git a/backend/internal/features/restores/usecases/mongodb/di.go b/backend/internal/features/restores/usecases/mongodb/di.go new file mode 100644 index 0000000..b3adb16 --- /dev/null +++ b/backend/internal/features/restores/usecases/mongodb/di.go @@ -0,0 +1,15 @@ +package usecases_mongodb + +import ( + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/util/logger" +) + +var restoreMongodbBackupUsecase = &RestoreMongodbBackupUsecase{ + logger.GetLogger(), + encryption_secrets.GetSecretKeyService(), +} + +func GetRestoreMongodbBackupUsecase() *RestoreMongodbBackupUsecase { + return restoreMongodbBackupUsecase +} diff --git a/backend/internal/features/restores/usecases/mongodb/restore_backup_uc.go 
b/backend/internal/features/restores/usecases/mongodb/restore_backup_uc.go new file mode 100644 index 0000000..ac0f245 --- /dev/null +++ b/backend/internal/features/restores/usecases/mongodb/restore_backup_uc.go @@ -0,0 +1,419 @@ +package usecases_mongodb + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/google/uuid" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + "postgresus-backend/internal/features/backups/backups/encryption" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mongodbtypes "postgresus-backend/internal/features/databases/databases/mongodb" + encryption_secrets "postgresus-backend/internal/features/encryption/secrets" + "postgresus-backend/internal/features/restores/models" + "postgresus-backend/internal/features/storages" + util_encryption "postgresus-backend/internal/util/encryption" + files_utils "postgresus-backend/internal/util/files" + "postgresus-backend/internal/util/tools" +) + +const ( + restoreTimeout = 23 * time.Hour +) + +type RestoreMongodbBackupUsecase struct { + logger *slog.Logger + secretKeyService *encryption_secrets.SecretKeyService +} + +func (uc *RestoreMongodbBackupUsecase) Execute( + originalDB *databases.Database, + restoringToDB *databases.Database, + backupConfig *backups_config.BackupConfig, + restore models.Restore, + backup *backups.Backup, + storage *storages.Storage, +) error { + if originalDB.Type != databases.DatabaseTypeMongodb { + return errors.New("database type not supported") + } + + uc.logger.Info( + "Restoring MongoDB backup via mongorestore", + "restoreId", restore.ID, + "backupId", backup.ID, + ) + + mdb := restoringToDB.Mongodb + if mdb == nil { + return fmt.Errorf("mongodb configuration is required for restore") + } + + if mdb.Database == "" { + return fmt.Errorf("target 
database name is required for mongorestore") + } + + fieldEncryptor := util_encryption.GetFieldEncryptor() + decryptedPassword, err := fieldEncryptor.Decrypt(restoringToDB.ID, mdb.Password) + if err != nil { + return fmt.Errorf("failed to decrypt password: %w", err) + } + + sourceDatabase := "" + if originalDB.Mongodb != nil { + sourceDatabase = originalDB.Mongodb.Database + } + + args := uc.buildMongorestoreArgs(mdb, decryptedPassword, sourceDatabase) + + return uc.restoreFromStorage( + tools.GetMongodbExecutable( + tools.MongodbExecutableMongorestore, + config.GetEnv().EnvMode, + config.GetEnv().MongodbInstallDir, + ), + args, + backup, + storage, + ) +} + +func (uc *RestoreMongodbBackupUsecase) buildMongorestoreArgs( + mdb *mongodbtypes.MongodbDatabase, + password string, + sourceDatabase string, +) []string { + uri := mdb.BuildMongodumpURI(password) + + args := []string{ + "--uri=" + uri, + "--archive", + "--gzip", + "--drop", + } + + if sourceDatabase != "" && sourceDatabase != mdb.Database { + args = append(args, "--nsFrom="+sourceDatabase+".*") + args = append(args, "--nsTo="+mdb.Database+".*") + } else if mdb.Database != "" { + args = append(args, "--nsInclude="+mdb.Database+".*") + } + + return args +} + +func (uc *RestoreMongodbBackupUsecase) restoreFromStorage( + mongorestoreBin string, + args []string, + backup *backups.Backup, + storage *storages.Storage, +) error { + ctx, cancel := context.WithTimeout(context.Background(), restoreTimeout) + defer cancel() + + go func() { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if config.IsShouldShutdown() { + cancel() + return + } + } + } + }() + + tempBackupFile, cleanupFunc, err := uc.downloadBackupToTempFile(ctx, backup, storage) + if err != nil { + return fmt.Errorf("failed to download backup: %w", err) + } + defer cleanupFunc() + + return uc.executeMongoRestore(ctx, mongorestoreBin, args, tempBackupFile, backup) +} + 
+func (uc *RestoreMongodbBackupUsecase) executeMongoRestore( + ctx context.Context, + mongorestoreBin string, + args []string, + backupFile string, + backup *backups.Backup, +) error { + cmd := exec.CommandContext(ctx, mongorestoreBin, args...) + + safeArgs := make([]string, len(args)) + for i, arg := range args { + if len(arg) > 6 && arg[:6] == "--uri=" { + safeArgs[i] = "--uri=mongodb://***:***@***" + } else { + safeArgs[i] = arg + } + } + uc.logger.Info( + "Executing MongoDB restore command", + "command", + mongorestoreBin, + "args", + safeArgs, + ) + + backupFileHandle, err := os.Open(backupFile) + if err != nil { + return fmt.Errorf("failed to open backup file: %w", err) + } + defer func() { _ = backupFileHandle.Close() }() + + var inputReader io.Reader = backupFileHandle + + if backup.Encryption == backups_config.BackupEncryptionEncrypted { + decryptReader, err := uc.setupDecryption(backupFileHandle, backup) + if err != nil { + return fmt.Errorf("failed to setup decryption: %w", err) + } + inputReader = decryptReader + } + + cmd.Stdin = inputReader + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LC_ALL=C.UTF-8", "LANG=C.UTF-8") + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return fmt.Errorf("stderr pipe: %w", err) + } + + stderrCh := make(chan []byte, 1) + go func() { + output, _ := io.ReadAll(stderrPipe) + stderrCh <- output + }() + + if err = cmd.Start(); err != nil { + return fmt.Errorf("start mongorestore: %w", err) + } + + waitErr := cmd.Wait() + stderrOutput := <-stderrCh + + if config.IsShouldShutdown() { + return fmt.Errorf("restore cancelled due to shutdown") + } + + if waitErr != nil { + return uc.handleMongoRestoreError(waitErr, stderrOutput, mongorestoreBin) + } + + return nil +} + +func (uc *RestoreMongodbBackupUsecase) downloadBackupToTempFile( + ctx context.Context, + backup *backups.Backup, + storage *storages.Storage, +) (string, func(), error) { + err := files_utils.EnsureDirectories([]string{ + 
config.GetEnv().TempFolder, + }) + if err != nil { + return "", nil, fmt.Errorf("failed to ensure directories: %w", err) + } + + tempDir, err := os.MkdirTemp(config.GetEnv().TempFolder, "restore_"+uuid.New().String()) + if err != nil { + return "", nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + + cleanupFunc := func() { + _ = os.RemoveAll(tempDir) + } + + tempBackupFile := filepath.Join(tempDir, "backup.archive.gz") + + uc.logger.Info( + "Downloading backup file from storage to temporary file", + "backupId", backup.ID, + "tempFile", tempBackupFile, + "encrypted", backup.Encryption == backups_config.BackupEncryptionEncrypted, + ) + + fieldEncryptor := util_encryption.GetFieldEncryptor() + rawReader, err := storage.GetFile(fieldEncryptor, backup.ID) + if err != nil { + cleanupFunc() + return "", nil, fmt.Errorf("failed to get backup file from storage: %w", err) + } + defer func() { + if err := rawReader.Close(); err != nil { + uc.logger.Error("Failed to close backup reader", "error", err) + } + }() + + tempFile, err := os.Create(tempBackupFile) + if err != nil { + cleanupFunc() + return "", nil, fmt.Errorf("failed to create temporary backup file: %w", err) + } + defer func() { + if err := tempFile.Close(); err != nil { + uc.logger.Error("Failed to close temporary file", "error", err) + } + }() + + _, err = uc.copyWithShutdownCheck(ctx, tempFile, rawReader) + if err != nil { + cleanupFunc() + return "", nil, fmt.Errorf("failed to write backup to temporary file: %w", err) + } + + uc.logger.Info("Backup file written to temporary location", "tempFile", tempBackupFile) + return tempBackupFile, cleanupFunc, nil +} + +func (uc *RestoreMongodbBackupUsecase) setupDecryption( + reader io.Reader, + backup *backups.Backup, +) (io.Reader, error) { + if backup.EncryptionSalt == nil || backup.EncryptionIV == nil { + return nil, errors.New("encrypted backup missing salt or IV") + } + + salt, err := base64.StdEncoding.DecodeString(*backup.EncryptionSalt) + if 
err != nil { + return nil, fmt.Errorf("failed to decode encryption salt: %w", err) + } + + nonce, err := base64.StdEncoding.DecodeString(*backup.EncryptionIV) + if err != nil { + return nil, fmt.Errorf("failed to decode encryption IV: %w", err) + } + + masterKey, err := uc.secretKeyService.GetSecretKey() + if err != nil { + return nil, fmt.Errorf("failed to get secret key: %w", err) + } + + decryptReader, err := encryption.NewDecryptionReader( + reader, + masterKey, + backup.ID, + salt, + nonce, + ) + if err != nil { + return nil, fmt.Errorf("failed to create decryption reader: %w", err) + } + + return decryptReader, nil +} + +func (uc *RestoreMongodbBackupUsecase) copyWithShutdownCheck( + ctx context.Context, + dst io.Writer, + src io.Reader, +) (int64, error) { + buf := make([]byte, 16*1024*1024) + var totalBytesWritten int64 + + for { + select { + case <-ctx.Done(): + return totalBytesWritten, fmt.Errorf("copy cancelled: %w", ctx.Err()) + default: + } + + if config.IsShouldShutdown() { + return totalBytesWritten, fmt.Errorf("copy cancelled due to shutdown") + } + + bytesRead, readErr := src.Read(buf) + if bytesRead > 0 { + bytesWritten, writeErr := dst.Write(buf[0:bytesRead]) + if bytesWritten < 0 || bytesRead < bytesWritten { + bytesWritten = 0 + if writeErr == nil { + writeErr = fmt.Errorf("invalid write result") + } + } + + if writeErr != nil { + return totalBytesWritten, writeErr + } + + if bytesRead != bytesWritten { + return totalBytesWritten, io.ErrShortWrite + } + + totalBytesWritten += int64(bytesWritten) + } + + if readErr != nil { + if readErr != io.EOF { + return totalBytesWritten, readErr + } + break + } + } + + return totalBytesWritten, nil +} + +func (uc *RestoreMongodbBackupUsecase) handleMongoRestoreError( + waitErr error, + stderrOutput []byte, + mongorestoreBin string, +) error { + stderrStr := string(stderrOutput) + + if containsIgnoreCase(stderrStr, "authentication failed") { + return fmt.Errorf( + "MongoDB authentication failed. 
Check username and password. stderr: %s", + stderrStr, + ) + } + + if containsIgnoreCase(stderrStr, "connection refused") || + containsIgnoreCase(stderrStr, "server selection error") { + return fmt.Errorf( + "MongoDB connection refused. Check if the server is running and accessible. stderr: %s", + stderrStr, + ) + } + + if containsIgnoreCase(stderrStr, "timeout") { + return fmt.Errorf( + "MongoDB connection timeout. stderr: %s", + stderrStr, + ) + } + + if len(stderrStr) > 0 { + return fmt.Errorf( + "%s failed: %w\nstderr: %s", + filepath.Base(mongorestoreBin), + waitErr, + stderrStr, + ) + } + + return fmt.Errorf("%s failed: %w", filepath.Base(mongorestoreBin), waitErr) +} + +func containsIgnoreCase(str, substr string) bool { + return strings.Contains(strings.ToLower(str), strings.ToLower(substr)) +} diff --git a/backend/internal/features/restores/usecases/restore_backup_uc.go b/backend/internal/features/restores/usecases/restore_backup_uc.go index eb334bb..f40504a 100644 --- a/backend/internal/features/restores/usecases/restore_backup_uc.go +++ b/backend/internal/features/restores/usecases/restore_backup_uc.go @@ -8,6 +8,7 @@ import ( "postgresus-backend/internal/features/databases" "postgresus-backend/internal/features/restores/models" usecases_mariadb "postgresus-backend/internal/features/restores/usecases/mariadb" + usecases_mongodb "postgresus-backend/internal/features/restores/usecases/mongodb" usecases_mysql "postgresus-backend/internal/features/restores/usecases/mysql" usecases_postgresql "postgresus-backend/internal/features/restores/usecases/postgresql" "postgresus-backend/internal/features/storages" @@ -17,6 +18,7 @@ type RestoreBackupUsecase struct { restorePostgresqlBackupUsecase *usecases_postgresql.RestorePostgresqlBackupUsecase restoreMysqlBackupUsecase *usecases_mysql.RestoreMysqlBackupUsecase restoreMariadbBackupUsecase *usecases_mariadb.RestoreMariadbBackupUsecase + restoreMongodbBackupUsecase *usecases_mongodb.RestoreMongodbBackupUsecase } func 
(uc *RestoreBackupUsecase) Execute( @@ -57,6 +59,15 @@ func (uc *RestoreBackupUsecase) Execute( backup, storage, ) + case databases.DatabaseTypeMongodb: + return uc.restoreMongodbBackupUsecase.Execute( + originalDB, + restoringToDB, + backupConfig, + restore, + backup, + storage, + ) default: return errors.New("database type not supported") } diff --git a/backend/internal/features/tests/mongodb_backup_restore_test.go b/backend/internal/features/tests/mongodb_backup_restore_test.go new file mode 100644 index 0000000..320f7bd --- /dev/null +++ b/backend/internal/features/tests/mongodb_backup_restore_test.go @@ -0,0 +1,686 @@ +package tests + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "postgresus-backend/internal/config" + "postgresus-backend/internal/features/backups/backups" + backups_config "postgresus-backend/internal/features/backups/config" + "postgresus-backend/internal/features/databases" + mongodbtypes "postgresus-backend/internal/features/databases/databases/mongodb" + "postgresus-backend/internal/features/restores" + restores_enums "postgresus-backend/internal/features/restores/enums" + restores_models "postgresus-backend/internal/features/restores/models" + "postgresus-backend/internal/features/storages" + users_enums "postgresus-backend/internal/features/users/enums" + users_testing "postgresus-backend/internal/features/users/testing" + workspaces_testing "postgresus-backend/internal/features/workspaces/testing" + test_utils "postgresus-backend/internal/util/testing" + "postgresus-backend/internal/util/tools" +) + +type MongodbContainer struct { + Host string + Port int + Username string + Password string + Database string + AuthDatabase string + Version 
tools.MongodbVersion + Client *mongo.Client +} + +type MongodbTestDataItem struct { + ID string `bson:"_id"` + Name string `bson:"name"` + Value int `bson:"value"` + CreatedAt time.Time `bson:"created_at"` +} + +func Test_BackupAndRestoreMongodb_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMongodbBackupRestoreForVersion(t, tc.version, tc.port) + }) + } +} + +func Test_BackupAndRestoreMongodbWithEncryption_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMongodbBackupRestoreWithEncryptionForVersion(t, tc.version, tc.port) + }) + } +} + +func Test_BackupAndRestoreMongodb_WithReadOnlyUser_RestoreIsSuccessful(t *testing.T) { + env := config.GetEnv() + cases := []struct { + name 
string + version tools.MongodbVersion + port string + }{ + {"MongoDB 4.0", tools.MongodbVersion40, env.TestMongodb40Port}, + {"MongoDB 4.2", tools.MongodbVersion42, env.TestMongodb42Port}, + {"MongoDB 4.4", tools.MongodbVersion44, env.TestMongodb44Port}, + {"MongoDB 5.0", tools.MongodbVersion50, env.TestMongodb50Port}, + {"MongoDB 6.0", tools.MongodbVersion60, env.TestMongodb60Port}, + {"MongoDB 7.0", tools.MongodbVersion70, env.TestMongodb70Port}, + {"MongoDB 8.0", tools.MongodbVersion80, env.TestMongodb80Port}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + testMongodbBackupRestoreWithReadOnlyUserForVersion(t, tc.version, tc.port) + }) + } +} + +func testMongodbBackupRestoreForVersion( + t *testing.T, + mongodbVersion tools.MongodbVersion, + port string, +) { + container, err := connectToMongodbContainer(mongodbVersion, port) + if err != nil { + t.Skipf("Skipping MongoDB %s test: %v", mongodbVersion, err) + return + } + defer container.Client.Disconnect(context.Background()) + + setupMongodbTestData(t, container) + + router := createTestRouter() + user := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace("MongoDB Test Workspace", user, router) + + storage := storages.CreateTestStorage(workspace.ID) + + database := createMongodbDatabaseViaAPI( + t, router, "MongoDB Test Database", workspace.ID, + container.Host, container.Port, + container.Username, container.Password, + container.Database, container.AuthDatabase, + container.Version, + user.Token, + ) + + enableBackupsViaAPI( + t, router, database.ID, storage.ID, + backups_config.BackupEncryptionNone, user.Token, + ) + + createBackupViaAPI(t, router, database.ID, user.Token) + + backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute) + assert.Equal(t, backups.BackupStatusCompleted, backup.Status) + + newDBName := "restoreddb_mongodb_" + uuid.New().String()[:8] + + 
createMongodbRestoreViaAPI( + t, router, backup.ID, + container.Host, container.Port, + container.Username, container.Password, + newDBName, container.AuthDatabase, + container.Version, + user.Token, + ) + + restore := waitForMongodbRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute) + assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status) + + verifyMongodbDataIntegrity(t, container, newDBName) + + ctx := context.Background() + _ = container.Client.Database(newDBName).Drop(ctx) + + err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String())) + if err != nil { + t.Logf("Warning: Failed to delete backup file: %v", err) + } + + test_utils.MakeDeleteRequest( + t, + router, + "/api/v1/databases/"+database.ID.String(), + "Bearer "+user.Token, + http.StatusNoContent, + ) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func testMongodbBackupRestoreWithEncryptionForVersion( + t *testing.T, + mongodbVersion tools.MongodbVersion, + port string, +) { + container, err := connectToMongodbContainer(mongodbVersion, port) + if err != nil { + t.Skipf("Skipping MongoDB %s test: %v", mongodbVersion, err) + return + } + defer container.Client.Disconnect(context.Background()) + + setupMongodbTestData(t, container) + + router := createTestRouter() + user := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace( + "MongoDB Encrypted Test Workspace", + user, + router, + ) + + storage := storages.CreateTestStorage(workspace.ID) + + database := createMongodbDatabaseViaAPI( + t, router, "MongoDB Encrypted Test Database", workspace.ID, + container.Host, container.Port, + container.Username, container.Password, + container.Database, container.AuthDatabase, + container.Version, + user.Token, + ) + + enableBackupsViaAPI( + t, router, database.ID, storage.ID, + backups_config.BackupEncryptionEncrypted, user.Token, + ) + + 
createBackupViaAPI(t, router, database.ID, user.Token) + + backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute) + assert.Equal(t, backups.BackupStatusCompleted, backup.Status) + assert.Equal(t, backups_config.BackupEncryptionEncrypted, backup.Encryption) + + newDBName := "restoreddb_mongodb_enc_" + uuid.New().String()[:8] + + createMongodbRestoreViaAPI( + t, router, backup.ID, + container.Host, container.Port, + container.Username, container.Password, + newDBName, container.AuthDatabase, + container.Version, + user.Token, + ) + + restore := waitForMongodbRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute) + assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status) + + verifyMongodbDataIntegrity(t, container, newDBName) + + ctx := context.Background() + _ = container.Client.Database(newDBName).Drop(ctx) + + err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String())) + if err != nil { + t.Logf("Warning: Failed to delete backup file: %v", err) + } + + test_utils.MakeDeleteRequest( + t, + router, + "/api/v1/databases/"+database.ID.String(), + "Bearer "+user.Token, + http.StatusNoContent, + ) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + +func testMongodbBackupRestoreWithReadOnlyUserForVersion( + t *testing.T, + mongodbVersion tools.MongodbVersion, + port string, +) { + container, err := connectToMongodbContainer(mongodbVersion, port) + if err != nil { + t.Skipf("Skipping MongoDB %s test: %v", mongodbVersion, err) + return + } + defer container.Client.Disconnect(context.Background()) + + setupMongodbTestData(t, container) + + router := createTestRouter() + user := users_testing.CreateTestUser(users_enums.UserRoleMember) + workspace := workspaces_testing.CreateTestWorkspace( + "MongoDB ReadOnly Test Workspace", + user, + router, + ) + + storage := storages.CreateTestStorage(workspace.ID) + + database := createMongodbDatabaseViaAPI( + 
t, router, "MongoDB ReadOnly Test Database", workspace.ID, + container.Host, container.Port, + container.Username, container.Password, + container.Database, container.AuthDatabase, + container.Version, + user.Token, + ) + + readOnlyUser := createMongodbReadOnlyUserViaAPI(t, router, database.ID, user.Token) + assert.NotEmpty(t, readOnlyUser.Username) + assert.NotEmpty(t, readOnlyUser.Password) + + updatedDatabase := updateMongodbDatabaseCredentialsViaAPI( + t, router, database, + readOnlyUser.Username, readOnlyUser.Password, + user.Token, + ) + + enableBackupsViaAPI( + t, router, updatedDatabase.ID, storage.ID, + backups_config.BackupEncryptionNone, user.Token, + ) + + createBackupViaAPI(t, router, updatedDatabase.ID, user.Token) + + backup := waitForBackupCompletion(t, router, updatedDatabase.ID, user.Token, 5*time.Minute) + assert.Equal(t, backups.BackupStatusCompleted, backup.Status) + + newDBName := "restoreddb_mongodb_ro_" + uuid.New().String()[:8] + + createMongodbRestoreViaAPI( + t, router, backup.ID, + container.Host, container.Port, + container.Username, container.Password, + newDBName, container.AuthDatabase, + container.Version, + user.Token, + ) + + restore := waitForMongodbRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute) + assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status) + + verifyMongodbDataIntegrity(t, container, newDBName) + + ctx := context.Background() + _ = container.Client.Database(newDBName).Drop(ctx) + + dropMongodbUserSafe(container.Client, readOnlyUser.Username, container.AuthDatabase) + + err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String())) + if err != nil { + t.Logf("Warning: Failed to delete backup file: %v", err) + } + + test_utils.MakeDeleteRequest( + t, + router, + "/api/v1/databases/"+updatedDatabase.ID.String(), + "Bearer "+user.Token, + http.StatusNoContent, + ) + storages.RemoveTestStorage(storage.ID) + workspaces_testing.RemoveTestWorkspace(workspace, router) +} + 
+func createMongodbDatabaseViaAPI( + t *testing.T, + router *gin.Engine, + name string, + workspaceID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + authDatabase string, + version tools.MongodbVersion, + token string, +) *databases.Database { + request := databases.Database{ + Name: name, + WorkspaceID: &workspaceID, + Type: databases.DatabaseTypeMongodb, + Mongodb: &mongodbtypes.MongodbDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: database, + AuthDatabase: authDatabase, + Version: version, + IsHttps: false, + }, + } + + w := workspaces_testing.MakeAPIRequest( + router, + "POST", + "/api/v1/databases/create", + "Bearer "+token, + request, + ) + + if w.Code != http.StatusCreated { + t.Fatalf("Failed to create MongoDB database. Status: %d, Body: %s", w.Code, w.Body.String()) + } + + var createdDatabase databases.Database + if err := json.Unmarshal(w.Body.Bytes(), &createdDatabase); err != nil { + t.Fatalf("Failed to unmarshal database response: %v", err) + } + + return &createdDatabase +} + +func createMongodbRestoreViaAPI( + t *testing.T, + router *gin.Engine, + backupID uuid.UUID, + host string, + port int, + username string, + password string, + database string, + authDatabase string, + version tools.MongodbVersion, + token string, +) { + request := restores.RestoreBackupRequest{ + MongodbDatabase: &mongodbtypes.MongodbDatabase{ + Host: host, + Port: port, + Username: username, + Password: password, + Database: database, + AuthDatabase: authDatabase, + Version: version, + IsHttps: false, + }, + } + + test_utils.MakePostRequest( + t, + router, + fmt.Sprintf("/api/v1/restores/%s/restore", backupID.String()), + "Bearer "+token, + request, + http.StatusOK, + ) +} + +func waitForMongodbRestoreCompletion( + t *testing.T, + router *gin.Engine, + backupID uuid.UUID, + token string, + timeout time.Duration, +) *restores_models.Restore { + startTime := time.Now() + pollInterval 
:= 500 * time.Millisecond + + for { + if time.Since(startTime) > timeout { + t.Fatalf("Timeout waiting for MongoDB restore completion after %v", timeout) + } + + var restoresList []*restores_models.Restore + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/restores/%s", backupID.String()), + "Bearer "+token, + http.StatusOK, + &restoresList, + ) + + for _, restore := range restoresList { + if restore.Status == restores_enums.RestoreStatusCompleted { + return restore + } + if restore.Status == restores_enums.RestoreStatusFailed { + failMsg := "unknown error" + if restore.FailMessage != nil { + failMsg = *restore.FailMessage + } + t.Fatalf("MongoDB restore failed: %s", failMsg) + } + } + + time.Sleep(pollInterval) + } +} + +func verifyMongodbDataIntegrity(t *testing.T, container *MongodbContainer, restoredDBName string) { + ctx := context.Background() + + originalCollection := container.Client.Database(container.Database).Collection("test_data") + restoredCollection := container.Client.Database(restoredDBName).Collection("test_data") + + originalCount, err := originalCollection.CountDocuments(ctx, bson.M{}) + assert.NoError(t, err) + + restoredCount, err := restoredCollection.CountDocuments(ctx, bson.M{}) + assert.NoError(t, err) + + assert.Equal(t, originalCount, restoredCount, "Should have same number of documents") + + var originalDocs []MongodbTestDataItem + cursor, err := originalCollection.Find( + ctx, + bson.M{}, + options.Find().SetSort(bson.D{{Key: "_id", Value: 1}}), + ) + assert.NoError(t, err) + err = cursor.All(ctx, &originalDocs) + assert.NoError(t, err) + + var restoredDocs []MongodbTestDataItem + cursor, err = restoredCollection.Find( + ctx, + bson.M{}, + options.Find().SetSort(bson.D{{Key: "_id", Value: 1}}), + ) + assert.NoError(t, err) + err = cursor.All(ctx, &restoredDocs) + assert.NoError(t, err) + + assert.Equal(t, len(originalDocs), len(restoredDocs), "Should have same number of documents") + + for i := range 
originalDocs { + assert.Equal(t, originalDocs[i].ID, restoredDocs[i].ID, "ID should match") + assert.Equal(t, originalDocs[i].Name, restoredDocs[i].Name, "Name should match") + assert.Equal(t, originalDocs[i].Value, restoredDocs[i].Value, "Value should match") + } +} + +func connectToMongodbContainer( + version tools.MongodbVersion, + port string, +) (*MongodbContainer, error) { + if port == "" { + return nil, fmt.Errorf("MongoDB %s port not configured", version) + } + + dbName := "testdb" + password := "rootpassword" + username := "root" + authDatabase := "admin" + host := "127.0.0.1" + + portInt, err := strconv.Atoi(port) + if err != nil { + return nil, fmt.Errorf("failed to parse port: %w", err) + } + + uri := fmt.Sprintf( + "mongodb://%s:%s@%s:%d/%s?authSource=%s", + username, password, host, portInt, dbName, authDatabase, + ) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clientOptions := options.Client().ApplyURI(uri) + client, err := mongo.Connect(ctx, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to connect to MongoDB: %w", err) + } + + if err = client.Ping(ctx, nil); err != nil { + return nil, fmt.Errorf("failed to ping MongoDB: %w", err) + } + + return &MongodbContainer{ + Host: host, + Port: portInt, + Username: username, + Password: password, + Database: dbName, + AuthDatabase: authDatabase, + Version: version, + Client: client, + }, nil +} + +func setupMongodbTestData(t *testing.T, container *MongodbContainer) { + ctx := context.Background() + collection := container.Client.Database(container.Database).Collection("test_data") + + _ = collection.Drop(ctx) + + testDocs := []interface{}{ + MongodbTestDataItem{ + ID: "1", + Name: "test1", + Value: 100, + CreatedAt: time.Now().UTC(), + }, + MongodbTestDataItem{ + ID: "2", + Name: "test2", + Value: 200, + CreatedAt: time.Now().UTC(), + }, + MongodbTestDataItem{ + ID: "3", + Name: "test3", + Value: 300, + CreatedAt: time.Now().UTC(), + }, 
+ } + + _, err := collection.InsertMany(ctx, testDocs) + assert.NoError(t, err) +} + +func createMongodbReadOnlyUserViaAPI( + t *testing.T, + router *gin.Engine, + databaseID uuid.UUID, + token string, +) *databases.CreateReadOnlyUserResponse { + var database databases.Database + test_utils.MakeGetRequestAndUnmarshal( + t, + router, + fmt.Sprintf("/api/v1/databases/%s", databaseID.String()), + "Bearer "+token, + http.StatusOK, + &database, + ) + + var response databases.CreateReadOnlyUserResponse + test_utils.MakePostRequestAndUnmarshal( + t, + router, + "/api/v1/databases/create-readonly-user", + "Bearer "+token, + database, + http.StatusOK, + &response, + ) + + return &response +} + +func updateMongodbDatabaseCredentialsViaAPI( + t *testing.T, + router *gin.Engine, + database *databases.Database, + username string, + password string, + token string, +) *databases.Database { + database.Mongodb.Username = username + database.Mongodb.Password = password + + w := workspaces_testing.MakeAPIRequest( + router, + "POST", + "/api/v1/databases/update", + "Bearer "+token, + database, + ) + + if w.Code != http.StatusOK { + t.Fatalf("Failed to update MongoDB database. 
Status: %d, Body: %s", w.Code, w.Body.String()) + } + + var updatedDatabase databases.Database + if err := json.Unmarshal(w.Body.Bytes(), &updatedDatabase); err != nil { + t.Fatalf("Failed to unmarshal database response: %v", err) + } + + return &updatedDatabase +} + +func dropMongodbUserSafe(client *mongo.Client, username, authDatabase string) { + ctx := context.Background() + adminDB := client.Database(authDatabase) + _ = adminDB.RunCommand(ctx, bson.D{{Key: "dropUser", Value: username}}) +} diff --git a/backend/internal/features/tests/postgresql_backup_restore_test.go b/backend/internal/features/tests/postgresql_backup_restore_test.go index 35feb84..214115e 100644 --- a/backend/internal/features/tests/postgresql_backup_restore_test.go +++ b/backend/internal/features/tests/postgresql_backup_restore_test.go @@ -447,10 +447,13 @@ func testBackupRestoreForVersion(t *testing.T, pgVersion string, port string) { func testSchemaSelectionAllSchemasForVersion(t *testing.T, pgVersion string, port string) { container, err := connectToPostgresContainer(pgVersion, port) - assert.NoError(t, err) + if err != nil { + t.Fatalf("Failed to connect to PostgreSQL container: %v", err) + } defer container.DB.Close() _, err = container.DB.Exec(` + DROP TABLE IF EXISTS public.public_table; DROP SCHEMA IF EXISTS schema_a CASCADE; DROP SCHEMA IF EXISTS schema_b CASCADE; CREATE SCHEMA schema_a; @@ -569,7 +572,9 @@ func testSchemaSelectionAllSchemasForVersion(t *testing.T, pgVersion string, por func testBackupRestoreWithExcludeExtensionsForVersion(t *testing.T, pgVersion string, port string) { container, err := connectToPostgresContainer(pgVersion, port) - assert.NoError(t, err) + if err != nil { + t.Fatalf("Failed to connect to PostgreSQL container: %v", err) + } defer container.DB.Close() // Create table with uuid-ossp extension and add a comment on the extension @@ -696,7 +701,9 @@ func testBackupRestoreWithoutExcludeExtensionsForVersion( port string, ) { container, err := 
connectToPostgresContainer(pgVersion, port) - assert.NoError(t, err) + if err != nil { + t.Fatalf("Failed to connect to PostgreSQL container: %v", err) + } defer container.DB.Close() // Create table with uuid-ossp extension @@ -935,10 +942,13 @@ func testSchemaSelectionOnlySpecifiedSchemasForVersion( port string, ) { container, err := connectToPostgresContainer(pgVersion, port) - assert.NoError(t, err) + if err != nil { + t.Fatalf("Failed to connect to PostgreSQL container: %v", err) + } defer container.DB.Close() _, err = container.DB.Exec(` + DROP TABLE IF EXISTS public.public_table; DROP SCHEMA IF EXISTS schema_a CASCADE; DROP SCHEMA IF EXISTS schema_b CASCADE; CREATE SCHEMA schema_a; diff --git a/backend/internal/util/tools/mongodb.go b/backend/internal/util/tools/mongodb.go new file mode 100644 index 0000000..66cc668 --- /dev/null +++ b/backend/internal/util/tools/mongodb.go @@ -0,0 +1,168 @@ +package tools + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + "runtime" + + env_utils "postgresus-backend/internal/util/env" +) + +type MongodbVersion string + +const ( + MongodbVersion40 MongodbVersion = "4.0" + MongodbVersion42 MongodbVersion = "4.2" + MongodbVersion44 MongodbVersion = "4.4" + MongodbVersion50 MongodbVersion = "5.0" + MongodbVersion60 MongodbVersion = "6.0" + MongodbVersion70 MongodbVersion = "7.0" + MongodbVersion80 MongodbVersion = "8.0" +) + +type MongodbExecutable string + +const ( + MongodbExecutableMongodump MongodbExecutable = "mongodump" + MongodbExecutableMongorestore MongodbExecutable = "mongorestore" +) + +// GetMongodbExecutable returns the full path to a MongoDB executable. +// MongoDB Database Tools use a single client version that is backward compatible +// with all server versions. 
+func GetMongodbExecutable( + executable MongodbExecutable, + envMode env_utils.EnvMode, + mongodbInstallDir string, +) string { + basePath := getMongodbBasePath(envMode, mongodbInstallDir) + executableName := string(executable) + + if runtime.GOOS == "windows" { + executableName += ".exe" + } + + return filepath.Join(basePath, executableName) +} + +// VerifyMongodbInstallation verifies that MongoDB Database Tools are installed. +// Unlike PostgreSQL (version-specific), MongoDB tools use a single version that +// supports all server versions (backward compatible). +func VerifyMongodbInstallation( + logger *slog.Logger, + envMode env_utils.EnvMode, + mongodbInstallDir string, +) { + binDir := getMongodbBasePath(envMode, mongodbInstallDir) + + logger.Info( + "Verifying MongoDB Database Tools installation", + "path", binDir, + ) + + if _, err := os.Stat(binDir); os.IsNotExist(err) { + if envMode == env_utils.EnvModeDevelopment { + logger.Warn( + "MongoDB bin directory not found. MongoDB support will be disabled. Read ./tools/readme.md for details", + "path", + binDir, + ) + } else { + logger.Warn( + "MongoDB bin directory not found. MongoDB support will be disabled.", + "path", binDir, + ) + } + return + } + + requiredCommands := []MongodbExecutable{ + MongodbExecutableMongodump, + MongodbExecutableMongorestore, + } + + for _, cmd := range requiredCommands { + cmdPath := GetMongodbExecutable(cmd, envMode, mongodbInstallDir) + + logger.Info( + "Checking for MongoDB command", + "command", cmd, + "path", cmdPath, + ) + + if _, err := os.Stat(cmdPath); os.IsNotExist(err) { + if envMode == env_utils.EnvModeDevelopment { + logger.Warn( + "MongoDB command not found. MongoDB support will be disabled. Read ./tools/readme.md for details", + "command", + cmd, + "path", + cmdPath, + ) + } else { + logger.Warn( + "MongoDB command not found. 
MongoDB support will be disabled.", + "command", cmd, + "path", cmdPath, + ) + } + continue + } + + logger.Info("MongoDB command found", "command", cmd) + } + + logger.Info("MongoDB Database Tools verification completed!") +} + +// IsMongodbBackupVersionHigherThanRestoreVersion checks if backup was made with +// a newer MongoDB version than the restore target +func IsMongodbBackupVersionHigherThanRestoreVersion( + backupVersion, restoreVersion MongodbVersion, +) bool { + versionOrder := map[MongodbVersion]int{ + MongodbVersion40: 1, + MongodbVersion42: 2, + MongodbVersion44: 3, + MongodbVersion50: 4, + MongodbVersion60: 5, + MongodbVersion70: 6, + MongodbVersion80: 7, + } + return versionOrder[backupVersion] > versionOrder[restoreVersion] +} + +// GetMongodbVersionEnum converts a version string to MongodbVersion enum +func GetMongodbVersionEnum(version string) MongodbVersion { + switch version { + case "4.0": + return MongodbVersion40 + case "4.2": + return MongodbVersion42 + case "4.4": + return MongodbVersion44 + case "5.0": + return MongodbVersion50 + case "6.0": + return MongodbVersion60 + case "7.0": + return MongodbVersion70 + case "8.0": + return MongodbVersion80 + default: + panic(fmt.Sprintf("invalid mongodb version: %s", version)) + } +} + +func getMongodbBasePath( + envMode env_utils.EnvMode, + mongodbInstallDir string, +) string { + if envMode == env_utils.EnvModeDevelopment { + return filepath.Join(mongodbInstallDir, "bin") + } + // Production: single client version in /usr/local/mongodb-database-tools/bin + return "/usr/local/mongodb-database-tools/bin" +} diff --git a/backend/migrations/20251221195603_add_mongodb_databases_table.sql b/backend/migrations/20251221195603_add_mongodb_databases_table.sql new file mode 100644 index 0000000..3c603f1 --- /dev/null +++ b/backend/migrations/20251221195603_add_mongodb_databases_table.sql @@ -0,0 +1,28 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE mongodb_databases ( + id UUID PRIMARY KEY DEFAULT 
gen_random_uuid(), + database_id UUID REFERENCES databases(id) ON DELETE CASCADE, + version TEXT NOT NULL, + host TEXT NOT NULL, + port INT NOT NULL, + username TEXT NOT NULL, + password TEXT NOT NULL, + database TEXT NOT NULL, + auth_database TEXT NOT NULL DEFAULT 'admin', + is_https BOOLEAN NOT NULL DEFAULT FALSE +); +-- +goose StatementEnd + +-- +goose StatementBegin +CREATE INDEX idx_mongodb_databases_database_id ON mongodb_databases(database_id); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_mongodb_databases_database_id; +-- +goose StatementEnd + +-- +goose StatementBegin +DROP TABLE IF EXISTS mongodb_databases; +-- +goose StatementEnd diff --git a/backend/tools/download_linux.sh b/backend/tools/download_linux.sh index f3e6f19..1daadd5 100644 --- a/backend/tools/download_linux.sh +++ b/backend/tools/download_linux.sh @@ -5,7 +5,7 @@ set -e # Exit on any error # Ensure non-interactive mode for apt export DEBIAN_FRONTEND=noninteractive -echo "Installing PostgreSQL and MySQL client tools for Linux (Debian/Ubuntu)..." +echo "Installing PostgreSQL, MySQL, MariaDB and MongoDB client tools for Linux (Debian/Ubuntu)..." echo # Check if running on supported system @@ -225,6 +225,55 @@ for version in $mariadb_versions; do echo done +# ========== MongoDB Installation ========== +echo "========================================" +echo "Installing MongoDB Database Tools (single latest version)..." 
+echo "========================================" + +MONGODB_DIR="$(pwd)/mongodb" +mkdir -p "$MONGODB_DIR/bin" + +echo "Installing MongoDB Database Tools to: $MONGODB_DIR" + +# MongoDB Database Tools are backward compatible - single version supports all servers (4.0-8.0) +# Detect architecture +ARCH=$(uname -m) +if [ "$ARCH" = "x86_64" ]; then + MONGODB_TOOLS_URL="https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-x86_64-100.10.0.deb" +elif [ "$ARCH" = "aarch64" ]; then + MONGODB_TOOLS_URL="https://fastdl.mongodb.org/tools/db/mongodb-database-tools-debian12-aarch64-100.10.0.deb" +else + echo "Warning: Unsupported architecture $ARCH for MongoDB Database Tools" + MONGODB_TOOLS_URL="" +fi + +if [ -n "$MONGODB_TOOLS_URL" ]; then + TEMP_DIR="/tmp/mongodb_install" + mkdir -p "$TEMP_DIR" + cd "$TEMP_DIR" + + echo "Downloading MongoDB Database Tools..." + wget -q "$MONGODB_TOOLS_URL" -O mongodb-database-tools.deb || { + echo "Warning: Could not download MongoDB Database Tools" + cd - >/dev/null + rm -rf "$TEMP_DIR" + } + + if [ -f "mongodb-database-tools.deb" ]; then + echo "Installing MongoDB Database Tools..." 
+ $SUDO dpkg -i mongodb-database-tools.deb 2>/dev/null || $SUDO apt-get install -f -y -qq + + # Create symlinks to tools directory + ln -sf /usr/bin/mongodump "$MONGODB_DIR/bin/mongodump" + ln -sf /usr/bin/mongorestore "$MONGODB_DIR/bin/mongorestore" + + echo "MongoDB Database Tools installed successfully" + fi + + cd - >/dev/null + rm -rf "$TEMP_DIR" +fi + echo echo "========================================" @@ -234,6 +283,7 @@ echo echo "PostgreSQL client tools are available in: $POSTGRES_DIR" echo "MySQL client tools are available in: $MYSQL_DIR" echo "MariaDB client tools are available in: $MARIADB_DIR" +echo "MongoDB Database Tools are available in: $MONGODB_DIR" echo # List installed PostgreSQL versions @@ -269,8 +319,17 @@ for version in $mariadb_versions; do fi done +echo +echo "Installed MongoDB Database Tools:" +if [ -f "$MONGODB_DIR/bin/mongodump" ]; then + echo " mongodb: $MONGODB_DIR/bin/" + version_output=$("$MONGODB_DIR/bin/mongodump" --version 2>/dev/null | head -1) + echo " Version check: $version_output" +fi + echo echo "Usage examples:" echo " $POSTGRES_DIR/postgresql-15/bin/pg_dump --version" echo " $MYSQL_DIR/mysql-8.0/bin/mysqldump --version" -echo " $MARIADB_DIR/mariadb-12.1/bin/mariadb-dump --version" \ No newline at end of file +echo " $MARIADB_DIR/mariadb-12.1/bin/mariadb-dump --version" +echo " $MONGODB_DIR/bin/mongodump --version" \ No newline at end of file diff --git a/backend/tools/download_macos.sh b/backend/tools/download_macos.sh index f6217e7..e79cc70 100755 --- a/backend/tools/download_macos.sh +++ b/backend/tools/download_macos.sh @@ -2,7 +2,7 @@ set -e # Exit on any error -echo "Installing PostgreSQL and MySQL client tools for MacOS..." +echo "Installing PostgreSQL, MySQL, MariaDB and MongoDB client tools for MacOS..." 
echo # Check if Homebrew is installed @@ -284,6 +284,43 @@ for version in $mariadb_versions; do echo done +# ========== MongoDB Installation ========== +echo "========================================" +echo "Installing MongoDB Database Tools..." +echo "========================================" + +MONGODB_DIR="$(pwd)/mongodb" +mkdir -p "$MONGODB_DIR/bin" + +echo "Installing MongoDB Database Tools to: $MONGODB_DIR" + +# Install via Homebrew +echo " Installing MongoDB Database Tools via Homebrew..." +brew tap mongodb/brew 2>/dev/null || true +brew install mongodb-database-tools 2>/dev/null || { + echo " Warning: Could not install mongodb-database-tools via Homebrew" +} + +# Find Homebrew MongoDB tools path +BREW_MONGODB="" +if [ -f "/opt/homebrew/bin/mongodump" ]; then + BREW_MONGODB="/opt/homebrew/bin" +elif [ -f "/usr/local/bin/mongodump" ]; then + BREW_MONGODB="/usr/local/bin" +fi + +if [ -n "$BREW_MONGODB" ]; then + ln -sf "$BREW_MONGODB/mongodump" "$MONGODB_DIR/bin/mongodump" + ln -sf "$BREW_MONGODB/mongorestore" "$MONGODB_DIR/bin/mongorestore" + echo " MongoDB Database Tools linked from Homebrew" + + mongodump_ver=$("$MONGODB_DIR/bin/mongodump" --version 2>/dev/null | head -1) + echo " Verified: $mongodump_ver" +else + echo " Warning: Could not find MongoDB Database Tools binaries" + echo " Please install manually: brew tap mongodb/brew && brew install mongodb-database-tools" +fi + echo # Clean up build directory @@ -297,6 +334,7 @@ echo echo "PostgreSQL client tools are available in: $POSTGRES_DIR" echo "MySQL client tools are available in: $MYSQL_DIR" echo "MariaDB client tools are available in: $MARIADB_DIR" +echo "MongoDB Database Tools are available in: $MONGODB_DIR" echo # List installed PostgreSQL versions @@ -331,13 +369,23 @@ for version in $mariadb_versions; do fi done +echo +echo "Installed MongoDB Database Tools:" +if [ -f "$MONGODB_DIR/bin/mongodump" ]; then + mongodump_ver=$("$MONGODB_DIR/bin/mongodump" --version 2>/dev/null | head -1) + echo " 
mongodb: $MONGODB_DIR/bin/" + echo " $mongodump_ver" +fi + echo echo "Usage examples:" echo " $POSTGRES_DIR/postgresql-15/bin/pg_dump --version" echo " $MYSQL_DIR/mysql-8.0/bin/mysqldump --version" echo " $MARIADB_DIR/mariadb-12.1/bin/mariadb-dump --version" +echo " $MONGODB_DIR/bin/mongodump --version" echo echo "To add specific versions to your PATH temporarily:" echo " export PATH=\"$POSTGRES_DIR/postgresql-15/bin:\$PATH\"" echo " export PATH=\"$MYSQL_DIR/mysql-8.0/bin:\$PATH\"" -echo " export PATH=\"$MARIADB_DIR/mariadb-12.1/bin:\$PATH\"" \ No newline at end of file +echo " export PATH=\"$MARIADB_DIR/mariadb-12.1/bin:\$PATH\"" +echo " export PATH=\"$MONGODB_DIR/bin:\$PATH\"" \ No newline at end of file diff --git a/backend/tools/download_windows.bat b/backend/tools/download_windows.bat index 9fe9e17..0d7caf1 100644 --- a/backend/tools/download_windows.bat +++ b/backend/tools/download_windows.bat @@ -1,7 +1,7 @@ @echo off setlocal enabledelayedexpansion -echo Downloading and installing PostgreSQL and MySQL client tools for Windows... +echo Downloading and installing PostgreSQL, MySQL, MariaDB and MongoDB client tools for Windows... echo. :: Create directories if they don't exist @@ -9,15 +9,18 @@ if not exist "downloads" mkdir downloads if not exist "postgresql" mkdir postgresql if not exist "mysql" mkdir mysql if not exist "mariadb" mkdir mariadb +if not exist "mongodb" mkdir mongodb :: Get the absolute paths set "POSTGRES_DIR=%cd%\postgresql" set "MYSQL_DIR=%cd%\mysql" set "MARIADB_DIR=%cd%\mariadb" +set "MONGODB_DIR=%cd%\mongodb" echo PostgreSQL will be installed to: %POSTGRES_DIR% echo MySQL will be installed to: %MYSQL_DIR% echo MariaDB will be installed to: %MARIADB_DIR% +echo MongoDB will be installed to: %MONGODB_DIR% echo. cd downloads @@ -286,6 +289,73 @@ for %%v in (%mariadb_versions%) do ( :skip_mariadb echo. +:: ========== MongoDB Installation ========== +echo ======================================== +echo Installing MongoDB Database Tools... 
+echo ======================================== +echo. + +:: MongoDB Database Tools are backward compatible - single version supports all servers (4.0-8.0) +set "MONGODB_TOOLS_URL=https://fastdl.mongodb.org/tools/db/mongodb-database-tools-windows-x86_64-100.10.0.zip" + +set "mongodb_install_dir=%MONGODB_DIR%" + +:: Check if already installed +if exist "!mongodb_install_dir!\bin\mongodump.exe" ( + echo MongoDB Database Tools already installed, skipping... +) else ( + set "mongodb_filename=mongodb-database-tools.zip" + + if not exist "!mongodb_filename!" ( + echo Downloading MongoDB Database Tools... + curl -L -o "!mongodb_filename!" -A "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" "!MONGODB_TOOLS_URL!" + if !errorlevel! neq 0 ( + echo ERROR: Download request failed + goto :skip_mongodb + ) + if not exist "!mongodb_filename!" ( + echo ERROR: Download failed - file not created + goto :skip_mongodb + ) + for %%s in ("!mongodb_filename!") do if %%~zs LSS 1000000 ( + echo ERROR: Download failed - file too small, likely error page + del "!mongodb_filename!" 2>nul + goto :skip_mongodb + ) + echo MongoDB Database Tools downloaded successfully + ) else ( + echo MongoDB Database Tools already downloaded + ) + + :: Extract MongoDB Database Tools + echo Extracting MongoDB Database Tools... + mkdir "!mongodb_install_dir!" 2>nul + mkdir "!mongodb_install_dir!\bin" 2>nul + + powershell -Command "Expand-Archive -Path '!mongodb_filename!' 
-DestinationPath '!mongodb_install_dir!_temp' -Force" + + :: Move files from nested directory to install_dir + for /d %%d in ("!mongodb_install_dir!_temp\mongodb-database-tools-*") do ( + if exist "%%d\bin\mongodump.exe" ( + copy "%%d\bin\mongodump.exe" "!mongodb_install_dir!\bin\" >nul 2>&1 + copy "%%d\bin\mongorestore.exe" "!mongodb_install_dir!\bin\" >nul 2>&1 + ) + ) + + :: Cleanup temp directory + rmdir /s /q "!mongodb_install_dir!_temp" 2>nul + + :: Verify installation + if exist "!mongodb_install_dir!\bin\mongodump.exe" ( + echo MongoDB Database Tools installed successfully + ) else ( + echo Failed to install MongoDB Database Tools - mongodump.exe not found + ) +) + +:skip_mongodb +echo. + cd .. echo. @@ -296,6 +366,7 @@ echo. echo PostgreSQL versions are installed in: %POSTGRES_DIR% echo MySQL versions are installed in: %MYSQL_DIR% echo MariaDB is installed in: %MARIADB_DIR% +echo MongoDB Database Tools are installed in: %MONGODB_DIR% echo. :: List installed PostgreSQL versions @@ -325,11 +396,18 @@ for %%v in (%mariadb_versions%) do ( ) ) +echo. +echo Installed MongoDB Database Tools: +if exist "%MONGODB_DIR%\bin\mongodump.exe" ( + echo mongodb: %MONGODB_DIR%\bin\ +) + echo. echo Usage examples: echo %POSTGRES_DIR%\postgresql-15\bin\pg_dump.exe --version echo %MYSQL_DIR%\mysql-8.0\bin\mysqldump.exe --version echo %MARIADB_DIR%\mariadb-12.1\bin\mariadb-dump.exe --version +echo %MONGODB_DIR%\bin\mongodump.exe --version echo. 
pause diff --git a/backend/tools/mongodb/bin/mongodump.exe b/backend/tools/mongodb/bin/mongodump.exe new file mode 100644 index 0000000..e4e3cfc Binary files /dev/null and b/backend/tools/mongodb/bin/mongodump.exe differ diff --git a/backend/tools/mongodb/bin/mongorestore.exe b/backend/tools/mongodb/bin/mongorestore.exe new file mode 100644 index 0000000..6f5aa7a Binary files /dev/null and b/backend/tools/mongodb/bin/mongorestore.exe differ diff --git a/backend/tools/readme.md b/backend/tools/readme.md index 4b83bb2..fac77dc 100644 --- a/backend/tools/readme.md +++ b/backend/tools/readme.md @@ -1,7 +1,7 @@ This directory is needed only for development and CI\CD. -We have to download and install all the PostgreSQL versions from 12 to 18, MySQL versions 5.7, 8.0, 8.4 and MariaDB client tools locally. -This is needed so we can call pg_dump, pg_restore, mysqldump, mysql, mariadb-dump, mariadb, etc. on each version of the database. +We have to download and install all the PostgreSQL versions from 12 to 18, MySQL versions 5.7, 8.0, 8.4, MariaDB client tools and MongoDB Database Tools locally. +This is needed so we can call pg_dump, pg_restore, mysqldump, mysql, mariadb-dump, mariadb, mongodump, mongorestore, etc. on each version of the database. You do not need to install the databases fully with all the components. We only need the client tools for each version. @@ -33,6 +33,14 @@ MariaDB uses two client versions to support all server versions: The reason for two versions is that MariaDB 12.1 client uses SQL queries that reference the `generation_expression` column in `information_schema.columns`, which was only added in MariaDB 10.2. Older servers (5.5, 10.1) don't have this column and fail with newer clients. 
+### MongoDB + +MongoDB Database Tools are backward compatible - a single version supports all server versions: + +- MongoDB Database Tools 100.10.0 (supports MongoDB servers 4.0-8.0) + +The MongoDB Database Tools (`mongodump`, `mongorestore`) are designed to be backward compatible with all MongoDB server versions, so only one client version is needed. + ## Installation Run the appropriate download script for your platform: @@ -134,6 +142,15 @@ For example: - `./tools/mariadb/mariadb-10.6/bin/mariadb-dump` (legacy - for servers 5.5, 10.1) - `./tools/mariadb/mariadb-12.1/bin/mariadb-dump` (modern - for servers 10.2+) +### MongoDB + +MongoDB Database Tools use a single version that supports all server versions: + +``` +./tools/mongodb/bin/mongodump +./tools/mongodb/bin/mongorestore +``` + ## Usage After installation, you can use version-specific tools: @@ -148,6 +165,9 @@ After installation, you can use version-specific tools: # Windows - MariaDB ./mariadb/mariadb-12.1/bin/mariadb-dump.exe --version +# Windows - MongoDB +./mongodb/bin/mongodump.exe --version + # Linux/MacOS - PostgreSQL ./postgresql/postgresql-15/bin/pg_dump --version @@ -156,6 +176,9 @@ After installation, you can use version-specific tools: # Linux/MacOS - MariaDB ./mariadb/mariadb-12.1/bin/mariadb-dump --version + +# Linux/MacOS - MongoDB +./mongodb/bin/mongodump --version ``` ## Environment Variables @@ -172,6 +195,10 @@ MYSQL_INSTALL_DIR=C:\path\to\tools\mysql # MariaDB tools directory (default: ./tools/mariadb) # Contains subdirectories: mariadb-10.6 and mariadb-12.1 MARIADB_INSTALL_DIR=C:\path\to\tools\mariadb + +# MongoDB tools directory (default: ./tools/mongodb) +# Contains bin subdirectory with mongodump and mongorestore +MONGODB_INSTALL_DIR=C:\path\to\tools\mongodb ``` ## Troubleshooting @@ -197,7 +224,8 @@ If downloads fail, you can manually download the files: - PostgreSQL: https://www.postgresql.org/ftp/source/ - MySQL: https://dev.mysql.com/downloads/mysql/ -- MariaDB: 
https://mariadb.org/download/ or https://cdn.mysql.com/archives/mariadb-12.0/ +- MariaDB: https://mariadb.org/download/ or https://archive.mariadb.org/ +- MongoDB Database Tools: https://www.mongodb.com/try/download/database-tools ### MariaDB Client Compatibility @@ -216,3 +244,17 @@ MariaDB client tools require different versions depending on the server: - MariaDB 12.0 The reason is that MariaDB 12.1 client uses SQL queries referencing the `generation_expression` column in `information_schema.columns`, which was added in MariaDB 10.2. The application automatically selects the appropriate client version based on the target server version. + +### MongoDB Database Tools Compatibility + +MongoDB Database Tools are backward compatible - a single version supports all server versions: + +**Supported MongoDB server versions:** + +- MongoDB 4.0, 4.2, 4.4 (EOL but still supported) +- MongoDB 5.0 +- MongoDB 6.0 +- MongoDB 7.0 (LTS) +- MongoDB 8.0 (Current) + +The application uses MongoDB Database Tools version 100.10.0, which supports all the above server versions. 
diff --git a/frontend/public/icons/databases/mongodb.svg b/frontend/public/icons/databases/mongodb.svg new file mode 100644 index 0000000..65c4a12 --- /dev/null +++ b/frontend/public/icons/databases/mongodb.svg @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/frontend/src/entity/databases/index.ts b/frontend/src/entity/databases/index.ts index 385d8ba..a090cf2 100644 --- a/frontend/src/entity/databases/index.ts +++ b/frontend/src/entity/databases/index.ts @@ -9,5 +9,7 @@ export { type MysqlDatabase } from './model/mysql/MysqlDatabase'; export { MysqlVersion } from './model/mysql/MysqlVersion'; export { type MariadbDatabase } from './model/mariadb/MariadbDatabase'; export { MariadbVersion } from './model/mariadb/MariadbVersion'; +export { type MongodbDatabase } from './model/mongodb/MongodbDatabase'; +export { MongodbVersion } from './model/mongodb/MongodbVersion'; export { type IsReadOnlyResponse } from './model/IsReadOnlyResponse'; export { type CreateReadOnlyUserResponse } from './model/CreateReadOnlyUserResponse'; diff --git a/frontend/src/entity/databases/model/Database.ts b/frontend/src/entity/databases/model/Database.ts index 458e72e..b64dfdb 100644 --- a/frontend/src/entity/databases/model/Database.ts +++ b/frontend/src/entity/databases/model/Database.ts @@ -2,6 +2,7 @@ import type { Notifier } from '../../notifiers'; import type { DatabaseType } from './DatabaseType'; import type { HealthStatus } from './HealthStatus'; import type { MariadbDatabase } from './mariadb/MariadbDatabase'; +import type { MongodbDatabase } from './mongodb/MongodbDatabase'; import type { MysqlDatabase } from './mysql/MysqlDatabase'; import type { PostgresqlDatabase } from './postgresql/PostgresqlDatabase'; @@ -14,6 +15,7 @@ export interface Database { postgresql?: PostgresqlDatabase; mysql?: MysqlDatabase; mariadb?: MariadbDatabase; + mongodb?: MongodbDatabase; notifiers: Notifier[]; diff --git a/frontend/src/entity/databases/model/DatabaseType.ts 
b/frontend/src/entity/databases/model/DatabaseType.ts index 59f666a..6e40e4a 100644 --- a/frontend/src/entity/databases/model/DatabaseType.ts +++ b/frontend/src/entity/databases/model/DatabaseType.ts @@ -2,4 +2,5 @@ export enum DatabaseType { POSTGRES = 'POSTGRES', MYSQL = 'MYSQL', MARIADB = 'MARIADB', + MONGODB = 'MONGODB', } diff --git a/frontend/src/entity/databases/model/getDatabaseLogoFromType.ts b/frontend/src/entity/databases/model/getDatabaseLogoFromType.ts index d0ccd97..d903371 100644 --- a/frontend/src/entity/databases/model/getDatabaseLogoFromType.ts +++ b/frontend/src/entity/databases/model/getDatabaseLogoFromType.ts @@ -8,6 +8,8 @@ export const getDatabaseLogoFromType = (type: DatabaseType) => { return '/icons/databases/mysql.svg'; case DatabaseType.MARIADB: return '/icons/databases/mariadb.svg'; + case DatabaseType.MONGODB: + return '/icons/databases/mongodb.svg'; default: return ''; } diff --git a/frontend/src/entity/databases/model/mongodb/MongodbConnectionStringParser.test.ts b/frontend/src/entity/databases/model/mongodb/MongodbConnectionStringParser.test.ts new file mode 100644 index 0000000..5e2e490 --- /dev/null +++ b/frontend/src/entity/databases/model/mongodb/MongodbConnectionStringParser.test.ts @@ -0,0 +1,449 @@ +import { describe, expect, it } from 'vitest'; + +import { + MongodbConnectionStringParser, + type ParseError, + type ParseResult, +} from './MongodbConnectionStringParser'; + +describe('MongodbConnectionStringParser', () => { + // Helper to assert successful parse + const expectSuccess = (result: ParseResult | ParseError): ParseResult => { + expect('error' in result).toBe(false); + return result as ParseResult; + }; + + // Helper to assert parse error + const expectError = (result: ParseResult | ParseError): ParseError => { + expect('error' in result).toBe(true); + return result as ParseError; + }; + + describe('Standard MongoDB URI (mongodb://)', () => { + it('should parse basic mongodb:// connection string', () => { + const 
result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://myuser:mypassword@localhost:27017/mydb'), + ); + + expect(result.host).toBe('localhost'); + expect(result.port).toBe(27017); + expect(result.username).toBe('myuser'); + expect(result.password).toBe('mypassword'); + expect(result.database).toBe('mydb'); + expect(result.authDatabase).toBe('admin'); + expect(result.useTls).toBe(false); + }); + + it('should parse connection string without database', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://root:rostislav123!@82.146.56.0:27017'), + ); + + expect(result.host).toBe('82.146.56.0'); + expect(result.port).toBe(27017); + expect(result.username).toBe('root'); + expect(result.password).toBe('rostislav123!'); + expect(result.database).toBe(''); + expect(result.authDatabase).toBe('admin'); + expect(result.useTls).toBe(false); + }); + + it('should default port to 27017 when not specified', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user:pass@host/db'), + ); + + expect(result.port).toBe(27017); + }); + + it('should handle URL-encoded passwords', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user:p%40ss%23word@host:27017/db'), + ); + + expect(result.password).toBe('p@ss#word'); + }); + + it('should handle URL-encoded usernames', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user%40domain:password@host:27017/db'), + ); + + expect(result.username).toBe('user@domain'); + }); + + it('should parse authSource from query string', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'mongodb://user:pass@host:27017/mydb?authSource=authdb', + ), + ); + + expect(result.authDatabase).toBe('authdb'); + }); + + it('should parse authDatabase from query string', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 
'mongodb://user:pass@host:27017/mydb?authDatabase=authdb', + ), + ); + + expect(result.authDatabase).toBe('authdb'); + }); + }); + + describe('MongoDB Atlas SRV URI (mongodb+srv://)', () => { + it('should parse mongodb+srv:// connection string', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'mongodb+srv://atlasuser:atlaspass@cluster0.abc123.mongodb.net/mydb', + ), + ); + + expect(result.host).toBe('cluster0.abc123.mongodb.net'); + expect(result.port).toBe(27017); + expect(result.username).toBe('atlasuser'); + expect(result.password).toBe('atlaspass'); + expect(result.database).toBe('mydb'); + expect(result.useTls).toBe(true); // SRV connections use TLS by default + }); + + it('should parse mongodb+srv:// without database', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'mongodb+srv://atlasuser:atlaspass@cluster0.abc123.mongodb.net', + ), + ); + + expect(result.host).toBe('cluster0.abc123.mongodb.net'); + expect(result.database).toBe(''); + expect(result.useTls).toBe(true); + }); + }); + + describe('TLS/SSL Mode Handling', () => { + it('should set useTls=true for tls=true', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://u:p@host:27017/db?tls=true'), + ); + + expect(result.useTls).toBe(true); + }); + + it('should set useTls=true for ssl=true', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://u:p@host:27017/db?ssl=true'), + ); + + expect(result.useTls).toBe(true); + }); + + it('should set useTls=true for tls=yes', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://u:p@host:27017/db?tls=yes'), + ); + + expect(result.useTls).toBe(true); + }); + + it('should set useTls=true for tls=1', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://u:p@host:27017/db?tls=1'), + ); + + expect(result.useTls).toBe(true); + }); + + it('should set useTls=false for 
tls=false', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://u:p@host:27017/db?tls=false'), + ); + + expect(result.useTls).toBe(false); + }); + + it('should set useTls=false when no tls/ssl specified', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://u:p@host:27017/db'), + ); + + expect(result.useTls).toBe(false); + }); + }); + + describe('Key-Value Format', () => { + it('should parse key-value format connection string', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost port=27017 database=mydb user=admin password=secret', + ), + ); + + expect(result.host).toBe('localhost'); + expect(result.port).toBe(27017); + expect(result.username).toBe('admin'); + expect(result.password).toBe('secret'); + expect(result.database).toBe('mydb'); + }); + + it('should parse key-value format without database', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('host=localhost port=27017 user=admin password=secret'), + ); + + expect(result.host).toBe('localhost'); + expect(result.database).toBe(''); + }); + + it('should parse key-value format with quoted password containing spaces', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + "host=localhost port=27017 database=mydb user=admin password='my secret pass'", + ), + ); + + expect(result.password).toBe('my secret pass'); + }); + + it('should default port to 27017 when not specified in key-value format', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost database=mydb user=admin password=secret', + ), + ); + + expect(result.port).toBe(27017); + }); + + it('should handle hostaddr as alternative to host', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'hostaddr=192.168.1.1 port=27017 database=mydb user=admin password=secret', + ), + ); + + 
expect(result.host).toBe('192.168.1.1'); + }); + + it('should handle dbname as alternative to database', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost port=27017 dbname=mydb user=admin password=secret', + ), + ); + + expect(result.database).toBe('mydb'); + }); + + it('should handle db as alternative to database', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost port=27017 db=mydb user=admin password=secret', + ), + ); + + expect(result.database).toBe('mydb'); + }); + + it('should handle username as alternative to user', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost port=27017 database=mydb username=admin password=secret', + ), + ); + + expect(result.username).toBe('admin'); + }); + + it('should parse authSource in key-value format', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost database=mydb user=admin password=secret authSource=authdb', + ), + ); + + expect(result.authDatabase).toBe('authdb'); + }); + + it('should parse authDatabase in key-value format', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost database=mydb user=admin password=secret authDatabase=authdb', + ), + ); + + expect(result.authDatabase).toBe('authdb'); + }); + + it('should parse tls in key-value format', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost database=mydb user=admin password=secret tls=true', + ), + ); + + expect(result.useTls).toBe(true); + }); + + it('should parse ssl in key-value format', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'host=localhost database=mydb user=admin password=secret ssl=true', + ), + ); + + expect(result.useTls).toBe(true); + }); + + it('should return error for key-value format missing host', () => { + const result = expectError( + 
MongodbConnectionStringParser.parse('port=27017 database=mydb user=admin password=secret'), + ); + + expect(result.error).toContain('Host'); + expect(result.format).toBe('key-value'); + }); + + it('should return error for key-value format missing user', () => { + const result = expectError( + MongodbConnectionStringParser.parse('host=localhost database=mydb password=secret'), + ); + + expect(result.error).toContain('Username'); + expect(result.format).toBe('key-value'); + }); + + it('should return error for key-value format missing password', () => { + const result = expectError( + MongodbConnectionStringParser.parse('host=localhost database=mydb user=admin'), + ); + + expect(result.error).toContain('Password'); + expect(result.format).toBe('key-value'); + }); + }); + + describe('Error Cases', () => { + it('should return error for empty string', () => { + const result = expectError(MongodbConnectionStringParser.parse('')); + + expect(result.error).toContain('empty'); + }); + + it('should return error for whitespace-only string', () => { + const result = expectError(MongodbConnectionStringParser.parse(' ')); + + expect(result.error).toContain('empty'); + }); + + it('should return error for unrecognized format', () => { + const result = expectError(MongodbConnectionStringParser.parse('some random text')); + + expect(result.error).toContain('Unrecognized'); + }); + + it('should return error for missing username in URI', () => { + const result = expectError( + MongodbConnectionStringParser.parse('mongodb://:password@host:27017/db'), + ); + + expect(result.error).toContain('Username'); + }); + + it('should return error for missing password in URI', () => { + const result = expectError( + MongodbConnectionStringParser.parse('mongodb://user@host:27017/db'), + ); + + expect(result.error).toContain('Password'); + }); + + it('should return error for mysql:// format (wrong database type)', () => { + const result = expectError( + 
MongodbConnectionStringParser.parse('mysql://user:pass@host:3306/db'), + ); + + expect(result.error).toContain('Unrecognized'); + }); + + it('should return error for postgresql:// format (wrong database type)', () => { + const result = expectError( + MongodbConnectionStringParser.parse('postgresql://user:pass@host:5432/db'), + ); + + expect(result.error).toContain('Unrecognized'); + }); + }); + + describe('Edge Cases', () => { + it('should handle special characters in password', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user:p%40ss%3Aw%2Ford@host:27017/db'), + ); + + expect(result.password).toBe('p@ss:w/ord'); + }); + + it('should handle password with exclamation mark', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://root:rostislav123!@82.146.56.0:27017'), + ); + + expect(result.password).toBe('rostislav123!'); + }); + + it('should handle numeric database names', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user:pass@host:27017/12345'), + ); + + expect(result.database).toBe('12345'); + }); + + it('should handle hyphenated host names', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'mongodb://user:pass@my-database-host.example.com:27017/db', + ), + ); + + expect(result.host).toBe('my-database-host.example.com'); + }); + + it('should handle IP address as host', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user:pass@192.168.1.100:27017/db'), + ); + + expect(result.host).toBe('192.168.1.100'); + }); + + it('should handle connection string with extra query parameters', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse( + 'mongodb://user:pass@host:27017/db?tls=true&connectTimeoutMS=10000&retryWrites=true', + ), + ); + + expect(result.useTls).toBe(true); + expect(result.database).toBe('db'); + }); + + it('should trim whitespace from 
connection string', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse(' mongodb://user:pass@host:27017/db '), + ); + + expect(result.host).toBe('host'); + }); + + it('should handle trailing slash without database', () => { + const result = expectSuccess( + MongodbConnectionStringParser.parse('mongodb://user:pass@host:27017/'), + ); + + expect(result.database).toBe(''); + }); + }); +}); diff --git a/frontend/src/entity/databases/model/mongodb/MongodbConnectionStringParser.ts b/frontend/src/entity/databases/model/mongodb/MongodbConnectionStringParser.ts new file mode 100644 index 0000000..8b29b6f --- /dev/null +++ b/frontend/src/entity/databases/model/mongodb/MongodbConnectionStringParser.ts @@ -0,0 +1,194 @@ +export type ParseResult = { + host: string; + port: number; + username: string; + password: string; + database: string; + authDatabase: string; + useTls: boolean; +}; + +export type ParseError = { + error: string; + format?: string; +}; + +export class MongodbConnectionStringParser { + /** + * Parses a MongoDB connection string in various formats. + * + * Supported formats: + * 1. Standard MongoDB URI: mongodb://user:pass@host:port/db?authSource=admin + * 2. MongoDB Atlas SRV: mongodb+srv://user:pass@cluster.mongodb.net/db + * 3. Key-value format: host=x port=27017 database=db user=u password=p authSource=admin + * 4. 
With TLS params: mongodb://user:pass@host:port/db?tls=true or ?ssl=true + */ + static parse(connectionString: string): ParseResult | ParseError { + const trimmed = connectionString.trim(); + + if (!trimmed) { + return { error: 'Connection string is empty' }; + } + + // Try key-value format (contains key=value pairs without ://) + if (this.isKeyValueFormat(trimmed)) { + return this.parseKeyValue(trimmed); + } + + // Try URI format (mongodb:// or mongodb+srv://) + if (trimmed.startsWith('mongodb://') || trimmed.startsWith('mongodb+srv://')) { + return this.parseUri(trimmed); + } + + return { + error: 'Unrecognized connection string format', + }; + } + + private static isKeyValueFormat(str: string): boolean { + return ( + !str.includes('://') && + (str.includes('host=') || str.includes('database=')) && + str.includes('=') + ); + } + + private static parseUri(connectionString: string): ParseResult | ParseError { + try { + const isSrv = connectionString.startsWith('mongodb+srv://'); + + // Standard URI parsing using URL API + const url = new URL(connectionString); + + const host = url.hostname; + const port = url.port ? parseInt(url.port, 10) : isSrv ? 27017 : 27017; + const username = decodeURIComponent(url.username); + const password = decodeURIComponent(url.password); + const database = decodeURIComponent(url.pathname.slice(1)); + const authDatabase = this.getAuthSource(url.search) || 'admin'; + const useTls = isSrv ? 
true : this.checkTlsMode(url.search); + + if (!host) { + return { error: 'Host is missing from connection string' }; + } + + if (!username) { + return { error: 'Username is missing from connection string' }; + } + + if (!password) { + return { error: 'Password is missing from connection string' }; + } + + return { + host, + port, + username, + password, + database: database || '', + authDatabase, + useTls, + }; + } catch (e) { + return { + error: `Failed to parse connection string: ${(e as Error).message}`, + format: 'URI', + }; + } + } + + private static parseKeyValue(connectionString: string): ParseResult | ParseError { + try { + const params: Record = {}; + + const regex = /(\w+)=(?:'([^']*)'|(\S+))/g; + let match; + + while ((match = regex.exec(connectionString)) !== null) { + const key = match[1]; + const value = match[2] !== undefined ? match[2] : match[3]; + params[key] = value; + } + + const host = params['host'] || params['hostaddr']; + const port = params['port']; + const database = params['database'] || params['dbname'] || params['db']; + const username = params['user'] || params['username']; + const password = params['password']; + const authDatabase = params['authSource'] || params['authDatabase'] || 'admin'; + const tls = params['tls'] || params['ssl']; + + if (!host) { + return { + error: 'Host is missing from connection string. Use host=hostname', + format: 'key-value', + }; + } + + if (!username) { + return { + error: 'Username is missing from connection string. Use user=username', + format: 'key-value', + }; + } + + if (!password) { + return { + error: 'Password is missing from connection string. Use password=yourpassword', + format: 'key-value', + }; + } + + const useTls = this.isTlsEnabled(tls); + + return { + host, + port: port ? 
parseInt(port, 10) : 27017, + username, + password, + database: database || '', + authDatabase, + useTls, + }; + } catch (e) { + return { + error: `Failed to parse key-value connection string: ${(e as Error).message}`, + format: 'key-value', + }; + } + } + + private static getAuthSource(queryString: string | undefined | null): string | undefined { + if (!queryString) return undefined; + + const params = new URLSearchParams( + queryString.startsWith('?') ? queryString.slice(1) : queryString, + ); + + return params.get('authSource') || params.get('authDatabase') || undefined; + } + + private static checkTlsMode(queryString: string | undefined | null): boolean { + if (!queryString) return false; + + const params = new URLSearchParams( + queryString.startsWith('?') ? queryString.slice(1) : queryString, + ); + + const tls = params.get('tls'); + const ssl = params.get('ssl'); + + if (tls) return this.isTlsEnabled(tls); + if (ssl) return this.isTlsEnabled(ssl); + + return false; + } + + private static isTlsEnabled(tlsValue: string | null | undefined): boolean { + if (!tlsValue) return false; + + const lowercased = tlsValue.toLowerCase(); + const enabledValues = ['true', 'yes', '1']; + return enabledValues.includes(lowercased); + } +} diff --git a/frontend/src/entity/databases/model/mongodb/MongodbDatabase.ts b/frontend/src/entity/databases/model/mongodb/MongodbDatabase.ts new file mode 100644 index 0000000..9417d75 --- /dev/null +++ b/frontend/src/entity/databases/model/mongodb/MongodbDatabase.ts @@ -0,0 +1,13 @@ +import type { MongodbVersion } from './MongodbVersion'; + +export interface MongodbDatabase { + id: string; + version: MongodbVersion; + host: string; + port: number; + username: string; + password: string; + database: string; + authDatabase: string; + useTls: boolean; +} diff --git a/frontend/src/entity/databases/model/mongodb/MongodbVersion.ts b/frontend/src/entity/databases/model/mongodb/MongodbVersion.ts new file mode 100644 index 0000000..6a56a96 --- 
/dev/null +++ b/frontend/src/entity/databases/model/mongodb/MongodbVersion.ts @@ -0,0 +1,9 @@ +export enum MongodbVersion { + MongodbVersion40 = '4.0', + MongodbVersion42 = '4.2', + MongodbVersion44 = '4.4', + MongodbVersion50 = '5.0', + MongodbVersion60 = '6.0', + MongodbVersion70 = '7.0', + MongodbVersion80 = '8.0', +} diff --git a/frontend/src/entity/restores/api/restoreApi.ts b/frontend/src/entity/restores/api/restoreApi.ts index 4bef134..119ac08 100644 --- a/frontend/src/entity/restores/api/restoreApi.ts +++ b/frontend/src/entity/restores/api/restoreApi.ts @@ -1,7 +1,12 @@ import { getApplicationServer } from '../../../constants'; import RequestOptions from '../../../shared/api/RequestOptions'; import { apiHelper } from '../../../shared/api/apiHelper'; -import type { MariadbDatabase, MysqlDatabase, PostgresqlDatabase } from '../../databases'; +import type { + MariadbDatabase, + MongodbDatabase, + MysqlDatabase, + PostgresqlDatabase, +} from '../../databases'; import type { Restore } from '../model/Restore'; export const restoreApi = { @@ -18,11 +23,13 @@ export const restoreApi = { postgresql, mysql, mariadb, + mongodb, }: { backupId: string; postgresql?: PostgresqlDatabase; mysql?: MysqlDatabase; mariadb?: MariadbDatabase; + mongodb?: MongodbDatabase; }) { const requestOptions: RequestOptions = new RequestOptions(); requestOptions.setBody( @@ -30,6 +37,7 @@ export const restoreApi = { postgresqlDatabase: postgresql, mysqlDatabase: mysql, mariadbDatabase: mariadb, + mongodbDatabase: mongodb, }), ); diff --git a/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx b/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx index dcab9c7..28fc18c 100644 --- a/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx +++ b/frontend/src/features/databases/ui/CreateDatabaseComponent.tsx @@ -5,6 +5,7 @@ import { type Database, DatabaseType, type MariadbDatabase, + type MongodbDatabase, type MysqlDatabase, Period, type PostgresqlDatabase, @@ -41,7 
+42,13 @@ const createInitialDatabase = (workspaceId: string): Database => }) as Database; const initializeDatabaseTypeData = (db: Database): Database => { - const base = { ...db, postgresql: undefined, mysql: undefined, mariadb: undefined }; + const base = { + ...db, + postgresql: undefined, + mysql: undefined, + mariadb: undefined, + mongodb: undefined, + }; switch (db.type) { case DatabaseType.POSTGRES: @@ -50,6 +57,8 @@ const initializeDatabaseTypeData = (db: Database): Database => { return { ...base, mysql: db.mysql ?? ({} as MysqlDatabase) }; case DatabaseType.MARIADB: return { ...base, mariadb: db.mariadb ?? ({} as MariadbDatabase) }; + case DatabaseType.MONGODB: + return { ...base, mongodb: db.mongodb ?? ({} as MongodbDatabase) }; default: return db; } diff --git a/frontend/src/features/databases/ui/edit/EditDatabaseBaseInfoComponent.tsx b/frontend/src/features/databases/ui/edit/EditDatabaseBaseInfoComponent.tsx index 3de4e9e..09f4e99 100644 --- a/frontend/src/features/databases/ui/edit/EditDatabaseBaseInfoComponent.tsx +++ b/frontend/src/features/databases/ui/edit/EditDatabaseBaseInfoComponent.tsx @@ -5,6 +5,7 @@ import { type Database, DatabaseType, type MariadbDatabase, + type MongodbDatabase, type MysqlDatabase, type PostgresqlDatabase, databaseApi, @@ -28,6 +29,7 @@ const databaseTypeOptions = [ { value: DatabaseType.POSTGRES, label: 'PostgreSQL' }, { value: DatabaseType.MYSQL, label: 'MySQL' }, { value: DatabaseType.MARIADB, label: 'MariaDB' }, + { value: DatabaseType.MONGODB, label: 'MongoDB' }, ]; export const EditDatabaseBaseInfoComponent = ({ @@ -58,6 +60,7 @@ export const EditDatabaseBaseInfoComponent = ({ postgresql: undefined, mysql: undefined, mariadb: undefined, + mongodb: undefined, }; switch (newType) { @@ -70,6 +73,9 @@ export const EditDatabaseBaseInfoComponent = ({ case DatabaseType.MARIADB: updatedDatabase.mariadb = editingDatabase.mariadb ?? 
({} as MariadbDatabase); break; + case DatabaseType.MONGODB: + updatedDatabase.mongodb = editingDatabase.mongodb ?? ({} as MongodbDatabase); + break; } setEditingDatabase(updatedDatabase); diff --git a/frontend/src/features/databases/ui/edit/EditDatabaseSpecificDataComponent.tsx b/frontend/src/features/databases/ui/edit/EditDatabaseSpecificDataComponent.tsx index bd952fd..d893f51 100644 --- a/frontend/src/features/databases/ui/edit/EditDatabaseSpecificDataComponent.tsx +++ b/frontend/src/features/databases/ui/edit/EditDatabaseSpecificDataComponent.tsx @@ -1,5 +1,6 @@ import { type Database, DatabaseType } from '../../../../entity/databases'; import { EditMariaDbSpecificDataComponent } from './EditMariaDbSpecificDataComponent'; +import { EditMongoDbSpecificDataComponent } from './EditMongoDbSpecificDataComponent'; import { EditMySqlSpecificDataComponent } from './EditMySqlSpecificDataComponent'; import { EditPostgreSqlSpecificDataComponent } from './EditPostgreSqlSpecificDataComponent'; @@ -54,6 +55,8 @@ export const EditDatabaseSpecificDataComponent = ({ return ; case DatabaseType.MARIADB: return ; + case DatabaseType.MONGODB: + return ; default: return null; } diff --git a/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx b/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx new file mode 100644 index 0000000..e33a9a1 --- /dev/null +++ b/frontend/src/features/databases/ui/edit/EditMongoDbSpecificDataComponent.tsx @@ -0,0 +1,387 @@ +import { CopyOutlined, DownOutlined, UpOutlined } from '@ant-design/icons'; +import { App, Button, Input, InputNumber, Switch } from 'antd'; +import { useEffect, useState } from 'react'; + +import { type Database, databaseApi } from '../../../../entity/databases'; +import { MongodbConnectionStringParser } from '../../../../entity/databases/model/mongodb/MongodbConnectionStringParser'; +import { ToastHelper } from '../../../../shared/toast'; + +interface Props { + database: Database; + 
+ isShowCancelButton?: boolean; + onCancel: () => void; + + isShowBackButton: boolean; + onBack: () => void; + + saveButtonText?: string; + isSaveToApi: boolean; + onSaved: (database: Database) => void; + + isShowDbName?: boolean; +} + +export const EditMongoDbSpecificDataComponent = ({ + database, + + isShowCancelButton, + onCancel, + + isShowBackButton, + onBack, + + saveButtonText, + isSaveToApi, + onSaved, + isShowDbName = true, +}: Props) => { + const { message } = App.useApp(); + + const [editingDatabase, setEditingDatabase] = useState(); + const [isSaving, setIsSaving] = useState(false); + + const [isConnectionTested, setIsConnectionTested] = useState(false); + const [isTestingConnection, setIsTestingConnection] = useState(false); + const [isConnectionFailed, setIsConnectionFailed] = useState(false); + + const hasAdvancedValues = !!database.mongodb?.authDatabase; + const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues); + + const parseFromClipboard = async () => { + try { + const text = await navigator.clipboard.readText(); + const trimmedText = text.trim(); + + if (!trimmedText) { + message.error('Clipboard is empty'); + return; + } + + const result = MongodbConnectionStringParser.parse(trimmedText); + + if ('error' in result) { + message.error(result.error); + return; + } + + if (!editingDatabase?.mongodb) return; + + const updatedDatabase: Database = { + ...editingDatabase, + mongodb: { + ...editingDatabase.mongodb, + host: result.host, + port: result.port, + username: result.username, + password: result.password, + database: result.database, + authDatabase: result.authDatabase, + useTls: result.useTls, + }, + }; + + setEditingDatabase(updatedDatabase); + setIsConnectionTested(false); + message.success('Connection string parsed successfully'); + } catch { + message.error('Failed to read clipboard. 
Please check browser permissions.'); + } + }; + + const testConnection = async () => { + if (!editingDatabase) return; + setIsTestingConnection(true); + setIsConnectionFailed(false); + + try { + await databaseApi.testDatabaseConnectionDirect(editingDatabase); + setIsConnectionTested(true); + ToastHelper.showToast({ + title: 'Connection test passed', + description: 'You can continue with the next step', + }); + } catch (e) { + setIsConnectionFailed(true); + alert((e as Error).message); + } + + setIsTestingConnection(false); + }; + + const saveDatabase = async () => { + if (!editingDatabase) return; + + if (isSaveToApi) { + setIsSaving(true); + + try { + await databaseApi.updateDatabase(editingDatabase); + } catch (e) { + alert((e as Error).message); + } + + setIsSaving(false); + } + + onSaved(editingDatabase); + }; + + useEffect(() => { + setIsSaving(false); + setIsConnectionTested(false); + setIsTestingConnection(false); + setIsConnectionFailed(false); + + setEditingDatabase({ ...database }); + }, [database]); + + if (!editingDatabase) return null; + + let isAllFieldsFilled = true; + if (!editingDatabase.mongodb?.host) isAllFieldsFilled = false; + if (!editingDatabase.mongodb?.port) isAllFieldsFilled = false; + if (!editingDatabase.mongodb?.username) isAllFieldsFilled = false; + if (!editingDatabase.id && !editingDatabase.mongodb?.password) isAllFieldsFilled = false; + if (!editingDatabase.mongodb?.database) isAllFieldsFilled = false; + + const isLocalhostDb = + editingDatabase.mongodb?.host?.includes('localhost') || + editingDatabase.mongodb?.host?.includes('127.0.0.1'); + + return ( +
+
+
+
+ + Parse from clipboard +
+
+ +
+
Host
+ { + if (!editingDatabase.mongodb) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { + ...editingDatabase.mongodb, + host: e.target.value.trim().replace('https://', '').replace('http://', ''), + }, + }); + setIsConnectionTested(false); + }} + size="small" + className="max-w-[200px] grow" + placeholder="Enter MongoDB host" + /> +
+ + {isLocalhostDb && ( +
+
+
+ Please{' '} + + read this document + {' '} + to study how to backup local database +
+
+ )} + +
+
Port
+ { + if (!editingDatabase.mongodb || e === null) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { ...editingDatabase.mongodb, port: e }, + }); + setIsConnectionTested(false); + }} + size="small" + className="max-w-[200px] grow" + placeholder="27017" + /> +
+ +
+
Username
+ { + if (!editingDatabase.mongodb) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { ...editingDatabase.mongodb, username: e.target.value.trim() }, + }); + setIsConnectionTested(false); + }} + size="small" + className="max-w-[200px] grow" + placeholder="Enter MongoDB username" + /> +
+ +
+
Password
+ { + if (!editingDatabase.mongodb) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { ...editingDatabase.mongodb, password: e.target.value.trim() }, + }); + setIsConnectionTested(false); + }} + size="small" + className="max-w-[200px] grow" + placeholder="Enter MongoDB password" + autoComplete="off" + data-1p-ignore + data-lpignore="true" + data-form-type="other" + /> +
+ + {isShowDbName && ( +
+
DB name
+ { + if (!editingDatabase.mongodb) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { ...editingDatabase.mongodb, database: e.target.value.trim() }, + }); + setIsConnectionTested(false); + }} + size="small" + className="max-w-[200px] grow" + placeholder="Enter MongoDB database name" + /> +
+ )} + +
+
Use TLS
+ { + if (!editingDatabase.mongodb) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { ...editingDatabase.mongodb, useTls: checked }, + }); + setIsConnectionTested(false); + }} + size="small" + /> +
+ +
+
setShowAdvanced(!isShowAdvanced)} + > + Advanced settings + + {isShowAdvanced ? ( + + ) : ( + + )} +
+
+ + {isShowAdvanced && ( +
+
Auth database
+ { + if (!editingDatabase.mongodb) return; + + setEditingDatabase({ + ...editingDatabase, + mongodb: { ...editingDatabase.mongodb, authDatabase: e.target.value.trim() }, + }); + setIsConnectionTested(false); + }} + size="small" + className="max-w-[200px] grow" + placeholder="admin" + /> +
+ )} + +
+ {isShowCancelButton && ( + + )} + + {isShowBackButton && ( + + )} + + {!isConnectionTested && ( + + )} + + {isConnectionTested && ( + + )} +
+ + {isConnectionFailed && ( +
+ If your database uses IP whitelist, make sure Postgresus server IP is added to the allowed + list. +
+ )} +
+ ); +}; diff --git a/frontend/src/features/restores/ui/RestoresComponent.tsx b/frontend/src/features/restores/ui/RestoresComponent.tsx index 18b8cb9..ced37ce 100644 --- a/frontend/src/features/restores/ui/RestoresComponent.tsx +++ b/frontend/src/features/restores/ui/RestoresComponent.tsx @@ -38,6 +38,7 @@ const createInitialEditingDatabase = (database: Database): Database => ({ postgresql: clearCredentials(database.postgresql), mysql: clearCredentials(database.mysql), mariadb: clearCredentials(database.mariadb), + mongodb: clearCredentials(database.mongodb), }); const getRestorePayload = (database: Database, editingDatabase: Database) => { @@ -48,6 +49,8 @@ const getRestorePayload = (database: Database, editingDatabase: Database) => { return { mysql: editingDatabase.mysql }; case DatabaseType.MARIADB: return { mariadb: editingDatabase.mariadb }; + case DatabaseType.MONGODB: + return { mongodb: editingDatabase.mongodb }; default: return {}; }