mirror of
https://github.com/databasus/databasus.git
synced 2026-04-06 00:32:03 +02:00
Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b666cd9e2e | ||
|
|
9dac63430d | ||
|
|
8217906c7a | ||
|
|
db71a5ef7b | ||
|
|
df78e296b3 | ||
|
|
fda3bf9b98 | ||
|
|
e19f449c60 | ||
|
|
5944d7c4b6 | ||
|
|
1f5c9d3d01 | ||
|
|
d27b885fc1 | ||
|
|
45054bc4b5 | ||
|
|
09f27019e8 | ||
|
|
cba8fdf49c | ||
|
|
41c72cf7b6 |
11
.github/workflows/ci-release.yml
vendored
11
.github/workflows/ci-release.yml
vendored
@@ -82,6 +82,11 @@ jobs:
|
||||
cd frontend
|
||||
npm run lint
|
||||
|
||||
- name: Run frontend tests
|
||||
run: |
|
||||
cd frontend
|
||||
npm run test
|
||||
|
||||
test-backend:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint-backend]
|
||||
@@ -144,6 +149,12 @@ jobs:
|
||||
# testing Telegram
|
||||
TEST_TELEGRAM_BOT_TOKEN=${{ secrets.TEST_TELEGRAM_BOT_TOKEN }}
|
||||
TEST_TELEGRAM_CHAT_ID=${{ secrets.TEST_TELEGRAM_CHAT_ID }}
|
||||
# supabase
|
||||
TEST_SUPABASE_HOST=${{ secrets.TEST_SUPABASE_HOST }}
|
||||
TEST_SUPABASE_PORT=${{ secrets.TEST_SUPABASE_PORT }}
|
||||
TEST_SUPABASE_USERNAME=${{ secrets.TEST_SUPABASE_USERNAME }}
|
||||
TEST_SUPABASE_PASSWORD=${{ secrets.TEST_SUPABASE_PASSWORD }}
|
||||
TEST_SUPABASE_DATABASE=${{ secrets.TEST_SUPABASE_DATABASE }}
|
||||
EOF
|
||||
|
||||
- name: Start test containers
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -7,4 +7,5 @@ node_modules/
|
||||
.idea
|
||||
/articles
|
||||
|
||||
.DS_Store
|
||||
.DS_Store
|
||||
/scripts
|
||||
15
README.md
15
README.md
@@ -80,6 +80,15 @@
|
||||
- **Dark & light themes**: Choose the look that suits your workflow
|
||||
- **Mobile adaptive**: Check your backups from anywhere on any device
|
||||
|
||||
### ☁️ **Works with Self-Hosted & Cloud Databases**
|
||||
|
||||
Postgresus works seamlessly with both self-hosted PostgreSQL and cloud-managed databases:
|
||||
|
||||
- **Cloud support**: AWS RDS, Google Cloud SQL, Azure Database for PostgreSQL
|
||||
- **Self-hosted**: Any PostgreSQL instance you manage yourself
|
||||
- **Why no PITR?**: Cloud providers already offer native PITR, and external PITR backups cannot be restored to managed cloud databases — making them impractical for cloud-hosted PostgreSQL
|
||||
- **Practical granularity**: Hourly and daily backups are sufficient for 99% of projects without the operational complexity of WAL archiving
|
||||
|
||||
### 🐳 **Self-Hosted & Secure**
|
||||
|
||||
- **Docker-based**: Easy deployment and management
|
||||
@@ -88,7 +97,7 @@
|
||||
|
||||
### 📦 Installation <a href="https://postgresus.com/installation">(docs)</a>
|
||||
|
||||
You have three ways to install Postgresus:
|
||||
You have several ways to install Postgresus:
|
||||
|
||||
- Script (recommended)
|
||||
- Simple Docker run
|
||||
@@ -106,7 +115,7 @@ You have three ways to install Postgresus: automated script (recommended), simpl
|
||||
|
||||
The installation script will:
|
||||
|
||||
- ✅ Install Docker with Docker Compose(if not already installed)
|
||||
- ✅ Install Docker with Docker Compose (if not already installed)
|
||||
- ✅ Set up Postgresus
|
||||
- ✅ Configure automatic startup on system reboot
|
||||
|
||||
@@ -229,4 +238,4 @@ This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENS
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
Contributions are welcome! Read <a href="https://postgresus.com/contributing">contributing guide</a> for more details, prioerities and rules are specified there. If you want to contribute, but don't know what and how - message me on Telegram [@rostislav_dugin](https://t.me/rostislav_dugin)
|
||||
Contributions are welcome! Read <a href="https://postgresus.com/contribute">contributing guide</a> for more details, priorities and rules are specified there. If you want to contribute, but don't know what and how - message me on Telegram [@rostislav_dugin](https://t.me/rostislav_dugin)
|
||||
|
||||
@@ -33,4 +33,10 @@ TEST_NAS_PORT=7006
|
||||
TEST_TELEGRAM_BOT_TOKEN=
|
||||
TEST_TELEGRAM_CHAT_ID=
|
||||
# testing Azure Blob Storage
|
||||
TEST_AZURITE_BLOB_PORT=10000
|
||||
TEST_AZURITE_BLOB_PORT=10000
|
||||
# supabase
|
||||
TEST_SUPABASE_HOST=
|
||||
TEST_SUPABASE_PORT=
|
||||
TEST_SUPABASE_USERNAME=
|
||||
TEST_SUPABASE_PASSWORD=
|
||||
TEST_SUPABASE_DATABASE=
|
||||
@@ -58,6 +58,13 @@ type EnvVariables struct {
|
||||
// testing Telegram
|
||||
TestTelegramBotToken string `env:"TEST_TELEGRAM_BOT_TOKEN"`
|
||||
TestTelegramChatID string `env:"TEST_TELEGRAM_CHAT_ID"`
|
||||
|
||||
// testing Supabase
|
||||
TestSupabaseHost string `env:"TEST_SUPABASE_HOST"`
|
||||
TestSupabasePort string `env:"TEST_SUPABASE_PORT"`
|
||||
TestSupabaseUsername string `env:"TEST_SUPABASE_USERNAME"`
|
||||
TestSupabasePassword string `env:"TEST_SUPABASE_PASSWORD"`
|
||||
TestSupabaseDatabase string `env:"TEST_SUPABASE_DATABASE"`
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -334,6 +334,10 @@ func (uc *CreatePostgresqlBackupUsecase) buildPgDumpArgs(pg *pgtypes.PostgresqlD
|
||||
"--verbose",
|
||||
}
|
||||
|
||||
for _, schema := range pg.IncludeSchemas {
|
||||
args = append(args, "-n", schema)
|
||||
}
|
||||
|
||||
compressionArgs := uc.getCompressionArgs(pg.Version)
|
||||
return append(args, compressionArgs...)
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jackc/pgx/v5"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type PostgresqlDatabase struct {
|
||||
@@ -29,17 +30,37 @@ type PostgresqlDatabase struct {
|
||||
Password string `json:"password" gorm:"type:text;not null"`
|
||||
Database *string `json:"database" gorm:"type:text"`
|
||||
IsHttps bool `json:"isHttps" gorm:"type:boolean;default:false"`
|
||||
|
||||
// backup settings
|
||||
IncludeSchemas []string `json:"includeSchemas" gorm:"-"`
|
||||
IncludeSchemasString string `json:"-" gorm:"column:include_schemas;type:text;not null;default:''"`
|
||||
}
|
||||
|
||||
func (p *PostgresqlDatabase) TableName() string {
|
||||
return "postgresql_databases"
|
||||
}
|
||||
|
||||
func (p *PostgresqlDatabase) Validate() error {
|
||||
if p.Version == "" {
|
||||
return errors.New("version is required")
|
||||
func (p *PostgresqlDatabase) BeforeSave(_ *gorm.DB) error {
|
||||
if len(p.IncludeSchemas) > 0 {
|
||||
p.IncludeSchemasString = strings.Join(p.IncludeSchemas, ",")
|
||||
} else {
|
||||
p.IncludeSchemasString = ""
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PostgresqlDatabase) AfterFind(_ *gorm.DB) error {
|
||||
if p.IncludeSchemasString != "" {
|
||||
p.IncludeSchemas = strings.Split(p.IncludeSchemasString, ",")
|
||||
} else {
|
||||
p.IncludeSchemas = []string{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PostgresqlDatabase) Validate() error {
|
||||
if p.Host == "" {
|
||||
return errors.New("host is required")
|
||||
}
|
||||
@@ -85,6 +106,7 @@ func (p *PostgresqlDatabase) Update(incoming *PostgresqlDatabase) {
|
||||
p.Username = incoming.Username
|
||||
p.Database = incoming.Database
|
||||
p.IsHttps = incoming.IsHttps
|
||||
p.IncludeSchemas = incoming.IncludeSchemas
|
||||
|
||||
if incoming.Password != "" {
|
||||
p.Password = incoming.Password
|
||||
@@ -106,6 +128,50 @@ func (p *PostgresqlDatabase) EncryptSensitiveFields(
|
||||
return nil
|
||||
}
|
||||
|
||||
// PopulateVersionIfEmpty detects and sets the PostgreSQL version if not already set.
|
||||
// This should be called before encrypting sensitive fields.
|
||||
func (p *PostgresqlDatabase) PopulateVersionIfEmpty(
|
||||
logger *slog.Logger,
|
||||
encryptor encryption.FieldEncryptor,
|
||||
databaseID uuid.UUID,
|
||||
) error {
|
||||
if p.Version != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if p.Database == nil || *p.Database == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
password, err := decryptPasswordIfNeeded(p.Password, encryptor, databaseID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decrypt password: %w", err)
|
||||
}
|
||||
|
||||
connStr := buildConnectionStringForDB(p, *p.Database, password)
|
||||
|
||||
conn, err := pgx.Connect(ctx, connStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := conn.Close(ctx); closeErr != nil {
|
||||
logger.Error("Failed to close connection", "error", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
detectedVersion, err := detectDatabaseVersion(ctx, conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.Version = detectedVersion
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsUserReadOnly checks if the database user has read-only privileges.
|
||||
//
|
||||
// This method performs a comprehensive security check by examining:
|
||||
@@ -286,8 +352,20 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
|
||||
// Retry logic for username collision
|
||||
maxRetries := 3
|
||||
for attempt := 0; attempt < maxRetries; attempt++ {
|
||||
username := fmt.Sprintf("postgresus-%s", uuid.New().String()[:8])
|
||||
for attempt := range maxRetries {
|
||||
// Generate base username for PostgreSQL user creation
|
||||
baseUsername := fmt.Sprintf("postgresus-%s", uuid.New().String()[:8])
|
||||
|
||||
// For Supabase session pooler, the username format for connection is "username.projectid"
|
||||
// but the actual PostgreSQL user must be created with just the base name.
|
||||
// The pooler will strip the ".projectid" suffix when authenticating.
|
||||
connectionUsername := baseUsername
|
||||
if isSupabaseConnection(p.Host, p.Username) {
|
||||
if supabaseProjectID := extractSupabaseProjectID(p.Username); supabaseProjectID != "" {
|
||||
connectionUsername = fmt.Sprintf("%s.%s", baseUsername, supabaseProjectID)
|
||||
}
|
||||
}
|
||||
|
||||
newPassword := uuid.New().String()
|
||||
|
||||
tx, err := conn.Begin(ctx)
|
||||
@@ -305,9 +383,10 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
}()
|
||||
|
||||
// Step 1: Create PostgreSQL user with LOGIN privilege
|
||||
// Note: We use baseUsername for the actual PostgreSQL user name if Supabase is used
|
||||
_, err = tx.Exec(
|
||||
ctx,
|
||||
fmt.Sprintf(`CREATE USER "%s" WITH PASSWORD '%s' LOGIN`, username, newPassword),
|
||||
fmt.Sprintf(`CREATE USER "%s" WITH PASSWORD '%s' LOGIN`, baseUsername, newPassword),
|
||||
)
|
||||
if err != nil {
|
||||
if err.Error() != "" && attempt < maxRetries-1 {
|
||||
@@ -331,28 +410,28 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
}
|
||||
|
||||
// Now revoke from the specific user as well (belt and suspenders)
|
||||
_, err = tx.Exec(ctx, fmt.Sprintf(`REVOKE CREATE ON SCHEMA public FROM "%s"`, username))
|
||||
_, err = tx.Exec(ctx, fmt.Sprintf(`REVOKE CREATE ON SCHEMA public FROM "%s"`, baseUsername))
|
||||
if err != nil {
|
||||
logger.Error(
|
||||
"Failed to revoke CREATE on public schema from user",
|
||||
"error",
|
||||
err,
|
||||
"username",
|
||||
username,
|
||||
baseUsername,
|
||||
)
|
||||
}
|
||||
|
||||
// Step 2: Grant database connection privilege and revoke TEMP
|
||||
_, err = tx.Exec(
|
||||
ctx,
|
||||
fmt.Sprintf(`GRANT CONNECT ON DATABASE %s TO "%s"`, *p.Database, username),
|
||||
fmt.Sprintf(`GRANT CONNECT ON DATABASE "%s" TO "%s"`, *p.Database, baseUsername),
|
||||
)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to grant connect privilege: %w", err)
|
||||
}
|
||||
|
||||
// Revoke TEMP privilege from PUBLIC role (like CREATE on public schema, TEMP is granted to PUBLIC by default)
|
||||
_, err = tx.Exec(ctx, fmt.Sprintf(`REVOKE TEMP ON DATABASE %s FROM PUBLIC`, *p.Database))
|
||||
_, err = tx.Exec(ctx, fmt.Sprintf(`REVOKE TEMP ON DATABASE "%s" FROM PUBLIC`, *p.Database))
|
||||
if err != nil {
|
||||
logger.Warn("Failed to revoke TEMP from PUBLIC", "error", err)
|
||||
}
|
||||
@@ -360,10 +439,10 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
// Also revoke from the specific user (belt and suspenders)
|
||||
_, err = tx.Exec(
|
||||
ctx,
|
||||
fmt.Sprintf(`REVOKE TEMP ON DATABASE %s FROM "%s"`, *p.Database, username),
|
||||
fmt.Sprintf(`REVOKE TEMP ON DATABASE "%s" FROM "%s"`, *p.Database, baseUsername),
|
||||
)
|
||||
if err != nil {
|
||||
logger.Warn("Failed to revoke TEMP privilege", "error", err, "username", username)
|
||||
logger.Warn("Failed to revoke TEMP privilege", "error", err, "username", baseUsername)
|
||||
}
|
||||
|
||||
// Step 3: Discover all user-created schemas
|
||||
@@ -396,7 +475,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
// Revoke CREATE specifically (handles inheritance from PUBLIC role)
|
||||
_, err = tx.Exec(
|
||||
ctx,
|
||||
fmt.Sprintf(`REVOKE CREATE ON SCHEMA "%s" FROM "%s"`, schema, username),
|
||||
fmt.Sprintf(`REVOKE CREATE ON SCHEMA "%s" FROM "%s"`, schema, baseUsername),
|
||||
)
|
||||
if err != nil {
|
||||
logger.Warn(
|
||||
@@ -406,14 +485,14 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
"schema",
|
||||
schema,
|
||||
"username",
|
||||
username,
|
||||
baseUsername,
|
||||
)
|
||||
}
|
||||
|
||||
// Grant only USAGE (not CREATE)
|
||||
_, err = tx.Exec(
|
||||
ctx,
|
||||
fmt.Sprintf(`GRANT USAGE ON SCHEMA "%s" TO "%s"`, schema, username),
|
||||
fmt.Sprintf(`GRANT USAGE ON SCHEMA "%s" TO "%s"`, schema, baseUsername),
|
||||
)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to grant usage on schema %s: %w", schema, err)
|
||||
@@ -435,7 +514,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
EXECUTE format('GRANT SELECT ON ALL SEQUENCES IN SCHEMA %%I TO "%s"', schema_rec.schema_name);
|
||||
END LOOP;
|
||||
END $$;
|
||||
`, username, username)
|
||||
`, baseUsername, baseUsername)
|
||||
|
||||
_, err = tx.Exec(ctx, grantSelectSQL)
|
||||
if err != nil {
|
||||
@@ -457,7 +536,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %%I GRANT SELECT ON SEQUENCES TO "%s"', schema_rec.schema_name);
|
||||
END LOOP;
|
||||
END $$;
|
||||
`, username, username)
|
||||
`, baseUsername, baseUsername)
|
||||
|
||||
_, err = tx.Exec(ctx, defaultPrivilegesSQL)
|
||||
if err != nil {
|
||||
@@ -466,7 +545,7 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
|
||||
// Step 7: Verify user creation before committing
|
||||
var verifyUsername string
|
||||
err = tx.QueryRow(ctx, fmt.Sprintf(`SELECT rolname FROM pg_roles WHERE rolname = '%s'`, username)).
|
||||
err = tx.QueryRow(ctx, fmt.Sprintf(`SELECT rolname FROM pg_roles WHERE rolname = '%s'`, baseUsername)).
|
||||
Scan(&verifyUsername)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to verify user creation: %w", err)
|
||||
@@ -477,8 +556,15 @@ func (p *PostgresqlDatabase) CreateReadOnlyUser(
|
||||
}
|
||||
|
||||
success = true
|
||||
logger.Info("Read-only user created successfully", "username", username)
|
||||
return username, newPassword, nil
|
||||
// Return connectionUsername (with project ID suffix for Supabase) for the caller to use when connecting
|
||||
logger.Info(
|
||||
"Read-only user created successfully",
|
||||
"username",
|
||||
baseUsername,
|
||||
"connectionUsername",
|
||||
connectionUsername,
|
||||
)
|
||||
return connectionUsername, newPassword, nil
|
||||
}
|
||||
|
||||
return "", "", errors.New("failed to generate unique username after 3 attempts")
|
||||
@@ -521,10 +607,12 @@ func testSingleDatabaseConnection(
|
||||
}
|
||||
}()
|
||||
|
||||
// Check version after successful connection
|
||||
if err := verifyDatabaseVersion(ctx, conn, postgresDb.Version); err != nil {
|
||||
// Detect and set the database version automatically
|
||||
detectedVersion, err := detectDatabaseVersion(ctx, conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
postgresDb.Version = detectedVersion
|
||||
|
||||
// Test if we can perform basic operations (like pg_dump would need)
|
||||
if err := testBasicOperations(ctx, conn, *postgresDb.Database); err != nil {
|
||||
@@ -538,35 +626,31 @@ func testSingleDatabaseConnection(
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyDatabaseVersion checks if the actual database version matches the specified version
|
||||
func verifyDatabaseVersion(
|
||||
ctx context.Context,
|
||||
conn *pgx.Conn,
|
||||
expectedVersion tools.PostgresqlVersion,
|
||||
) error {
|
||||
// detectDatabaseVersion queries and returns the PostgreSQL major version
|
||||
func detectDatabaseVersion(ctx context.Context, conn *pgx.Conn) (tools.PostgresqlVersion, error) {
|
||||
var versionStr string
|
||||
err := conn.QueryRow(ctx, "SELECT version()").Scan(&versionStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query database version: %w", err)
|
||||
return "", fmt.Errorf("failed to query database version: %w", err)
|
||||
}
|
||||
|
||||
// Parse version from string like "PostgreSQL 14.2 on x86_64-pc-linux-gnu..."
|
||||
re := regexp.MustCompile(`PostgreSQL (\d+)\.`)
|
||||
// or "PostgreSQL 16 maintained by Postgre BY..." (some builds omit minor version)
|
||||
re := regexp.MustCompile(`PostgreSQL (\d+)`)
|
||||
matches := re.FindStringSubmatch(versionStr)
|
||||
if len(matches) < 2 {
|
||||
return fmt.Errorf("could not parse version from: %s", versionStr)
|
||||
return "", fmt.Errorf("could not parse version from: %s", versionStr)
|
||||
}
|
||||
|
||||
actualVersion := tools.GetPostgresqlVersionEnum(matches[1])
|
||||
if actualVersion != expectedVersion {
|
||||
return fmt.Errorf(
|
||||
"you specified wrong version. Real version is %s, but you specified %s",
|
||||
actualVersion,
|
||||
expectedVersion,
|
||||
)
|
||||
}
|
||||
majorVersion := matches[1]
|
||||
|
||||
return nil
|
||||
// Map to known PostgresqlVersion enum values
|
||||
switch majorVersion {
|
||||
case "12", "13", "14", "15", "16", "17", "18":
|
||||
return tools.PostgresqlVersion(majorVersion), nil
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported PostgreSQL version: %s", majorVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// testBasicOperations tests basic operations that backup tools need
|
||||
@@ -594,7 +678,7 @@ func buildConnectionStringForDB(p *PostgresqlDatabase, dbName string, password s
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s default_query_exec_mode=simple_protocol standard_conforming_strings=on",
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s default_query_exec_mode=simple_protocol standard_conforming_strings=on client_encoding=UTF8",
|
||||
p.Host,
|
||||
p.Port,
|
||||
p.Username,
|
||||
@@ -614,3 +698,15 @@ func decryptPasswordIfNeeded(
|
||||
}
|
||||
return encryptor.Decrypt(databaseID, password)
|
||||
}
|
||||
|
||||
func isSupabaseConnection(host, username string) bool {
|
||||
return strings.Contains(strings.ToLower(host), "supabase") ||
|
||||
strings.Contains(strings.ToLower(username), "supabase")
|
||||
}
|
||||
|
||||
func extractSupabaseProjectID(username string) string {
|
||||
if idx := strings.Index(username, "."); idx != -1 {
|
||||
return username[idx+1:]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -246,6 +246,188 @@ func Test_ReadOnlyUser_MultipleSchemas_AllAccessible(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func Test_CreateReadOnlyUser_DatabaseNameWithDash_Success(t *testing.T) {
|
||||
env := config.GetEnv()
|
||||
container := connectToPostgresContainer(t, env.TestPostgres16Port)
|
||||
defer container.DB.Close()
|
||||
|
||||
dashDbName := "test-db-with-dash"
|
||||
|
||||
_, err := container.DB.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, dashDbName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = container.DB.Exec(fmt.Sprintf(`CREATE DATABASE "%s"`, dashDbName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
_, _ = container.DB.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, dashDbName))
|
||||
}()
|
||||
|
||||
dashDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
|
||||
container.Host, container.Port, container.Username, container.Password, dashDbName)
|
||||
dashDB, err := sqlx.Connect("postgres", dashDSN)
|
||||
assert.NoError(t, err)
|
||||
defer dashDB.Close()
|
||||
|
||||
_, err = dashDB.Exec(`
|
||||
CREATE TABLE dash_test (
|
||||
id SERIAL PRIMARY KEY,
|
||||
data TEXT NOT NULL
|
||||
);
|
||||
INSERT INTO dash_test (data) VALUES ('test1'), ('test2');
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pgModel := &PostgresqlDatabase{
|
||||
Version: tools.GetPostgresqlVersionEnum("16"),
|
||||
Host: container.Host,
|
||||
Port: container.Port,
|
||||
Username: container.Username,
|
||||
Password: container.Password,
|
||||
Database: &dashDbName,
|
||||
IsHttps: false,
|
||||
}
|
||||
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
ctx := context.Background()
|
||||
|
||||
username, password, err := pgModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, username)
|
||||
assert.NotEmpty(t, password)
|
||||
assert.True(t, strings.HasPrefix(username, "postgresus-"))
|
||||
|
||||
readOnlyDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
|
||||
container.Host, container.Port, username, password, dashDbName)
|
||||
readOnlyConn, err := sqlx.Connect("postgres", readOnlyDSN)
|
||||
assert.NoError(t, err)
|
||||
defer readOnlyConn.Close()
|
||||
|
||||
var count int
|
||||
err = readOnlyConn.Get(&count, "SELECT COUNT(*) FROM dash_test")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, count)
|
||||
|
||||
_, err = readOnlyConn.Exec("INSERT INTO dash_test (data) VALUES ('should-fail')")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "permission denied")
|
||||
|
||||
_, err = dashDB.Exec(fmt.Sprintf(`DROP OWNED BY "%s" CASCADE`, username))
|
||||
if err != nil {
|
||||
t.Logf("Warning: Failed to drop owned objects: %v", err)
|
||||
}
|
||||
|
||||
_, err = dashDB.Exec(fmt.Sprintf(`DROP USER IF EXISTS "%s"`, username))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func Test_CreateReadOnlyUser_Supabase_UserCanReadButNotWrite(t *testing.T) {
|
||||
env := config.GetEnv()
|
||||
|
||||
if env.TestSupabaseHost == "" {
|
||||
t.Skip("Skipping Supabase test: missing environment variables")
|
||||
}
|
||||
|
||||
portInt, err := strconv.Atoi(env.TestSupabasePort)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dsn := fmt.Sprintf(
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=require",
|
||||
env.TestSupabaseHost,
|
||||
portInt,
|
||||
env.TestSupabaseUsername,
|
||||
env.TestSupabasePassword,
|
||||
env.TestSupabaseDatabase,
|
||||
)
|
||||
|
||||
adminDB, err := sqlx.Connect("postgres", dsn)
|
||||
assert.NoError(t, err)
|
||||
defer adminDB.Close()
|
||||
|
||||
tableName := fmt.Sprintf(
|
||||
"readonly_test_%s",
|
||||
strings.ReplaceAll(uuid.New().String()[:8], "-", ""),
|
||||
)
|
||||
_, err = adminDB.Exec(fmt.Sprintf(`
|
||||
DROP TABLE IF EXISTS public.%s CASCADE;
|
||||
CREATE TABLE public.%s (
|
||||
id SERIAL PRIMARY KEY,
|
||||
data TEXT NOT NULL
|
||||
);
|
||||
INSERT INTO public.%s (data) VALUES ('test1'), ('test2');
|
||||
`, tableName, tableName, tableName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
_, _ = adminDB.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS public.%s CASCADE`, tableName))
|
||||
}()
|
||||
|
||||
pgModel := &PostgresqlDatabase{
|
||||
Host: env.TestSupabaseHost,
|
||||
Port: portInt,
|
||||
Username: env.TestSupabaseUsername,
|
||||
Password: env.TestSupabasePassword,
|
||||
Database: &env.TestSupabaseDatabase,
|
||||
IsHttps: true,
|
||||
}
|
||||
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
ctx := context.Background()
|
||||
|
||||
connectionUsername, newPassword, err := pgModel.CreateReadOnlyUser(ctx, logger, nil, uuid.New())
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, connectionUsername)
|
||||
assert.NotEmpty(t, newPassword)
|
||||
assert.True(t, strings.HasPrefix(connectionUsername, "postgresus-"))
|
||||
|
||||
baseUsername := connectionUsername
|
||||
if idx := strings.Index(connectionUsername, "."); idx != -1 {
|
||||
baseUsername = connectionUsername[:idx]
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_, _ = adminDB.Exec(fmt.Sprintf(`DROP OWNED BY "%s" CASCADE`, baseUsername))
|
||||
_, _ = adminDB.Exec(fmt.Sprintf(`DROP USER IF EXISTS "%s"`, baseUsername))
|
||||
}()
|
||||
|
||||
readOnlyDSN := fmt.Sprintf(
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=require",
|
||||
env.TestSupabaseHost,
|
||||
portInt,
|
||||
connectionUsername,
|
||||
newPassword,
|
||||
env.TestSupabaseDatabase,
|
||||
)
|
||||
readOnlyConn, err := sqlx.Connect("postgres", readOnlyDSN)
|
||||
assert.NoError(t, err)
|
||||
defer readOnlyConn.Close()
|
||||
|
||||
var count int
|
||||
err = readOnlyConn.Get(&count, fmt.Sprintf("SELECT COUNT(*) FROM public.%s", tableName))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, count)
|
||||
|
||||
_, err = readOnlyConn.Exec(
|
||||
fmt.Sprintf("INSERT INTO public.%s (data) VALUES ('should-fail')", tableName),
|
||||
)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "permission denied")
|
||||
|
||||
_, err = readOnlyConn.Exec(
|
||||
fmt.Sprintf("UPDATE public.%s SET data = 'hacked' WHERE id = 1", tableName),
|
||||
)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "permission denied")
|
||||
|
||||
_, err = readOnlyConn.Exec(fmt.Sprintf("DELETE FROM public.%s WHERE id = 1", tableName))
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "permission denied")
|
||||
|
||||
_, err = readOnlyConn.Exec("CREATE TABLE public.hack_table (id INT)")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "permission denied")
|
||||
}
|
||||
|
||||
type PostgresContainer struct {
|
||||
Host string
|
||||
Port int
|
||||
|
||||
@@ -75,6 +75,16 @@ func (d *Database) EncryptSensitiveFields(encryptor encryption.FieldEncryptor) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Database) PopulateVersionIfEmpty(
|
||||
logger *slog.Logger,
|
||||
encryptor encryption.FieldEncryptor,
|
||||
) error {
|
||||
if d.Postgresql != nil {
|
||||
return d.Postgresql.PopulateVersionIfEmpty(logger, encryptor, d.ID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Database) Update(incoming *Database) {
|
||||
d.Name = incoming.Name
|
||||
d.Type = incoming.Type
|
||||
|
||||
@@ -68,6 +68,10 @@ func (s *DatabaseService) CreateDatabase(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := database.PopulateVersionIfEmpty(s.logger, s.fieldEncryptor); err != nil {
|
||||
return nil, fmt.Errorf("failed to auto-detect database version: %w", err)
|
||||
}
|
||||
|
||||
if err := database.EncryptSensitiveFields(s.fieldEncryptor); err != nil {
|
||||
return nil, fmt.Errorf("failed to encrypt sensitive fields: %w", err)
|
||||
}
|
||||
@@ -125,6 +129,10 @@ func (s *DatabaseService) UpdateDatabase(
|
||||
return err
|
||||
}
|
||||
|
||||
if err := existingDatabase.PopulateVersionIfEmpty(s.logger, s.fieldEncryptor); err != nil {
|
||||
return fmt.Errorf("failed to auto-detect database version: %w", err)
|
||||
}
|
||||
|
||||
if err := existingDatabase.EncryptSensitiveFields(s.fieldEncryptor); err != nil {
|
||||
return fmt.Errorf("failed to encrypt sensitive fields: %w", err)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"postgresus-backend/internal/features/restores/usecases"
|
||||
"postgresus-backend/internal/features/storages"
|
||||
workspaces_services "postgresus-backend/internal/features/workspaces/services"
|
||||
"postgresus-backend/internal/util/encryption"
|
||||
"postgresus-backend/internal/util/logger"
|
||||
)
|
||||
|
||||
@@ -22,6 +23,7 @@ var restoreService = &RestoreService{
|
||||
logger.GetLogger(),
|
||||
workspaces_services.GetWorkspaceService(),
|
||||
audit_logs.GetAuditLogService(),
|
||||
encryption.GetFieldEncryptor(),
|
||||
}
|
||||
var restoreController = &RestoreController{
|
||||
restoreService,
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"postgresus-backend/internal/features/storages"
|
||||
users_models "postgresus-backend/internal/features/users/models"
|
||||
workspaces_services "postgresus-backend/internal/features/workspaces/services"
|
||||
"postgresus-backend/internal/util/encryption"
|
||||
"postgresus-backend/internal/util/tools"
|
||||
"time"
|
||||
|
||||
@@ -30,6 +31,7 @@ type RestoreService struct {
|
||||
logger *slog.Logger
|
||||
workspaceService *workspaces_services.WorkspaceService
|
||||
auditLogService *audit_logs.AuditLogService
|
||||
fieldEncryptor encryption.FieldEncryptor
|
||||
}
|
||||
|
||||
func (s *RestoreService) OnBeforeBackupRemove(backup *backups.Backup) error {
|
||||
@@ -120,12 +122,6 @@ func (s *RestoreService) RestoreBackupWithAuth(
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf(
|
||||
"restore from %s to %s\n",
|
||||
backupDatabase.Postgresql.Version,
|
||||
requestDTO.PostgresqlDatabase.Version,
|
||||
)
|
||||
|
||||
if tools.IsBackupDbVersionHigherThanRestoreDbVersion(
|
||||
backupDatabase.Postgresql.Version,
|
||||
requestDTO.PostgresqlDatabase.Version,
|
||||
@@ -214,6 +210,10 @@ func (s *RestoreService) RestoreBackup(
|
||||
Postgresql: requestDTO.PostgresqlDatabase,
|
||||
}
|
||||
|
||||
if err := restoringToDB.PopulateVersionIfEmpty(s.logger, s.fieldEncryptor); err != nil {
|
||||
return fmt.Errorf("failed to auto-detect database version: %w", err)
|
||||
}
|
||||
|
||||
err = s.restoreBackupUsecase.Execute(
|
||||
backupConfig,
|
||||
restore,
|
||||
|
||||
@@ -191,117 +191,34 @@ func (l *LocalStorage) EncryptSensitiveData(encryptor encryption.FieldEncryptor)
|
||||
func (l *LocalStorage) Update(incoming *LocalStorage) {
|
||||
}
|
||||
|
||||
type writeResult struct {
|
||||
bytesWritten int
|
||||
writeErr error
|
||||
}
|
||||
|
||||
type writeJob struct {
|
||||
data []byte
|
||||
n int
|
||||
}
|
||||
|
||||
func copyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
|
||||
bufA := make([]byte, localChunkSize)
|
||||
bufB := make([]byte, localChunkSize)
|
||||
|
||||
buf := make([]byte, localChunkSize)
|
||||
var written int64
|
||||
|
||||
writeCh := make(chan writeJob, 1)
|
||||
resultCh := make(chan writeResult, 1)
|
||||
doneCh := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(doneCh)
|
||||
for job := range writeCh {
|
||||
nw, err := dst.Write(job.data[:job.n])
|
||||
resultCh <- writeResult{nw, err}
|
||||
}
|
||||
}()
|
||||
|
||||
useBufferA := true
|
||||
pendingWrite := false
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
var currentBuf []byte
|
||||
if useBufferA {
|
||||
currentBuf = bufA
|
||||
} else {
|
||||
currentBuf = bufB
|
||||
}
|
||||
|
||||
nr, readErr := src.Read(currentBuf)
|
||||
|
||||
if nr == 0 && readErr == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
if readErr != nil && readErr != io.EOF {
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, readErr
|
||||
}
|
||||
|
||||
if pendingWrite {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, ctx.Err()
|
||||
case result := <-resultCh:
|
||||
if result.writeErr != nil {
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, result.writeErr
|
||||
}
|
||||
written += int64(result.bytesWritten)
|
||||
}
|
||||
}
|
||||
|
||||
nr, readErr := src.Read(buf)
|
||||
if nr > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, ctx.Err()
|
||||
case writeCh <- writeJob{currentBuf, nr}:
|
||||
pendingWrite = true
|
||||
nw, writeErr := dst.Write(buf[:nr])
|
||||
written += int64(nw)
|
||||
if writeErr != nil {
|
||||
return written, writeErr
|
||||
}
|
||||
if nr != nw {
|
||||
return written, io.ErrShortWrite
|
||||
}
|
||||
|
||||
useBufferA = !useBufferA
|
||||
}
|
||||
|
||||
if readErr == io.EOF {
|
||||
break
|
||||
return written, nil
|
||||
}
|
||||
if readErr != nil {
|
||||
return written, readErr
|
||||
}
|
||||
}
|
||||
|
||||
if pendingWrite {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, ctx.Err()
|
||||
case result := <-resultCh:
|
||||
if result.writeErr != nil {
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
return written, result.writeErr
|
||||
}
|
||||
written += int64(result.bytesWritten)
|
||||
}
|
||||
}
|
||||
|
||||
close(writeCh)
|
||||
<-doneCh
|
||||
|
||||
return written, nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package s3_storage
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -40,6 +41,7 @@ type S3Storage struct {
|
||||
|
||||
S3Prefix string `json:"s3Prefix" gorm:"type:text;column:s3_prefix"`
|
||||
S3UseVirtualHostedStyle bool `json:"s3UseVirtualHostedStyle" gorm:"default:false;column:s3_use_virtual_hosted_style"`
|
||||
SkipTLSVerify bool `json:"skipTLSVerify" gorm:"default:false;column:skip_tls_verify"`
|
||||
}
|
||||
|
||||
func (s *S3Storage) TableName() string {
|
||||
@@ -331,6 +333,7 @@ func (s *S3Storage) Update(incoming *S3Storage) {
|
||||
s.S3Region = incoming.S3Region
|
||||
s.S3Endpoint = incoming.S3Endpoint
|
||||
s.S3UseVirtualHostedStyle = incoming.S3UseVirtualHostedStyle
|
||||
s.SkipTLSVerify = incoming.SkipTLSVerify
|
||||
|
||||
if incoming.S3AccessKey != "" {
|
||||
s.S3AccessKey = incoming.S3AccessKey
|
||||
@@ -442,6 +445,9 @@ func (s *S3Storage) getClientParams(
|
||||
TLSHandshakeTimeout: s3TLSHandshakeTimeout,
|
||||
ResponseHeaderTimeout: s3ResponseTimeout,
|
||||
IdleConnTimeout: s3IdleConnTimeout,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: s.SkipTLSVerify,
|
||||
},
|
||||
}
|
||||
|
||||
return endpoint, useSSL, accessKey, secretKey, bucketLookup, transport, nil
|
||||
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
workspaces_controllers "postgresus-backend/internal/features/workspaces/controllers"
|
||||
workspaces_testing "postgresus-backend/internal/features/workspaces/testing"
|
||||
test_utils "postgresus-backend/internal/util/testing"
|
||||
"postgresus-backend/internal/util/tools"
|
||||
)
|
||||
|
||||
const createAndFillTableQuery = `
|
||||
@@ -114,6 +113,382 @@ func Test_BackupAndRestorePostgresqlWithEncryption_RestoreIsSuccessful(t *testin
|
||||
}
|
||||
}
|
||||
|
||||
func Test_BackupAndRestoreSupabase_PublicSchemaOnly_RestoreIsSuccessful(t *testing.T) {
|
||||
env := config.GetEnv()
|
||||
|
||||
if env.TestSupabaseHost == "" {
|
||||
t.Skip("Skipping Supabase test: missing environment variables")
|
||||
}
|
||||
|
||||
portInt, err := strconv.Atoi(env.TestSupabasePort)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dsn := fmt.Sprintf(
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=require",
|
||||
env.TestSupabaseHost,
|
||||
portInt,
|
||||
env.TestSupabaseUsername,
|
||||
env.TestSupabasePassword,
|
||||
env.TestSupabaseDatabase,
|
||||
)
|
||||
|
||||
supabaseDB, err := sqlx.Connect("postgres", dsn)
|
||||
assert.NoError(t, err)
|
||||
defer supabaseDB.Close()
|
||||
|
||||
tableName := fmt.Sprintf("backup_test_%s", uuid.New().String()[:8])
|
||||
createTableQuery := fmt.Sprintf(`
|
||||
DROP TABLE IF EXISTS public.%s;
|
||||
CREATE TABLE public.%s (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
value INTEGER NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
INSERT INTO public.%s (name, value) VALUES
|
||||
('test1', 100),
|
||||
('test2', 200),
|
||||
('test3', 300);
|
||||
`, tableName, tableName, tableName)
|
||||
|
||||
_, err = supabaseDB.Exec(createTableQuery)
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
_, _ = supabaseDB.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS public.%s`, tableName))
|
||||
}()
|
||||
|
||||
router := createTestRouter()
|
||||
user := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Supabase Test Workspace", user, router)
|
||||
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
|
||||
database := createSupabaseDatabaseViaAPI(
|
||||
t, router, "Supabase Test Database", workspace.ID,
|
||||
env.TestSupabaseHost, portInt,
|
||||
env.TestSupabaseUsername, env.TestSupabasePassword, env.TestSupabaseDatabase,
|
||||
[]string{"public"},
|
||||
user.Token,
|
||||
)
|
||||
|
||||
enableBackupsViaAPI(
|
||||
t, router, database.ID, storage.ID,
|
||||
backups_config.BackupEncryptionNone, user.Token,
|
||||
)
|
||||
|
||||
createBackupViaAPI(t, router, database.ID, user.Token)
|
||||
|
||||
backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute)
|
||||
assert.Equal(t, backups.BackupStatusCompleted, backup.Status)
|
||||
|
||||
_, err = supabaseDB.Exec(fmt.Sprintf(`DELETE FROM public.%s`, tableName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
var countAfterDelete int
|
||||
err = supabaseDB.Get(
|
||||
&countAfterDelete,
|
||||
fmt.Sprintf(`SELECT COUNT(*) FROM public.%s`, tableName),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, countAfterDelete, "Table should be empty after delete")
|
||||
|
||||
createSupabaseRestoreViaAPI(
|
||||
t, router, backup.ID,
|
||||
env.TestSupabaseHost, portInt,
|
||||
env.TestSupabaseUsername, env.TestSupabasePassword, env.TestSupabaseDatabase,
|
||||
user.Token,
|
||||
)
|
||||
|
||||
restore := waitForRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute)
|
||||
assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status)
|
||||
|
||||
var countAfterRestore int
|
||||
err = supabaseDB.Get(
|
||||
&countAfterRestore,
|
||||
fmt.Sprintf(`SELECT COUNT(*) FROM public.%s`, tableName),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, countAfterRestore, "Table should have 3 rows after restore")
|
||||
|
||||
var restoredData []TestDataItem
|
||||
err = supabaseDB.Select(
|
||||
&restoredData,
|
||||
fmt.Sprintf(`SELECT id, name, value, created_at FROM public.%s ORDER BY id`, tableName),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, restoredData, 3)
|
||||
assert.Equal(t, "test1", restoredData[0].Name)
|
||||
assert.Equal(t, 100, restoredData[0].Value)
|
||||
assert.Equal(t, "test2", restoredData[1].Name)
|
||||
assert.Equal(t, 200, restoredData[1].Value)
|
||||
assert.Equal(t, "test3", restoredData[2].Name)
|
||||
assert.Equal(t, 300, restoredData[2].Value)
|
||||
|
||||
err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
|
||||
if err != nil {
|
||||
t.Logf("Warning: Failed to delete backup file: %v", err)
|
||||
}
|
||||
|
||||
test_utils.MakeDeleteRequest(
|
||||
t,
|
||||
router,
|
||||
"/api/v1/databases/"+database.ID.String(),
|
||||
"Bearer "+user.Token,
|
||||
http.StatusNoContent,
|
||||
)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}
|
||||
|
||||
func Test_BackupPostgresql_SchemaSelection_AllSchemasWhenNoneSpecified(t *testing.T) {
|
||||
env := config.GetEnv()
|
||||
|
||||
container, err := connectToPostgresContainer("16", env.TestPostgres16Port)
|
||||
assert.NoError(t, err)
|
||||
defer container.DB.Close()
|
||||
|
||||
_, err = container.DB.Exec(`
|
||||
DROP SCHEMA IF EXISTS schema_a CASCADE;
|
||||
DROP SCHEMA IF EXISTS schema_b CASCADE;
|
||||
CREATE SCHEMA schema_a;
|
||||
CREATE SCHEMA schema_b;
|
||||
|
||||
CREATE TABLE public.public_table (id SERIAL PRIMARY KEY, data TEXT);
|
||||
CREATE TABLE schema_a.table_a (id SERIAL PRIMARY KEY, data TEXT);
|
||||
CREATE TABLE schema_b.table_b (id SERIAL PRIMARY KEY, data TEXT);
|
||||
|
||||
INSERT INTO public.public_table (data) VALUES ('public_data');
|
||||
INSERT INTO schema_a.table_a (data) VALUES ('schema_a_data');
|
||||
INSERT INTO schema_b.table_b (data) VALUES ('schema_b_data');
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
_, _ = container.DB.Exec(`
|
||||
DROP TABLE IF EXISTS public.public_table;
|
||||
DROP SCHEMA IF EXISTS schema_a CASCADE;
|
||||
DROP SCHEMA IF EXISTS schema_b CASCADE;
|
||||
`)
|
||||
}()
|
||||
|
||||
router := createTestRouter()
|
||||
user := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Schema Test Workspace", user, router)
|
||||
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
|
||||
database := createDatabaseWithSchemasViaAPI(
|
||||
t, router, "All Schemas Database", workspace.ID,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, container.Database,
|
||||
nil,
|
||||
user.Token,
|
||||
)
|
||||
|
||||
enableBackupsViaAPI(
|
||||
t, router, database.ID, storage.ID,
|
||||
backups_config.BackupEncryptionNone, user.Token,
|
||||
)
|
||||
|
||||
createBackupViaAPI(t, router, database.ID, user.Token)
|
||||
|
||||
backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute)
|
||||
assert.Equal(t, backups.BackupStatusCompleted, backup.Status)
|
||||
|
||||
newDBName := "restored_all_schemas"
|
||||
_, err = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", newDBName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE %s;", newDBName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
newDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
|
||||
container.Host, container.Port, container.Username, container.Password, newDBName)
|
||||
newDB, err := sqlx.Connect("postgres", newDSN)
|
||||
assert.NoError(t, err)
|
||||
defer newDB.Close()
|
||||
|
||||
createRestoreViaAPI(
|
||||
t, router, backup.ID,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, newDBName,
|
||||
user.Token,
|
||||
)
|
||||
|
||||
restore := waitForRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute)
|
||||
assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status)
|
||||
|
||||
var publicTableExists bool
|
||||
err = newDB.Get(&publicTableExists, `
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = 'public' AND table_name = 'public_table'
|
||||
)
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, publicTableExists, "public.public_table should exist in restored database")
|
||||
|
||||
var schemaATableExists bool
|
||||
err = newDB.Get(&schemaATableExists, `
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = 'schema_a' AND table_name = 'table_a'
|
||||
)
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, schemaATableExists, "schema_a.table_a should exist in restored database")
|
||||
|
||||
var schemaBTableExists bool
|
||||
err = newDB.Get(&schemaBTableExists, `
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = 'schema_b' AND table_name = 'table_b'
|
||||
)
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, schemaBTableExists, "schema_b.table_b should exist in restored database")
|
||||
|
||||
err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
|
||||
if err != nil {
|
||||
t.Logf("Warning: Failed to delete backup file: %v", err)
|
||||
}
|
||||
|
||||
test_utils.MakeDeleteRequest(
|
||||
t,
|
||||
router,
|
||||
"/api/v1/databases/"+database.ID.String(),
|
||||
"Bearer "+user.Token,
|
||||
http.StatusNoContent,
|
||||
)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}
|
||||
|
||||
func Test_BackupPostgresql_SchemaSelection_OnlySpecifiedSchemas(t *testing.T) {
|
||||
env := config.GetEnv()
|
||||
|
||||
container, err := connectToPostgresContainer("16", env.TestPostgres16Port)
|
||||
assert.NoError(t, err)
|
||||
defer container.DB.Close()
|
||||
|
||||
_, err = container.DB.Exec(`
|
||||
DROP SCHEMA IF EXISTS schema_a CASCADE;
|
||||
DROP SCHEMA IF EXISTS schema_b CASCADE;
|
||||
CREATE SCHEMA schema_a;
|
||||
CREATE SCHEMA schema_b;
|
||||
|
||||
CREATE TABLE public.public_table (id SERIAL PRIMARY KEY, data TEXT);
|
||||
CREATE TABLE schema_a.table_a (id SERIAL PRIMARY KEY, data TEXT);
|
||||
CREATE TABLE schema_b.table_b (id SERIAL PRIMARY KEY, data TEXT);
|
||||
|
||||
INSERT INTO public.public_table (data) VALUES ('public_data');
|
||||
INSERT INTO schema_a.table_a (data) VALUES ('schema_a_data');
|
||||
INSERT INTO schema_b.table_b (data) VALUES ('schema_b_data');
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
_, _ = container.DB.Exec(`
|
||||
DROP TABLE IF EXISTS public.public_table;
|
||||
DROP SCHEMA IF EXISTS schema_a CASCADE;
|
||||
DROP SCHEMA IF EXISTS schema_b CASCADE;
|
||||
`)
|
||||
}()
|
||||
|
||||
router := createTestRouter()
|
||||
user := users_testing.CreateTestUser(users_enums.UserRoleMember)
|
||||
workspace := workspaces_testing.CreateTestWorkspace("Schema Test Workspace", user, router)
|
||||
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
|
||||
database := createDatabaseWithSchemasViaAPI(
|
||||
t, router, "Specific Schemas Database", workspace.ID,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, container.Database,
|
||||
[]string{"public", "schema_a"},
|
||||
user.Token,
|
||||
)
|
||||
|
||||
enableBackupsViaAPI(
|
||||
t, router, database.ID, storage.ID,
|
||||
backups_config.BackupEncryptionNone, user.Token,
|
||||
)
|
||||
|
||||
createBackupViaAPI(t, router, database.ID, user.Token)
|
||||
|
||||
backup := waitForBackupCompletion(t, router, database.ID, user.Token, 5*time.Minute)
|
||||
assert.Equal(t, backups.BackupStatusCompleted, backup.Status)
|
||||
|
||||
newDBName := "restored_specific_schemas"
|
||||
_, err = container.DB.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", newDBName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = container.DB.Exec(fmt.Sprintf("CREATE DATABASE %s;", newDBName))
|
||||
assert.NoError(t, err)
|
||||
|
||||
newDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",
|
||||
container.Host, container.Port, container.Username, container.Password, newDBName)
|
||||
newDB, err := sqlx.Connect("postgres", newDSN)
|
||||
assert.NoError(t, err)
|
||||
defer newDB.Close()
|
||||
|
||||
createRestoreViaAPI(
|
||||
t, router, backup.ID,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, newDBName,
|
||||
user.Token,
|
||||
)
|
||||
|
||||
restore := waitForRestoreCompletion(t, router, backup.ID, user.Token, 5*time.Minute)
|
||||
assert.Equal(t, restores_enums.RestoreStatusCompleted, restore.Status)
|
||||
|
||||
var publicTableExists bool
|
||||
err = newDB.Get(&publicTableExists, `
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = 'public' AND table_name = 'public_table'
|
||||
)
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, publicTableExists, "public.public_table should exist (was included)")
|
||||
|
||||
var schemaATableExists bool
|
||||
err = newDB.Get(&schemaATableExists, `
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = 'schema_a' AND table_name = 'table_a'
|
||||
)
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, schemaATableExists, "schema_a.table_a should exist (was included)")
|
||||
|
||||
var schemaBTableExists bool
|
||||
err = newDB.Get(&schemaBTableExists, `
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_schema = 'schema_b' AND table_name = 'table_b'
|
||||
)
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, schemaBTableExists, "schema_b.table_b should NOT exist (was excluded)")
|
||||
|
||||
err = os.Remove(filepath.Join(config.GetEnv().DataFolder, backup.ID.String()))
|
||||
if err != nil {
|
||||
t.Logf("Warning: Failed to delete backup file: %v", err)
|
||||
}
|
||||
|
||||
test_utils.MakeDeleteRequest(
|
||||
t,
|
||||
router,
|
||||
"/api/v1/databases/"+database.ID.String(),
|
||||
"Bearer "+user.Token,
|
||||
http.StatusNoContent,
|
||||
)
|
||||
storages.RemoveTestStorage(storage.ID)
|
||||
workspaces_testing.RemoveTestWorkspace(workspace, router)
|
||||
}
|
||||
|
||||
func testBackupRestoreForVersion(t *testing.T, pgVersion string, port string) {
|
||||
container, err := connectToPostgresContainer(pgVersion, port)
|
||||
assert.NoError(t, err)
|
||||
@@ -132,10 +507,9 @@ func testBackupRestoreForVersion(t *testing.T, pgVersion string, port string) {
|
||||
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
|
||||
pgVersionEnum := tools.GetPostgresqlVersionEnum(pgVersion)
|
||||
database := createDatabaseViaAPI(
|
||||
t, router, "Test Database", workspace.ID,
|
||||
pgVersionEnum, container.Host, container.Port,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, container.Database,
|
||||
user.Token,
|
||||
)
|
||||
@@ -164,7 +538,7 @@ func testBackupRestoreForVersion(t *testing.T, pgVersion string, port string) {
|
||||
defer newDB.Close()
|
||||
|
||||
createRestoreViaAPI(
|
||||
t, router, backup.ID, pgVersionEnum,
|
||||
t, router, backup.ID,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, newDBName,
|
||||
user.Token,
|
||||
@@ -217,10 +591,9 @@ func testBackupRestoreWithEncryptionForVersion(t *testing.T, pgVersion string, p
|
||||
|
||||
storage := storages.CreateTestStorage(workspace.ID)
|
||||
|
||||
pgVersionEnum := tools.GetPostgresqlVersionEnum(pgVersion)
|
||||
database := createDatabaseViaAPI(
|
||||
t, router, "Test Database", workspace.ID,
|
||||
pgVersionEnum, container.Host, container.Port,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, container.Database,
|
||||
user.Token,
|
||||
)
|
||||
@@ -250,7 +623,7 @@ func testBackupRestoreWithEncryptionForVersion(t *testing.T, pgVersion string, p
|
||||
defer newDB.Close()
|
||||
|
||||
createRestoreViaAPI(
|
||||
t, router, backup.ID, pgVersionEnum,
|
||||
t, router, backup.ID,
|
||||
container.Host, container.Port,
|
||||
container.Username, container.Password, newDBName,
|
||||
user.Token,
|
||||
@@ -379,7 +752,6 @@ func createDatabaseViaAPI(
|
||||
router *gin.Engine,
|
||||
name string,
|
||||
workspaceID uuid.UUID,
|
||||
pgVersion tools.PostgresqlVersion,
|
||||
host string,
|
||||
port int,
|
||||
username string,
|
||||
@@ -392,7 +764,6 @@ func createDatabaseViaAPI(
|
||||
WorkspaceID: &workspaceID,
|
||||
Type: databases.DatabaseTypePostgres,
|
||||
Postgresql: &pgtypes.PostgresqlDatabase{
|
||||
Version: pgVersion,
|
||||
Host: host,
|
||||
Port: port,
|
||||
Username: username,
|
||||
@@ -475,7 +846,6 @@ func createRestoreViaAPI(
|
||||
t *testing.T,
|
||||
router *gin.Engine,
|
||||
backupID uuid.UUID,
|
||||
pgVersion tools.PostgresqlVersion,
|
||||
host string,
|
||||
port int,
|
||||
username string,
|
||||
@@ -485,7 +855,6 @@ func createRestoreViaAPI(
|
||||
) {
|
||||
request := restores.RestoreBackupRequest{
|
||||
PostgresqlDatabase: &pgtypes.PostgresqlDatabase{
|
||||
Version: pgVersion,
|
||||
Host: host,
|
||||
Port: port,
|
||||
Username: username,
|
||||
@@ -504,6 +873,141 @@ func createRestoreViaAPI(
|
||||
)
|
||||
}
|
||||
|
||||
func createDatabaseWithSchemasViaAPI(
|
||||
t *testing.T,
|
||||
router *gin.Engine,
|
||||
name string,
|
||||
workspaceID uuid.UUID,
|
||||
host string,
|
||||
port int,
|
||||
username string,
|
||||
password string,
|
||||
database string,
|
||||
includeSchemas []string,
|
||||
token string,
|
||||
) *databases.Database {
|
||||
request := databases.Database{
|
||||
Name: name,
|
||||
WorkspaceID: &workspaceID,
|
||||
Type: databases.DatabaseTypePostgres,
|
||||
Postgresql: &pgtypes.PostgresqlDatabase{
|
||||
Host: host,
|
||||
Port: port,
|
||||
Username: username,
|
||||
Password: password,
|
||||
Database: &database,
|
||||
IncludeSchemas: includeSchemas,
|
||||
},
|
||||
}
|
||||
|
||||
w := workspaces_testing.MakeAPIRequest(
|
||||
router,
|
||||
"POST",
|
||||
"/api/v1/databases/create",
|
||||
"Bearer "+token,
|
||||
request,
|
||||
)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf(
|
||||
"Failed to create database with schemas. Status: %d, Body: %s",
|
||||
w.Code,
|
||||
w.Body.String(),
|
||||
)
|
||||
}
|
||||
|
||||
var createdDatabase databases.Database
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &createdDatabase); err != nil {
|
||||
t.Fatalf("Failed to unmarshal database response: %v", err)
|
||||
}
|
||||
|
||||
return &createdDatabase
|
||||
}
|
||||
|
||||
func createSupabaseDatabaseViaAPI(
|
||||
t *testing.T,
|
||||
router *gin.Engine,
|
||||
name string,
|
||||
workspaceID uuid.UUID,
|
||||
host string,
|
||||
port int,
|
||||
username string,
|
||||
password string,
|
||||
database string,
|
||||
includeSchemas []string,
|
||||
token string,
|
||||
) *databases.Database {
|
||||
request := databases.Database{
|
||||
Name: name,
|
||||
WorkspaceID: &workspaceID,
|
||||
Type: databases.DatabaseTypePostgres,
|
||||
Postgresql: &pgtypes.PostgresqlDatabase{
|
||||
Host: host,
|
||||
Port: port,
|
||||
Username: username,
|
||||
Password: password,
|
||||
Database: &database,
|
||||
IsHttps: true,
|
||||
IncludeSchemas: includeSchemas,
|
||||
},
|
||||
}
|
||||
|
||||
w := workspaces_testing.MakeAPIRequest(
|
||||
router,
|
||||
"POST",
|
||||
"/api/v1/databases/create",
|
||||
"Bearer "+token,
|
||||
request,
|
||||
)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf(
|
||||
"Failed to create Supabase database. Status: %d, Body: %s",
|
||||
w.Code,
|
||||
w.Body.String(),
|
||||
)
|
||||
}
|
||||
|
||||
var createdDatabase databases.Database
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &createdDatabase); err != nil {
|
||||
t.Fatalf("Failed to unmarshal database response: %v", err)
|
||||
}
|
||||
|
||||
return &createdDatabase
|
||||
}
|
||||
|
||||
func createSupabaseRestoreViaAPI(
|
||||
t *testing.T,
|
||||
router *gin.Engine,
|
||||
backupID uuid.UUID,
|
||||
host string,
|
||||
port int,
|
||||
username string,
|
||||
password string,
|
||||
database string,
|
||||
token string,
|
||||
) {
|
||||
request := restores.RestoreBackupRequest{
|
||||
PostgresqlDatabase: &pgtypes.PostgresqlDatabase{
|
||||
Host: host,
|
||||
Port: port,
|
||||
Username: username,
|
||||
Password: password,
|
||||
Database: &database,
|
||||
IsHttps: true,
|
||||
},
|
||||
}
|
||||
|
||||
test_utils.MakePostRequest(
|
||||
t,
|
||||
router,
|
||||
fmt.Sprintf("/api/v1/restores/%s/restore", backupID.String()),
|
||||
"Bearer "+token,
|
||||
request,
|
||||
http.StatusOK,
|
||||
)
|
||||
}
|
||||
|
||||
func verifyDataIntegrity(t *testing.T, originalDB *sqlx.DB, restoredDB *sqlx.DB) {
|
||||
var originalData []TestDataItem
|
||||
var restoredData []TestDataItem
|
||||
@@ -550,7 +1054,6 @@ func connectToPostgresContainer(version string, port string) (*PostgresContainer
|
||||
Username: username,
|
||||
Password: password,
|
||||
Database: dbName,
|
||||
Version: version,
|
||||
DB: db,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
ALTER TABLE postgresql_databases
|
||||
ADD COLUMN include_schemas TEXT NOT NULL DEFAULT '';
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
ALTER TABLE postgresql_databases
|
||||
DROP COLUMN include_schemas;
|
||||
-- +goose StatementEnd
|
||||
@@ -0,0 +1,11 @@
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
ALTER TABLE s3_storages
|
||||
ADD COLUMN skip_tls_verify BOOLEAN NOT NULL DEFAULT FALSE;
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
ALTER TABLE s3_storages
|
||||
DROP COLUMN skip_tls_verify;
|
||||
-- +goose StatementEnd
|
||||
@@ -32,6 +32,29 @@ Then open `http://localhost:4005` in your browser.
|
||||
| `image.pullPolicy` | Image pull policy | `Always` |
|
||||
| `replicaCount` | Number of replicas | `1` |
|
||||
|
||||
### Custom Root CA
|
||||
|
||||
| Parameter | Description | Default Value |
|
||||
| -------------- | ---------------------------------------- | ------------- |
|
||||
| `customRootCA` | Name of Secret containing CA certificate | `""` |
|
||||
|
||||
To trust a custom CA certificate (e.g., for internal services with self-signed certificates):
|
||||
|
||||
1. Create a Secret with your CA certificate:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic my-root-ca \
|
||||
--from-file=ca.crt=./path/to/ca-certificate.crt
|
||||
```
|
||||
|
||||
2. Reference it in values:
|
||||
|
||||
```yaml
|
||||
customRootCA: my-root-ca
|
||||
```
|
||||
|
||||
The certificate will be mounted to `/etc/ssl/certs/custom-root-ca.crt` and the `SSL_CERT_FILE` environment variable will be set automatically.
|
||||
|
||||
### Service
|
||||
|
||||
| Parameter | Description | Default Value |
|
||||
|
||||
@@ -39,6 +39,11 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.customRootCA }}
|
||||
env:
|
||||
- name: SSL_CERT_FILE
|
||||
value: /etc/ssl/certs/custom-root-ca.crt
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.service.targetPort }}
|
||||
@@ -46,6 +51,12 @@ spec:
|
||||
volumeMounts:
|
||||
- name: postgresus-storage
|
||||
mountPath: {{ .Values.persistence.mountPath }}
|
||||
{{- if .Values.customRootCA }}
|
||||
- name: custom-root-ca
|
||||
mountPath: /etc/ssl/certs/custom-root-ca.crt
|
||||
subPath: ca.crt
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- if .Values.livenessProbe.enabled }}
|
||||
@@ -66,6 +77,12 @@ spec:
|
||||
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
|
||||
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.customRootCA }}
|
||||
volumes:
|
||||
- name: custom-root-ca
|
||||
secret:
|
||||
secretName: {{ .Values.customRootCA }}
|
||||
{{- end }}
|
||||
{{- if .Values.persistence.enabled }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
|
||||
@@ -9,6 +9,9 @@ image:
|
||||
# StatefulSet configuration
|
||||
replicaCount: 1
|
||||
|
||||
# RootCA setup, need name of secret in same namespace
|
||||
customRootCA: ""
|
||||
|
||||
# Service configuration
|
||||
service:
|
||||
type: ClusterIP
|
||||
|
||||
1092
frontend/package-lock.json
generated
1092
frontend/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -8,7 +8,9 @@
|
||||
"build": "tsc -b && vite build",
|
||||
"lint": "eslint .",
|
||||
"format": "prettier --write \"**/*.{ts,tsx,js,jsx,json,css,md}\"",
|
||||
"preview": "vite preview"
|
||||
"preview": "vite preview",
|
||||
"test": "vitest run",
|
||||
"test:watch": "vitest"
|
||||
},
|
||||
"dependencies": {
|
||||
"@tailwindcss/vite": "^4.1.7",
|
||||
@@ -22,6 +24,7 @@
|
||||
"tailwindcss": "^4.1.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@vitest/coverage-v8": "^3.2.4",
|
||||
"@eslint/js": "^9.25.0",
|
||||
"@trivago/prettier-plugin-sort-imports": "^5.2.2",
|
||||
"@types/react": "^19.1.2",
|
||||
@@ -36,6 +39,7 @@
|
||||
"prettier-plugin-tailwindcss": "^0.6.11",
|
||||
"typescript": "~5.8.3",
|
||||
"typescript-eslint": "^8.30.1",
|
||||
"vite": "^6.3.5"
|
||||
"vite": "^6.3.5",
|
||||
"vitest": "^3.2.4"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,528 @@
|
||||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import {
|
||||
ConnectionStringParser,
|
||||
type ParseError,
|
||||
type ParseResult,
|
||||
} from './ConnectionStringParser';
|
||||
|
||||
describe('ConnectionStringParser', () => {
|
||||
// Helper to assert successful parse
|
||||
const expectSuccess = (result: ParseResult | ParseError): ParseResult => {
|
||||
expect('error' in result).toBe(false);
|
||||
return result as ParseResult;
|
||||
};
|
||||
|
||||
// Helper to assert parse error
|
||||
const expectError = (result: ParseResult | ParseError): ParseError => {
|
||||
expect('error' in result).toBe(true);
|
||||
return result as ParseError;
|
||||
};
|
||||
|
||||
describe('Standard PostgreSQL URI (postgresql://)', () => {
|
||||
it('should parse basic postgresql:// connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://myuser:mypassword@localhost:5432/mydb'),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('localhost');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('myuser');
|
||||
expect(result.password).toBe('mypassword');
|
||||
expect(result.database).toBe('mydb');
|
||||
expect(result.isHttps).toBe(false);
|
||||
});
|
||||
|
||||
it('should default port to 5432 when not specified', () => {
|
||||
const result = expectSuccess(ConnectionStringParser.parse('postgresql://user:pass@host/db'));
|
||||
|
||||
expect(result.port).toBe(5432);
|
||||
});
|
||||
|
||||
it('should handle URL-encoded passwords', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://user:p%40ss%23word@host:5432/db'),
|
||||
);
|
||||
|
||||
expect(result.password).toBe('p@ss#word');
|
||||
});
|
||||
|
||||
it('should handle URL-encoded usernames', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://user%40domain:password@host:5432/db'),
|
||||
);
|
||||
|
||||
expect(result.username).toBe('user@domain');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Postgres URI (postgres://)', () => {
|
||||
it('should parse basic postgres:// connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgres://admin:secret@db.example.com:5432/production'),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('db.example.com');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('admin');
|
||||
expect(result.password).toBe('secret');
|
||||
expect(result.database).toBe('production');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Supabase Direct Connection', () => {
|
||||
it('should parse Supabase direct connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://postgres:mySecretPassword@db.abcdefghijklmnop.supabase.co:5432/postgres',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('db.abcdefghijklmnop.supabase.co');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('postgres');
|
||||
expect(result.password).toBe('mySecretPassword');
|
||||
expect(result.database).toBe('postgres');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Supabase Pooler Connection', () => {
|
||||
it('should parse Supabase pooler session mode connection string (port 5432)', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgres://postgres.abcdefghijklmnop:myPassword@aws-0-us-east-1.pooler.supabase.com:5432/postgres',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('aws-0-us-east-1.pooler.supabase.com');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('postgres.abcdefghijklmnop');
|
||||
expect(result.password).toBe('myPassword');
|
||||
expect(result.database).toBe('postgres');
|
||||
});
|
||||
|
||||
it('should parse Supabase pooler transaction mode connection string (port 6543)', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgres://postgres.projectref:myPassword@aws-0-eu-west-1.pooler.supabase.com:6543/postgres',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('aws-0-eu-west-1.pooler.supabase.com');
|
||||
expect(result.port).toBe(6543);
|
||||
expect(result.username).toBe('postgres.projectref');
|
||||
});
|
||||
});
|
||||
|
||||
describe('JDBC Connection String', () => {
|
||||
it('should parse JDBC connection string with user and password params', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'jdbc:postgresql://localhost:5432/mydb?user=admin&password=secret',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('localhost');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('admin');
|
||||
expect(result.password).toBe('secret');
|
||||
expect(result.database).toBe('mydb');
|
||||
});
|
||||
|
||||
it('should parse JDBC connection string without port', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'jdbc:postgresql://db.example.com/mydb?user=admin&password=secret',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('db.example.com');
|
||||
expect(result.port).toBe(5432);
|
||||
});
|
||||
|
||||
it('should parse JDBC with sslmode parameter', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'jdbc:postgresql://host:5432/db?user=u&password=p&sslmode=require',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
|
||||
it('should return error for JDBC without user parameter', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('jdbc:postgresql://host:5432/db?password=secret'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('user');
|
||||
expect(result.format).toBe('JDBC');
|
||||
});
|
||||
|
||||
it('should return error for JDBC without password parameter', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('jdbc:postgresql://host:5432/db?user=admin'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('Password');
|
||||
expect(result.format).toBe('JDBC');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Neon Connection String', () => {
|
||||
it('should parse Neon connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://neonuser:password123@ep-cool-name-123456.us-east-2.aws.neon.tech/neondb',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('ep-cool-name-123456.us-east-2.aws.neon.tech');
|
||||
expect(result.username).toBe('neonuser');
|
||||
expect(result.database).toBe('neondb');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Railway Connection String', () => {
|
||||
it('should parse Railway connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://postgres:railwaypass@containers-us-west-123.railway.app:5432/railway',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('containers-us-west-123.railway.app');
|
||||
expect(result.username).toBe('postgres');
|
||||
expect(result.database).toBe('railway');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Render Connection String', () => {
|
||||
it('should parse Render connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://renderuser:renderpass@dpg-abc123.oregon-postgres.render.com/mydb',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('dpg-abc123.oregon-postgres.render.com');
|
||||
expect(result.username).toBe('renderuser');
|
||||
expect(result.database).toBe('mydb');
|
||||
});
|
||||
});
|
||||
|
||||
describe('DigitalOcean Connection String', () => {
|
||||
it('should parse DigitalOcean connection string with sslmode', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://doadmin:dopassword@db-postgresql-nyc1-12345-do-user-123456-0.b.db.ondigitalocean.com:25060/defaultdb?sslmode=require',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('db-postgresql-nyc1-12345-do-user-123456-0.b.db.ondigitalocean.com');
|
||||
expect(result.port).toBe(25060);
|
||||
expect(result.username).toBe('doadmin');
|
||||
expect(result.database).toBe('defaultdb');
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('AWS RDS Connection String', () => {
|
||||
it('should parse AWS RDS connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://rdsuser:rdspass@mydb.abc123xyz.us-east-1.rds.amazonaws.com:5432/mydb',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('mydb.abc123xyz.us-east-1.rds.amazonaws.com');
|
||||
expect(result.username).toBe('rdsuser');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Azure Database for PostgreSQL Connection String', () => {
|
||||
it('should parse Azure connection string with user@server format', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://myuser@myserver:mypassword@myserver.postgres.database.azure.com:5432/mydb?sslmode=require',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('myserver.postgres.database.azure.com');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('myuser');
|
||||
expect(result.password).toBe('mypassword');
|
||||
expect(result.database).toBe('mydb');
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Heroku Connection String', () => {
|
||||
it('should parse Heroku connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgres://herokuuser:herokupass@ec2-12-34-56-789.compute-1.amazonaws.com:5432/herokudb',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('ec2-12-34-56-789.compute-1.amazonaws.com');
|
||||
expect(result.username).toBe('herokuuser');
|
||||
expect(result.database).toBe('herokudb');
|
||||
});
|
||||
});
|
||||
|
||||
describe('CockroachDB Connection String', () => {
|
||||
it('should parse CockroachDB connection string with sslmode=verify-full', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://crdbuser:crdbpass@free-tier.gcp-us-central1.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('free-tier.gcp-us-central1.cockroachlabs.cloud');
|
||||
expect(result.port).toBe(26257);
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('SSL Mode Handling', () => {
|
||||
it('should set isHttps=true for sslmode=require', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://u:p@host:5432/db?sslmode=require'),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isHttps=true for sslmode=verify-ca', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://u:p@host:5432/db?sslmode=verify-ca'),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isHttps=true for sslmode=verify-full', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://u:p@host:5432/db?sslmode=verify-full'),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isHttps=false for sslmode=disable', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://u:p@host:5432/db?sslmode=disable'),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(false);
|
||||
});
|
||||
|
||||
it('should set isHttps=false when no sslmode specified', () => {
|
||||
const result = expectSuccess(ConnectionStringParser.parse('postgresql://u:p@host:5432/db'));
|
||||
|
||||
expect(result.isHttps).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('libpq Key-Value Format', () => {
|
||||
it('should parse libpq format connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'host=localhost port=5432 dbname=mydb user=admin password=secret',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('localhost');
|
||||
expect(result.port).toBe(5432);
|
||||
expect(result.username).toBe('admin');
|
||||
expect(result.password).toBe('secret');
|
||||
expect(result.database).toBe('mydb');
|
||||
});
|
||||
|
||||
it('should parse libpq format with quoted password containing spaces', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
"host=localhost port=5432 dbname=mydb user=admin password='my secret pass'",
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.password).toBe('my secret pass');
|
||||
});
|
||||
|
||||
it('should default port to 5432 when not specified in libpq format', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('host=localhost dbname=mydb user=admin password=secret'),
|
||||
);
|
||||
|
||||
expect(result.port).toBe(5432);
|
||||
});
|
||||
|
||||
it('should handle hostaddr as alternative to host', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'hostaddr=192.168.1.1 port=5432 dbname=mydb user=admin password=secret',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('192.168.1.1');
|
||||
});
|
||||
|
||||
it('should handle database as alternative to dbname', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'host=localhost port=5432 database=mydb user=admin password=secret',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.database).toBe('mydb');
|
||||
});
|
||||
|
||||
it('should handle username as alternative to user', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'host=localhost port=5432 dbname=mydb username=admin password=secret',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.username).toBe('admin');
|
||||
});
|
||||
|
||||
it('should parse sslmode in libpq format', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'host=localhost dbname=mydb user=admin password=secret sslmode=require',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(true);
|
||||
});
|
||||
|
||||
it('should return error for libpq format missing host', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('port=5432 dbname=mydb user=admin password=secret'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('Host');
|
||||
expect(result.format).toBe('libpq');
|
||||
});
|
||||
|
||||
it('should return error for libpq format missing user', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('host=localhost dbname=mydb password=secret'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('Username');
|
||||
expect(result.format).toBe('libpq');
|
||||
});
|
||||
|
||||
it('should return error for libpq format missing password', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('host=localhost dbname=mydb user=admin'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('Password');
|
||||
expect(result.format).toBe('libpq');
|
||||
});
|
||||
|
||||
it('should return error for libpq format missing dbname', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('host=localhost user=admin password=secret'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('Database');
|
||||
expect(result.format).toBe('libpq');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Cases', () => {
|
||||
it('should return error for empty string', () => {
|
||||
const result = expectError(ConnectionStringParser.parse(''));
|
||||
|
||||
expect(result.error).toContain('empty');
|
||||
});
|
||||
|
||||
it('should return error for whitespace-only string', () => {
|
||||
const result = expectError(ConnectionStringParser.parse(' '));
|
||||
|
||||
expect(result.error).toContain('empty');
|
||||
});
|
||||
|
||||
it('should return error for unrecognized format', () => {
|
||||
const result = expectError(ConnectionStringParser.parse('some random text'));
|
||||
|
||||
expect(result.error).toContain('Unrecognized');
|
||||
});
|
||||
|
||||
it('should return error for missing username in URI', () => {
|
||||
const result = expectError(
|
||||
ConnectionStringParser.parse('postgresql://:password@host:5432/db'),
|
||||
);
|
||||
|
||||
expect(result.error).toContain('Username');
|
||||
});
|
||||
|
||||
it('should return error for missing password in URI', () => {
|
||||
const result = expectError(ConnectionStringParser.parse('postgresql://user@host:5432/db'));
|
||||
|
||||
expect(result.error).toContain('Password');
|
||||
});
|
||||
|
||||
it('should return error for missing database in URI', () => {
|
||||
const result = expectError(ConnectionStringParser.parse('postgresql://user:pass@host:5432/'));
|
||||
|
||||
expect(result.error).toContain('Database');
|
||||
});
|
||||
|
||||
it('should return error for invalid JDBC format', () => {
|
||||
const result = expectError(ConnectionStringParser.parse('jdbc:postgresql://invalid'));
|
||||
|
||||
expect(result.format).toBe('JDBC');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Edge Cases', () => {
|
||||
it('should handle special characters in password', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://user:p%40ss%3Aw%2Ford@host:5432/db'),
|
||||
);
|
||||
|
||||
expect(result.password).toBe('p@ss:w/ord');
|
||||
});
|
||||
|
||||
it('should handle numeric database names', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://user:pass@host:5432/12345'),
|
||||
);
|
||||
|
||||
expect(result.database).toBe('12345');
|
||||
});
|
||||
|
||||
it('should handle hyphenated host names', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse('postgresql://user:pass@my-database-host.example.com:5432/db'),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('my-database-host.example.com');
|
||||
});
|
||||
|
||||
it('should handle connection string with extra query parameters', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(
|
||||
'postgresql://user:pass@host:5432/db?sslmode=require&connect_timeout=10&application_name=myapp',
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.isHttps).toBe(true);
|
||||
expect(result.database).toBe('db');
|
||||
});
|
||||
|
||||
it('should trim whitespace from connection string', () => {
|
||||
const result = expectSuccess(
|
||||
ConnectionStringParser.parse(' postgresql://user:pass@host:5432/db '),
|
||||
);
|
||||
|
||||
expect(result.host).toBe('host');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,284 @@
|
||||
/**
 * Successful parse output: the individual PostgreSQL connection fields
 * extracted from a connection string.
 */
export type ParseResult = {
  host: string;
  port: number;
  username: string;
  password: string;
  database: string;
  // true when the string requests SSL via sslmode=require/verify-ca/verify-full
  isHttps: boolean;
};

/**
 * Failed parse output with a human-readable reason.
 */
export type ParseError = {
  error: string;
  // Connection-string format being parsed when the error occurred
  // (e.g. 'JDBC', 'libpq'), when known.
  format?: string;
};
|
||||
|
||||
export class ConnectionStringParser {
|
||||
/**
|
||||
* Parses a PostgreSQL connection string in various formats.
|
||||
*
|
||||
* Supported formats:
|
||||
* 1. Standard PostgreSQL URI: postgresql://user:pass@host:port/db
|
||||
* 2. Postgres URI: postgres://user:pass@host:port/db
|
||||
* 3. Supabase Direct: postgresql://postgres:pass@db.xxx.supabase.co:5432/postgres
|
||||
* 4. Supabase Pooler Session: postgres://postgres.ref:pass@aws-0-region.pooler.supabase.com:5432/postgres
|
||||
* 5. Supabase Pooler Transaction: same as above with port 6543
|
||||
* 6. JDBC: jdbc:postgresql://host:port/db?user=x&password=y
|
||||
* 7. Neon: postgresql://user:pass@ep-xxx.neon.tech/db
|
||||
* 8. Railway: postgresql://postgres:pass@xxx.railway.app:port/railway
|
||||
* 9. Render: postgresql://user:pass@xxx.render.com/db
|
||||
* 10. DigitalOcean: postgresql://user:pass@xxx.ondigitalocean.com:port/db?sslmode=require
|
||||
* 11. AWS RDS: postgresql://user:pass@xxx.rds.amazonaws.com:port/db
|
||||
* 12. Azure: postgresql://user@server:pass@xxx.postgres.database.azure.com:port/db?sslmode=require
|
||||
* 13. Heroku: postgres://user:pass@ec2-xxx.amazonaws.com:port/db
|
||||
* 14. CockroachDB: postgresql://user:pass@xxx.cockroachlabs.cloud:port/db?sslmode=verify-full
|
||||
* 15. With SSL params: postgresql://user:pass@host:port/db?sslmode=require
|
||||
* 16. libpq key-value: host=x port=5432 dbname=db user=u password=p
|
||||
*/
|
||||
static parse(connectionString: string): ParseResult | ParseError {
|
||||
const trimmed = connectionString.trim();
|
||||
|
||||
if (!trimmed) {
|
||||
return { error: 'Connection string is empty' };
|
||||
}
|
||||
|
||||
// Try JDBC format first (starts with jdbc:)
|
||||
if (trimmed.startsWith('jdbc:postgresql://')) {
|
||||
return this.parseJdbc(trimmed);
|
||||
}
|
||||
|
||||
// Try libpq key-value format (contains key=value pairs without ://)
|
||||
if (this.isLibpqFormat(trimmed)) {
|
||||
return this.parseLibpq(trimmed);
|
||||
}
|
||||
|
||||
// Try URI format (postgresql:// or postgres://)
|
||||
if (trimmed.startsWith('postgresql://') || trimmed.startsWith('postgres://')) {
|
||||
return this.parseUri(trimmed);
|
||||
}
|
||||
|
||||
return {
|
||||
error: 'Unrecognized connection string format',
|
||||
};
|
||||
}
|
||||
|
||||
private static isLibpqFormat(str: string): boolean {
|
||||
// libpq format has key=value pairs separated by spaces
|
||||
// Must contain at least host= or dbname= to be considered libpq format
|
||||
return (
|
||||
!str.includes('://') &&
|
||||
(str.includes('host=') || str.includes('dbname=')) &&
|
||||
str.includes('=')
|
||||
);
|
||||
}
|
||||
|
||||
private static parseUri(connectionString: string): ParseResult | ParseError {
|
||||
try {
|
||||
// Handle Azure format where username contains @: user@server:pass
|
||||
// Azure format: postgresql://user@servername:password@host:port/db
|
||||
const azureMatch = connectionString.match(
|
||||
/^postgres(?:ql)?:\/\/([^@:]+)@([^:]+):([^@]+)@([^:/?]+):?(\d+)?\/([^?]+)(?:\?(.*))?$/,
|
||||
);
|
||||
|
||||
if (azureMatch) {
|
||||
const [, user, , password, host, port, database, queryString] = azureMatch;
|
||||
const isHttps = this.checkSslMode(queryString);
|
||||
|
||||
return {
|
||||
host: host,
|
||||
port: port ? parseInt(port, 10) : 5432,
|
||||
username: decodeURIComponent(user),
|
||||
password: decodeURIComponent(password),
|
||||
database: decodeURIComponent(database),
|
||||
isHttps,
|
||||
};
|
||||
}
|
||||
|
||||
// Standard URI parsing using URL API
|
||||
const url = new URL(connectionString);
|
||||
|
||||
const host = url.hostname;
|
||||
const port = url.port ? parseInt(url.port, 10) : 5432;
|
||||
const username = decodeURIComponent(url.username);
|
||||
const password = decodeURIComponent(url.password);
|
||||
const database = decodeURIComponent(url.pathname.slice(1)); // Remove leading /
|
||||
const isHttps = this.checkSslMode(url.search);
|
||||
|
||||
// Validate required fields
|
||||
if (!host) {
|
||||
return { error: 'Host is missing from connection string' };
|
||||
}
|
||||
|
||||
if (!username) {
|
||||
return { error: 'Username is missing from connection string' };
|
||||
}
|
||||
|
||||
if (!password) {
|
||||
return { error: 'Password is missing from connection string' };
|
||||
}
|
||||
|
||||
if (!database) {
|
||||
return { error: 'Database name is missing from connection string' };
|
||||
}
|
||||
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
username,
|
||||
password,
|
||||
database,
|
||||
isHttps,
|
||||
};
|
||||
} catch (e) {
|
||||
return {
|
||||
error: `Failed to parse connection string: ${(e as Error).message}`,
|
||||
format: 'URI',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private static parseJdbc(connectionString: string): ParseResult | ParseError {
|
||||
try {
|
||||
// JDBC format: jdbc:postgresql://host:port/database?user=x&password=y
|
||||
const jdbcRegex = /^jdbc:postgresql:\/\/([^:/?]+):?(\d+)?\/([^?]+)(?:\?(.*))?$/;
|
||||
const match = connectionString.match(jdbcRegex);
|
||||
|
||||
if (!match) {
|
||||
return {
|
||||
error:
|
||||
'Invalid JDBC connection string format. Expected: jdbc:postgresql://host:port/database?user=x&password=y',
|
||||
format: 'JDBC',
|
||||
};
|
||||
}
|
||||
|
||||
const [, host, port, database, queryString] = match;
|
||||
|
||||
if (!queryString) {
|
||||
return {
|
||||
error: 'JDBC connection string is missing query parameters (user and password)',
|
||||
format: 'JDBC',
|
||||
};
|
||||
}
|
||||
|
||||
const params = new URLSearchParams(queryString);
|
||||
const username = params.get('user');
|
||||
const password = params.get('password');
|
||||
const isHttps = this.checkSslMode(queryString);
|
||||
|
||||
if (!username) {
|
||||
return {
|
||||
error: 'Username (user parameter) is missing from JDBC connection string',
|
||||
format: 'JDBC',
|
||||
};
|
||||
}
|
||||
|
||||
if (!password) {
|
||||
return {
|
||||
error: 'Password parameter is missing from JDBC connection string',
|
||||
format: 'JDBC',
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
host,
|
||||
port: port ? parseInt(port, 10) : 5432,
|
||||
username: decodeURIComponent(username),
|
||||
password: decodeURIComponent(password),
|
||||
database: decodeURIComponent(database),
|
||||
isHttps,
|
||||
};
|
||||
} catch (e) {
|
||||
return {
|
||||
error: `Failed to parse JDBC connection string: ${(e as Error).message}`,
|
||||
format: 'JDBC',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private static parseLibpq(connectionString: string): ParseResult | ParseError {
|
||||
try {
|
||||
// libpq format: host=x port=5432 dbname=db user=u password=p
|
||||
// Values can be quoted with single quotes: password='my pass'
|
||||
const params: Record<string, string> = {};
|
||||
|
||||
// Match key=value or key='quoted value'
|
||||
const regex = /(\w+)=(?:'([^']*)'|(\S+))/g;
|
||||
let match;
|
||||
|
||||
while ((match = regex.exec(connectionString)) !== null) {
|
||||
const key = match[1];
|
||||
const value = match[2] !== undefined ? match[2] : match[3];
|
||||
params[key] = value;
|
||||
}
|
||||
|
||||
const host = params['host'] || params['hostaddr'];
|
||||
const port = params['port'];
|
||||
const database = params['dbname'] || params['database'];
|
||||
const username = params['user'] || params['username'];
|
||||
const password = params['password'];
|
||||
const sslmode = params['sslmode'];
|
||||
|
||||
if (!host) {
|
||||
return {
|
||||
error: 'Host is missing from connection string. Use host=hostname',
|
||||
format: 'libpq',
|
||||
};
|
||||
}
|
||||
|
||||
if (!username) {
|
||||
return {
|
||||
error: 'Username is missing from connection string. Use user=username',
|
||||
format: 'libpq',
|
||||
};
|
||||
}
|
||||
|
||||
if (!password) {
|
||||
return {
|
||||
error: 'Password is missing from connection string. Use password=yourpassword',
|
||||
format: 'libpq',
|
||||
};
|
||||
}
|
||||
|
||||
if (!database) {
|
||||
return {
|
||||
error: 'Database name is missing from connection string. Use dbname=database',
|
||||
format: 'libpq',
|
||||
};
|
||||
}
|
||||
|
||||
const isHttps = this.isSslEnabled(sslmode);
|
||||
|
||||
return {
|
||||
host,
|
||||
port: port ? parseInt(port, 10) : 5432,
|
||||
username,
|
||||
password,
|
||||
database,
|
||||
isHttps,
|
||||
};
|
||||
} catch (e) {
|
||||
return {
|
||||
error: `Failed to parse libpq connection string: ${(e as Error).message}`,
|
||||
format: 'libpq',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private static checkSslMode(queryString: string | undefined | null): boolean {
|
||||
if (!queryString) return false;
|
||||
|
||||
const params = new URLSearchParams(
|
||||
queryString.startsWith('?') ? queryString.slice(1) : queryString,
|
||||
);
|
||||
const sslmode = params.get('sslmode');
|
||||
|
||||
return this.isSslEnabled(sslmode);
|
||||
}
|
||||
|
||||
private static isSslEnabled(sslmode: string | null | undefined): boolean {
|
||||
if (!sslmode) return false;
|
||||
|
||||
// These modes require SSL
|
||||
const sslModes = ['require', 'verify-ca', 'verify-full'];
|
||||
return sslModes.includes(sslmode.toLowerCase());
|
||||
}
|
||||
}
|
||||
@@ -11,4 +11,7 @@ export interface PostgresqlDatabase {
|
||||
password: string;
|
||||
database?: string;
|
||||
isHttps: boolean;
|
||||
|
||||
// backup settings
|
||||
includeSchemas?: string[];
|
||||
}
|
||||
|
||||
@@ -6,4 +6,5 @@ export interface S3Storage {
|
||||
s3Endpoint?: string;
|
||||
s3Prefix?: string;
|
||||
s3UseVirtualHostedStyle?: boolean;
|
||||
skipTLSVerify?: boolean;
|
||||
}
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
import { InfoCircleOutlined } from '@ant-design/icons';
|
||||
import { Button, Input, InputNumber, Select, Switch, Tooltip } from 'antd';
|
||||
import { CopyOutlined, DownOutlined, UpOutlined } from '@ant-design/icons';
|
||||
import { App, Button, Input, InputNumber, Select, Switch } from 'antd';
|
||||
import { useEffect, useState } from 'react';
|
||||
|
||||
import {
|
||||
type Database,
|
||||
DatabaseType,
|
||||
PostgresqlVersion,
|
||||
databaseApi,
|
||||
} from '../../../../entity/databases';
|
||||
import { type Database, DatabaseType, databaseApi } from '../../../../entity/databases';
|
||||
import { ConnectionStringParser } from '../../../../entity/databases/model/postgresql/ConnectionStringParser';
|
||||
import { ToastHelper } from '../../../../shared/toast';
|
||||
|
||||
interface Props {
|
||||
@@ -23,7 +19,6 @@ interface Props {
|
||||
isSaveToApi: boolean;
|
||||
onSaved: (database: Database) => void;
|
||||
|
||||
isShowDbVersionHint?: boolean;
|
||||
isShowDbName?: boolean;
|
||||
}
|
||||
|
||||
@@ -39,10 +34,10 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
saveButtonText,
|
||||
isSaveToApi,
|
||||
onSaved,
|
||||
|
||||
isShowDbVersionHint = true,
|
||||
isShowDbName = true,
|
||||
}: Props) => {
|
||||
const { message } = App.useApp();
|
||||
|
||||
const [editingDatabase, setEditingDatabase] = useState<Database>();
|
||||
const [isSaving, setIsSaving] = useState(false);
|
||||
|
||||
@@ -50,6 +45,76 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
const [isTestingConnection, setIsTestingConnection] = useState(false);
|
||||
const [isConnectionFailed, setIsConnectionFailed] = useState(false);
|
||||
|
||||
const hasAdvancedValues = !!database.postgresql?.includeSchemas?.length;
|
||||
const [isShowAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
|
||||
|
||||
const [hasAutoAddedPublicSchema, setHasAutoAddedPublicSchema] = useState(false);
|
||||
|
||||
const parseFromClipboard = async () => {
|
||||
try {
|
||||
const text = await navigator.clipboard.readText();
|
||||
const trimmedText = text.trim();
|
||||
|
||||
if (!trimmedText) {
|
||||
message.error('Clipboard is empty');
|
||||
return;
|
||||
}
|
||||
|
||||
const result = ConnectionStringParser.parse(trimmedText);
|
||||
|
||||
if ('error' in result) {
|
||||
message.error(result.error);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!editingDatabase?.postgresql) return;
|
||||
|
||||
const updatedDatabase: Database = {
|
||||
...editingDatabase,
|
||||
postgresql: {
|
||||
...editingDatabase.postgresql,
|
||||
host: result.host,
|
||||
port: result.port,
|
||||
username: result.username,
|
||||
password: result.password,
|
||||
database: result.database,
|
||||
isHttps: result.isHttps,
|
||||
},
|
||||
};
|
||||
|
||||
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
|
||||
setIsConnectionTested(false);
|
||||
message.success('Connection string parsed successfully');
|
||||
} catch {
|
||||
message.error('Failed to read clipboard. Please check browser permissions.');
|
||||
}
|
||||
};
|
||||
|
||||
const autoAddPublicSchemaForSupabase = (updatedDatabase: Database): Database => {
|
||||
if (hasAutoAddedPublicSchema) return updatedDatabase;
|
||||
|
||||
const host = updatedDatabase.postgresql?.host || '';
|
||||
const username = updatedDatabase.postgresql?.username || '';
|
||||
const isSupabase = host.includes('supabase') || username.includes('supabase');
|
||||
|
||||
if (isSupabase && updatedDatabase.postgresql) {
|
||||
setHasAutoAddedPublicSchema(true);
|
||||
|
||||
const currentSchemas = updatedDatabase.postgresql.includeSchemas || [];
|
||||
if (!currentSchemas.includes('public')) {
|
||||
return {
|
||||
...updatedDatabase,
|
||||
postgresql: {
|
||||
...updatedDatabase.postgresql,
|
||||
includeSchemas: ['public', ...currentSchemas],
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return updatedDatabase;
|
||||
};
|
||||
|
||||
const testConnection = async () => {
|
||||
if (!editingDatabase) return;
|
||||
setIsTestingConnection(true);
|
||||
@@ -100,7 +165,6 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
if (!editingDatabase) return null;
|
||||
|
||||
let isAllFieldsFilled = true;
|
||||
if (!editingDatabase.postgresql?.version) isAllFieldsFilled = false;
|
||||
if (!editingDatabase.postgresql?.host) isAllFieldsFilled = false;
|
||||
if (!editingDatabase.postgresql?.port) isAllFieldsFilled = false;
|
||||
if (!editingDatabase.postgresql?.username) isAllFieldsFilled = false;
|
||||
@@ -111,49 +175,23 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
editingDatabase.postgresql?.host?.includes('localhost') ||
|
||||
editingDatabase.postgresql?.host?.includes('127.0.0.1');
|
||||
|
||||
const isSupabaseDb =
|
||||
editingDatabase.postgresql?.host?.includes('supabase') ||
|
||||
editingDatabase.postgresql?.username?.includes('supabase');
|
||||
|
||||
return (
|
||||
<div>
|
||||
{editingDatabase.type === DatabaseType.POSTGRES && (
|
||||
<>
|
||||
<div className="mb-1 flex w-full items-center">
|
||||
<div className="min-w-[150px]">PG version</div>
|
||||
|
||||
<Select
|
||||
value={editingDatabase.postgresql?.version}
|
||||
onChange={(v) => {
|
||||
if (!editingDatabase.postgresql) return;
|
||||
|
||||
setEditingDatabase({
|
||||
...editingDatabase,
|
||||
postgresql: {
|
||||
...editingDatabase.postgresql,
|
||||
version: v as PostgresqlVersion,
|
||||
},
|
||||
});
|
||||
setIsConnectionTested(false);
|
||||
}}
|
||||
size="small"
|
||||
className="max-w-[200px] grow"
|
||||
placeholder="Select PG version"
|
||||
options={[
|
||||
{ label: '12', value: PostgresqlVersion.PostgresqlVersion12 },
|
||||
{ label: '13', value: PostgresqlVersion.PostgresqlVersion13 },
|
||||
{ label: '14', value: PostgresqlVersion.PostgresqlVersion14 },
|
||||
{ label: '15', value: PostgresqlVersion.PostgresqlVersion15 },
|
||||
{ label: '16', value: PostgresqlVersion.PostgresqlVersion16 },
|
||||
{ label: '17', value: PostgresqlVersion.PostgresqlVersion17 },
|
||||
{ label: '18', value: PostgresqlVersion.PostgresqlVersion18 },
|
||||
]}
|
||||
/>
|
||||
|
||||
{isShowDbVersionHint && (
|
||||
<Tooltip
|
||||
className="cursor-pointer"
|
||||
title="Please select the version of PostgreSQL you are backing up now. You will be able to restore backup to the same version or higher"
|
||||
>
|
||||
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
|
||||
</Tooltip>
|
||||
)}
|
||||
<div className="mb-3 flex">
|
||||
<div className="min-w-[150px]" />
|
||||
<div
|
||||
className="cursor-pointer text-sm text-gray-600 transition-colors hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-200"
|
||||
onClick={parseFromClipboard}
|
||||
>
|
||||
<CopyOutlined className="mr-1" />
|
||||
Parse from clipboard
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="mb-1 flex w-full items-center">
|
||||
@@ -163,13 +201,14 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
onChange={(e) => {
|
||||
if (!editingDatabase.postgresql) return;
|
||||
|
||||
setEditingDatabase({
|
||||
const updatedDatabase = {
|
||||
...editingDatabase,
|
||||
postgresql: {
|
||||
...editingDatabase.postgresql,
|
||||
host: e.target.value.trim().replace('https://', '').replace('http://', ''),
|
||||
},
|
||||
});
|
||||
};
|
||||
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
|
||||
setIsConnectionTested(false);
|
||||
}}
|
||||
size="small"
|
||||
@@ -184,7 +223,7 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
|
||||
Please{' '}
|
||||
<a
|
||||
href="https://postgresus.com/faq#how-to-backup-localhost"
|
||||
href="https://postgresus.com/faq/localhost"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
className="!text-blue-600 dark:!text-blue-400"
|
||||
@@ -196,6 +235,24 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{isSupabaseDb && (
|
||||
<div className="mb-1 flex">
|
||||
<div className="min-w-[150px]" />
|
||||
<div className="max-w-[200px] text-xs text-gray-500 dark:text-gray-400">
|
||||
Please{' '}
|
||||
<a
|
||||
href="https://postgresus.com/faq/supabase"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
className="!text-blue-600 dark:!text-blue-400"
|
||||
>
|
||||
read this document
|
||||
</a>{' '}
|
||||
to study how to backup Supabase database
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="mb-1 flex w-full items-center">
|
||||
<div className="min-w-[150px]">Port</div>
|
||||
<InputNumber
|
||||
@@ -223,10 +280,11 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
onChange={(e) => {
|
||||
if (!editingDatabase.postgresql) return;
|
||||
|
||||
setEditingDatabase({
|
||||
const updatedDatabase = {
|
||||
...editingDatabase,
|
||||
postgresql: { ...editingDatabase.postgresql, username: e.target.value.trim() },
|
||||
});
|
||||
};
|
||||
setEditingDatabase(autoAddPublicSchemaForSupabase(updatedDatabase));
|
||||
setIsConnectionTested(false);
|
||||
}}
|
||||
size="small"
|
||||
@@ -291,6 +349,43 @@ export const EditDatabaseSpecificDataComponent = ({
|
||||
size="small"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="mt-4 mb-3 flex items-center">
|
||||
<div
|
||||
className="flex cursor-pointer items-center text-sm text-blue-600 hover:text-blue-800"
|
||||
onClick={() => setShowAdvanced(!isShowAdvanced)}
|
||||
>
|
||||
<span className="mr-2">Advanced settings</span>
|
||||
|
||||
{isShowAdvanced ? (
|
||||
<UpOutlined style={{ fontSize: '12px' }} />
|
||||
) : (
|
||||
<DownOutlined style={{ fontSize: '12px' }} />
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{isShowAdvanced && (
|
||||
<div className="mb-1 flex w-full items-center">
|
||||
<div className="min-w-[150px]">Include schemas</div>
|
||||
<Select
|
||||
mode="tags"
|
||||
value={editingDatabase.postgresql?.includeSchemas || []}
|
||||
onChange={(values) => {
|
||||
if (!editingDatabase.postgresql) return;
|
||||
|
||||
setEditingDatabase({
|
||||
...editingDatabase,
|
||||
postgresql: { ...editingDatabase.postgresql, includeSchemas: values },
|
||||
});
|
||||
}}
|
||||
size="small"
|
||||
className="max-w-[200px] grow"
|
||||
placeholder="All schemas (default)"
|
||||
tokenSeparators={[',']}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
|
||||
|
||||
@@ -57,6 +57,13 @@ export const ShowDatabaseSpecificDataComponent = ({ database }: Props) => {
|
||||
<div className="min-w-[150px]">Use HTTPS</div>
|
||||
<div>{database.postgresql?.isHttps ? 'Yes' : 'No'}</div>
|
||||
</div>
|
||||
|
||||
{!!database.postgresql?.includeSchemas?.length && (
|
||||
<div className="mb-1 flex w-full items-center">
|
||||
<div className="min-w-[150px]">Include schemas</div>
|
||||
<div>{database.postgresql.includeSchemas.join(', ')}</div>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -111,7 +111,6 @@ export const RestoresComponent = ({ database, backup }: Props) => {
|
||||
setEditingDatabase({ ...database });
|
||||
restore(database);
|
||||
}}
|
||||
isShowDbVersionHint={false}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
|
||||
@@ -39,6 +39,7 @@ export function EditStorageComponent({
|
||||
|
||||
const [isTestingConnection, setIsTestingConnection] = useState(false);
|
||||
const [isTestConnectionSuccess, setIsTestConnectionSuccess] = useState(false);
|
||||
const [connectionError, setConnectionError] = useState<string | undefined>();
|
||||
|
||||
const save = async () => {
|
||||
if (!storage) return;
|
||||
@@ -60,6 +61,7 @@ export function EditStorageComponent({
|
||||
if (!storage) return;
|
||||
|
||||
setIsTestingConnection(true);
|
||||
setConnectionError(undefined);
|
||||
|
||||
try {
|
||||
await storageApi.testStorageConnectionDirect(storage);
|
||||
@@ -69,7 +71,9 @@ export function EditStorageComponent({
|
||||
description: 'Storage connection tested successfully',
|
||||
});
|
||||
} catch (e) {
|
||||
alert((e as Error).message);
|
||||
const errorMessage = (e as Error).message;
|
||||
setConnectionError(errorMessage);
|
||||
alert(errorMessage);
|
||||
}
|
||||
|
||||
setIsTestingConnection(false);
|
||||
@@ -290,7 +294,9 @@ export function EditStorageComponent({
|
||||
setUnsaved={() => {
|
||||
setIsUnsaved(true);
|
||||
setIsTestConnectionSuccess(false);
|
||||
setConnectionError(undefined);
|
||||
}}
|
||||
connectionError={connectionError}
|
||||
/>
|
||||
)}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { DownOutlined, InfoCircleOutlined, UpOutlined } from '@ant-design/icons';
|
||||
import { Checkbox, Input, Tooltip } from 'antd';
|
||||
import { useState } from 'react';
|
||||
import { useEffect, useState } from 'react';
|
||||
|
||||
import type { Storage } from '../../../../../entity/storages';
|
||||
|
||||
@@ -8,13 +8,27 @@ interface Props {
|
||||
storage: Storage;
|
||||
setStorage: (storage: Storage) => void;
|
||||
setUnsaved: () => void;
|
||||
connectionError?: string;
|
||||
}
|
||||
|
||||
export function EditS3StorageComponent({ storage, setStorage, setUnsaved }: Props) {
|
||||
export function EditS3StorageComponent({
|
||||
storage,
|
||||
setStorage,
|
||||
setUnsaved,
|
||||
connectionError,
|
||||
}: Props) {
|
||||
const hasAdvancedValues =
|
||||
!!storage?.s3Storage?.s3Prefix || !!storage?.s3Storage?.s3UseVirtualHostedStyle;
|
||||
!!storage?.s3Storage?.s3Prefix ||
|
||||
!!storage?.s3Storage?.s3UseVirtualHostedStyle ||
|
||||
!!storage?.s3Storage?.skipTLSVerify;
|
||||
const [showAdvanced, setShowAdvanced] = useState(hasAdvancedValues);
|
||||
|
||||
useEffect(() => {
|
||||
if (connectionError?.includes('failed to verify certificate')) {
|
||||
setShowAdvanced(true);
|
||||
}
|
||||
}, [connectionError]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="mb-2 flex items-center">
|
||||
@@ -226,6 +240,36 @@ export function EditS3StorageComponent({ storage, setStorage, setUnsaved }: Prop
|
||||
</Tooltip>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="mb-1 flex w-full flex-col items-start sm:flex-row sm:items-center">
|
||||
<div className="mb-1 min-w-[110px] sm:mb-0">Skip TLS verify</div>
|
||||
<div className="flex items-center">
|
||||
<Checkbox
|
||||
checked={storage?.s3Storage?.skipTLSVerify || false}
|
||||
onChange={(e) => {
|
||||
if (!storage?.s3Storage) return;
|
||||
|
||||
setStorage({
|
||||
...storage,
|
||||
s3Storage: {
|
||||
...storage.s3Storage,
|
||||
skipTLSVerify: e.target.checked,
|
||||
},
|
||||
});
|
||||
setUnsaved();
|
||||
}}
|
||||
>
|
||||
Skip TLS
|
||||
</Checkbox>
|
||||
|
||||
<Tooltip
|
||||
className="cursor-pointer"
|
||||
title="Skip TLS certificate verification. Enable this if your S3-compatible storage uses a self-signed certificate. Warning: this reduces security."
|
||||
>
|
||||
<InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
|
||||
</Tooltip>
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
|
||||
@@ -45,6 +45,13 @@ export function ShowS3StorageComponent({ storage }: Props) {
|
||||
Enabled
|
||||
</div>
|
||||
)}
|
||||
|
||||
{storage?.s3Storage?.skipTLSVerify && (
|
||||
<div className="mb-1 flex items-center">
|
||||
<div className="min-w-[110px]">Skip TLS</div>
|
||||
Enabled
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
8
frontend/vitest.config.ts
Normal file
8
frontend/vitest.config.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { defineConfig } from 'vitest/config';
|
||||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
environment: 'node',
|
||||
include: ['src/**/*.test.ts'],
|
||||
},
|
||||
});
|
||||
470
package-lock.json
generated
470
package-lock.json
generated
@@ -1,470 +0,0 @@
|
||||
{
|
||||
"name": "postgresus",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"dependencies": {
|
||||
"@types/recharts": "^1.8.29",
|
||||
"recharts": "^3.2.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@reduxjs/toolkit": {
|
||||
"version": "2.9.0",
|
||||
"resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.9.0.tgz",
|
||||
"integrity": "sha512-fSfQlSRu9Z5yBkvsNhYF2rPS8cGXn/TZVrlwN1948QyZ8xMZ0JvP50S2acZNaf+o63u6aEeMjipFyksjIcWrog==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@standard-schema/spec": "^1.0.0",
|
||||
"@standard-schema/utils": "^0.3.0",
|
||||
"immer": "^10.0.3",
|
||||
"redux": "^5.0.1",
|
||||
"redux-thunk": "^3.1.0",
|
||||
"reselect": "^5.1.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": "^16.9.0 || ^17.0.0 || ^18 || ^19",
|
||||
"react-redux": "^7.2.1 || ^8.1.3 || ^9.0.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"react": {
|
||||
"optional": true
|
||||
},
|
||||
"react-redux": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@standard-schema/spec": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz",
|
||||
"integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@standard-schema/utils": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz",
|
||||
"integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/d3-array": {
|
||||
"version": "3.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
|
||||
"integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/d3-color": {
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
|
||||
"integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/d3-ease": {
|
||||
"version": "3.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
|
||||
"integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/d3-interpolate": {
|
||||
"version": "3.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
|
||||
"integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/d3-color": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/d3-path": {
|
||||
"version": "1.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-1.0.11.tgz",
|
||||
"integrity": "sha512-4pQMp8ldf7UaB/gR8Fvvy69psNHkTpD/pVw3vmEi8iZAB9EPMBruB1JvHO4BIq9QkUUd2lV1F5YXpMNj7JPBpw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/d3-scale": {
|
||||
"version": "4.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
|
||||
"integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/d3-time": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/d3-shape": {
|
||||
"version": "1.3.12",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-1.3.12.tgz",
|
||||
"integrity": "sha512-8oMzcd4+poSLGgV0R1Q1rOlx/xdmozS4Xab7np0eamFFUYq71AU9pOCJEFnkXW2aI/oXdVYJzw6pssbSut7Z9Q==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/d3-path": "^1"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/d3-time": {
|
||||
"version": "3.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
|
||||
"integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/d3-timer": {
|
||||
"version": "3.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
|
||||
"integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/react": {
|
||||
"version": "19.1.13",
|
||||
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.13.tgz",
|
||||
"integrity": "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"csstype": "^3.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/recharts": {
|
||||
"version": "1.8.29",
|
||||
"resolved": "https://registry.npmjs.org/@types/recharts/-/recharts-1.8.29.tgz",
|
||||
"integrity": "sha512-ulKklaVsnFIIhTQsQw226TnOibrddW1qUQNFVhoQEyY1Z7FRQrNecFCGt7msRuJseudzE9czVawZb17dK/aPXw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/d3-shape": "^1",
|
||||
"@types/react": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/use-sync-external-store": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz",
|
||||
"integrity": "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/clsx": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
|
||||
"integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
},
|
||||
"node_modules/csstype": {
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
|
||||
"integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/d3-array": {
|
||||
"version": "3.2.4",
|
||||
"resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
|
||||
"integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"internmap": "1 - 2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-color": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
|
||||
"integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-ease": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
|
||||
"integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
|
||||
"license": "BSD-3-Clause",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-format": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
|
||||
"integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-interpolate": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
|
||||
"integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"d3-color": "1 - 3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-path": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
|
||||
"integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-scale": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
|
||||
"integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"d3-array": "2.10.0 - 3",
|
||||
"d3-format": "1 - 3",
|
||||
"d3-interpolate": "1.2.0 - 3",
|
||||
"d3-time": "2.1.1 - 3",
|
||||
"d3-time-format": "2 - 4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-shape": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
|
||||
"integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"d3-path": "^3.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-time": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
|
||||
"integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"d3-array": "2 - 3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-time-format": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
|
||||
"integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"d3-time": "1 - 3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/d3-timer": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
|
||||
"integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/decimal.js-light": {
|
||||
"version": "2.5.1",
|
||||
"resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz",
|
||||
"integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/es-toolkit": {
|
||||
"version": "1.39.10",
|
||||
"resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.10.tgz",
|
||||
"integrity": "sha512-E0iGnTtbDhkeczB0T+mxmoVlT4YNweEKBLq7oaU4p11mecdsZpNWOglI4895Vh4usbQ+LsJiuLuI2L0Vdmfm2w==",
|
||||
"license": "MIT",
|
||||
"workspaces": [
|
||||
"docs",
|
||||
"benchmarks"
|
||||
]
|
||||
},
|
||||
"node_modules/eventemitter3": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz",
|
||||
"integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/immer": {
|
||||
"version": "10.1.3",
|
||||
"resolved": "https://registry.npmjs.org/immer/-/immer-10.1.3.tgz",
|
||||
"integrity": "sha512-tmjF/k8QDKydUlm3mZU+tjM6zeq9/fFpPqH9SzWmBnVVKsPBg/V66qsMwb3/Bo90cgUN+ghdVBess+hPsxUyRw==",
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/immer"
|
||||
}
|
||||
},
|
||||
"node_modules/internmap": {
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
|
||||
"integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/react": {
|
||||
"version": "19.1.1",
|
||||
"resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz",
|
||||
"integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/react-dom": {
|
||||
"version": "19.1.1",
|
||||
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz",
|
||||
"integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"scheduler": "^0.26.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": "^19.1.1"
|
||||
}
|
||||
},
|
||||
"node_modules/react-is": {
|
||||
"version": "19.1.1",
|
||||
"resolved": "https://registry.npmjs.org/react-is/-/react-is-19.1.1.tgz",
|
||||
"integrity": "sha512-tr41fA15Vn8p4X9ntI+yCyeGSf1TlYaY5vlTZfQmeLBrFo3psOPX6HhTDnFNL9uj3EhP0KAQ80cugCl4b4BERA==",
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
},
|
||||
"node_modules/react-redux": {
|
||||
"version": "9.2.0",
|
||||
"resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.2.0.tgz",
|
||||
"integrity": "sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/use-sync-external-store": "^0.0.6",
|
||||
"use-sync-external-store": "^1.4.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "^18.2.25 || ^19",
|
||||
"react": "^18.0 || ^19",
|
||||
"redux": "^5.0.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"redux": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/recharts": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/recharts/-/recharts-3.2.0.tgz",
|
||||
"integrity": "sha512-fX0xCgNXo6mag9wz3oLuANR+dUQM4uIlTYBGTGq9CBRgW/8TZPzqPGYs5NTt8aENCf+i1CI8vqxT1py8L/5J2w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@reduxjs/toolkit": "1.x.x || 2.x.x",
|
||||
"clsx": "^2.1.1",
|
||||
"decimal.js-light": "^2.5.1",
|
||||
"es-toolkit": "^1.39.3",
|
||||
"eventemitter3": "^5.0.1",
|
||||
"immer": "^10.1.1",
|
||||
"react-redux": "8.x.x || 9.x.x",
|
||||
"reselect": "5.1.1",
|
||||
"tiny-invariant": "^1.3.3",
|
||||
"use-sync-external-store": "^1.2.2",
|
||||
"victory-vendor": "^37.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
|
||||
"react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
|
||||
"react-is": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/redux": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/redux/-/redux-5.0.1.tgz",
|
||||
"integrity": "sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/redux-thunk": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-3.1.0.tgz",
|
||||
"integrity": "sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"redux": "^5.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/reselect": {
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz",
|
||||
"integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/scheduler": {
|
||||
"version": "0.26.0",
|
||||
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz",
|
||||
"integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==",
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
},
|
||||
"node_modules/tiny-invariant": {
|
||||
"version": "1.3.3",
|
||||
"resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz",
|
||||
"integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/use-sync-external-store": {
|
||||
"version": "1.5.0",
|
||||
"resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.5.0.tgz",
|
||||
"integrity": "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/victory-vendor": {
|
||||
"version": "37.3.6",
|
||||
"resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-37.3.6.tgz",
|
||||
"integrity": "sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ==",
|
||||
"license": "MIT AND ISC",
|
||||
"dependencies": {
|
||||
"@types/d3-array": "^3.0.3",
|
||||
"@types/d3-ease": "^3.0.0",
|
||||
"@types/d3-interpolate": "^3.0.1",
|
||||
"@types/d3-scale": "^4.0.2",
|
||||
"@types/d3-shape": "^3.1.0",
|
||||
"@types/d3-time": "^3.0.0",
|
||||
"@types/d3-timer": "^3.0.0",
|
||||
"d3-array": "^3.1.6",
|
||||
"d3-ease": "^3.0.1",
|
||||
"d3-interpolate": "^3.0.1",
|
||||
"d3-scale": "^4.0.2",
|
||||
"d3-shape": "^3.1.0",
|
||||
"d3-time": "^3.0.0",
|
||||
"d3-timer": "^3.0.1"
|
||||
}
|
||||
},
|
||||
"node_modules/victory-vendor/node_modules/@types/d3-shape": {
|
||||
"version": "3.1.7",
|
||||
"resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz",
|
||||
"integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/d3-path": "*"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"dependencies": {
|
||||
"@types/recharts": "^1.8.29",
|
||||
"recharts": "^3.2.0"
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user