Compare commits

..

7 Commits

Author SHA1 Message Date
Rostislav Dugin
fcc894d1f5 FEATURE (monitorings): Get rid of monitoring 2025-10-31 17:14:40 +03:00
Rostislav Dugin
7307a515e2 FEATURE (license): Change license to Apache 2.0 2025-10-31 16:38:40 +03:00
Rostislav Dugin
5f280c0d6d FIX (pre-commit): Add pre-commit hooks 2025-10-31 16:33:41 +03:00
Rostislav Dugin
492605a1b0 FEATURE (cursor): Update cursor rules 2025-10-31 16:31:32 +03:00
Rostislav Dugin
f9eaead8a1 FEATURE (s3): Test connection via uploading/removing test file 2025-10-31 16:31:20 +03:00
Rostislav Dugin
aad9ed6589 FEATURE (contribute): Update tasks list [skip-release] 2025-10-30 22:22:45 +03:00
Rostislav Dugin
181c32ded3 FEATURE (readme): Add PostgreSQL 18 label [skip-release] 2025-09-30 12:37:12 +03:00
47 changed files with 1367 additions and 1880 deletions

29
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,29 @@
# Pre-commit configuration
# See https://pre-commit.com for more information
repos:
# Frontend checks
- repo: local
hooks:
- id: frontend-format
name: Frontend Format (Prettier)
entry: powershell -Command "cd frontend; npm run format"
language: system
files: ^frontend/.*\.(ts|tsx|js|jsx|json|css|md)$
pass_filenames: false
- id: frontend-lint
name: Frontend Lint (ESLint)
entry: powershell -Command "cd frontend; npm run lint"
language: system
files: ^frontend/.*\.(ts|tsx|js|jsx)$
pass_filenames: false
# Backend checks
- repo: local
hooks:
- id: backend-format-and-lint
name: Backend Format & Lint (golangci-lint)
entry: powershell -Command "cd backend; golangci-lint fmt; golangci-lint run"
language: system
files: ^backend/.*\.go$
pass_filenames: false

215
LICENSE
View File

@@ -1,21 +1,202 @@
MIT License
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2025 Postgresus
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
1. Definitions.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"Licensor" shall mean the copyright owner or entity granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(which shall not include communications that are solely written
by You).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based upon (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and derivative works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control
systems, and issue tracking systems that are managed by, or on behalf
of, the Licensor for the purpose of discussing and improving the Work,
but excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to use, reproduce, modify, distribute, prepare
Derivative Works of, and publicly display, publicly perform,
sublicense, and distribute the Work and such Derivative Works in
Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright notice to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. When redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "license" line as the copyright notice for easier
identification within third-party archives.
Copyright 2025 Postgresus
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -5,11 +5,11 @@
<p>Free, open source and self-hosted solution for automated PostgreSQL backups. With multiple storage options and notifications</p>
<!-- Badges -->
[![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
[![Apache 2.0 License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
[![Docker Pulls](https://img.shields.io/docker/pulls/rostislavdugin/postgresus?color=brightgreen)](https://hub.docker.com/r/rostislavdugin/postgresus)
[![Platform](https://img.shields.io/badge/platform-linux%20%7C%20macos%20%7C%20windows-lightgrey)](https://github.com/RostislavDugin/postgresus)
[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-13%20%7C%2014%20%7C%2015%20%7C%2016%20%7C%2017-336791?logo=postgresql&logoColor=white)](https://www.postgresql.org/)
[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-13%20%7C%2014%20%7C%2015%20%7C%2016%20%7C%2017%20%7C%2018-336791?logo=postgresql&logoColor=white)](https://www.postgresql.org/)
[![Self Hosted](https://img.shields.io/badge/self--hosted-yes-brightgreen)](https://github.com/RostislavDugin/postgresus)
[![Open Source](https://img.shields.io/badge/open%20source-❤️-red)](https://github.com/RostislavDugin/postgresus)
@@ -62,7 +62,7 @@
- **Docker-based**: Easy deployment and management
- **Privacy-first**: All your data stays on your infrastructure
- **Open source**: MIT licensed, inspect every line of code
- **Open source**: Apache 2.0 licensed, inspect every line of code
### 📦 Installation
@@ -161,7 +161,7 @@ docker exec -it postgresus ./main --new-password="YourNewSecurePassword123"
## 📝 License
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENSE) file for details.
---

View File

@@ -1,14 +1,152 @@
---
description:
globs:
description:
globs:
alwaysApply: true
---
Always place private methods to the bottom of file
Code should look like:
**This rule applies to ALL Go files including tests, services, controllers, repositories, etc.**
type SomeService struct {
func PublicMethod(...) ...
In Go, exported (public) functions/methods start with uppercase letters, while unexported (private) ones start with lowercase letters.
func privateMethod(...) ...
}
## Structure Order:
1. Type definitions and constants
2. Public methods/functions (uppercase)
3. Private methods/functions (lowercase)
## Examples:
### Service with methods:
```go
type UserService struct {
repository *UserRepository
}
// Public methods first
func (s *UserService) CreateUser(user *User) error {
if err := s.validateUser(user); err != nil {
return err
}
return s.repository.Save(user)
}
func (s *UserService) GetUser(id uuid.UUID) (*User, error) {
return s.repository.FindByID(id)
}
// Private methods at the bottom
func (s *UserService) validateUser(user *User) error {
if user.Name == "" {
return errors.New("name is required")
}
return nil
}
```
### Package-level functions:
```go
package utils
// Public functions first
func ProcessData(data []byte) (Result, error) {
cleaned := sanitizeInput(data)
return parseData(cleaned)
}
func ValidateInput(input string) bool {
return isValidFormat(input) && checkLength(input)
}
// Private functions at the bottom
func sanitizeInput(data []byte) []byte {
// implementation
}
func parseData(data []byte) (Result, error) {
// implementation
}
func isValidFormat(input string) bool {
// implementation
}
func checkLength(input string) bool {
// implementation
}
```
### Test files:
```go
package user_test
// Public test functions first
func Test_CreateUser_ValidInput_UserCreated(t *testing.T) {
user := createTestUser()
result, err := service.CreateUser(user)
assert.NoError(t, err)
assert.NotNil(t, result)
}
func Test_GetUser_ExistingUser_ReturnsUser(t *testing.T) {
user := createTestUser()
// test implementation
}
// Private helper functions at the bottom
func createTestUser() *User {
return &User{
Name: "Test User",
Email: "test@example.com",
}
}
func setupTestDatabase() *Database {
// setup implementation
}
```
### Controller example:
```go
type ProjectController struct {
service *ProjectService
}
// Public HTTP handlers first
func (c *ProjectController) CreateProject(ctx *gin.Context) {
var request CreateProjectRequest
if err := ctx.ShouldBindJSON(&request); err != nil {
c.handleError(ctx, err)
return
}
// handler logic
}
func (c *ProjectController) GetProject(ctx *gin.Context) {
projectID := c.extractProjectID(ctx)
// handler logic
}
// Private helper methods at the bottom
func (c *ProjectController) handleError(ctx *gin.Context, err error) {
ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
}
func (c *ProjectController) extractProjectID(ctx *gin.Context) uuid.UUID {
return uuid.MustParse(ctx.Param("projectId"))
}
```
## Key Points:
- **Exported/Public** = starts with uppercase letter (CreateUser, GetProject)
- **Unexported/Private** = starts with lowercase letter (validateUser, handleError)
- This improves code readability by showing the public API first
- Private helpers are implementation details, so they go at the bottom
- Apply this rule consistently across ALL Go files in the project

View File

@@ -1,7 +1,45 @@
---
description:
globs:
description:
globs:
alwaysApply: true
---
Do not write obvious comments.
Write meaningful code, give meaningful names
## Comment Guidelines
1. **No obvious comments** - Don't state what the code already clearly shows
2. **Functions and variables should have meaningful names** - Code should be self-documenting
3. **Comments for unclear code only** - Only add comments when code logic isn't immediately clear
## Key Principles:
- **Code should tell a story** - Use descriptive variable and function names
- **Comments explain WHY, not WHAT** - The code shows what happens, comments explain business logic or complex decisions
- **Prefer refactoring over commenting** - If code needs explaining, consider making it clearer instead
- **API documentation is required** - Swagger comments for all HTTP endpoints are mandatory
- **Complex algorithms deserve comments** - Mathematical formulas, business rules, or non-obvious optimizations
Example of useless comment:
1.
```sql
// Create projects table
CREATE TABLE projects (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
```
2.
```go
// Create test project
project := CreateTestProject(projectName, user, router)
```
3.
```go
// CreateValidLogItems creates valid log items for testing
func CreateValidLogItems(count int, uniqueID string) []logs_receiving.LogItemRequestDTO {
```

View File

@@ -14,42 +14,114 @@ func (c *TasksController) GetAvailableTasks(ctx *gin.Context) ...
3. We document all routes with Swagger in the following format:
// SignIn
// @Summary Authenticate a user
// @Description Authenticate a user with email and password
// @Tags users
// @Accept json
// @Produce json
// @Param request body SignInRequest true "User signin data"
// @Success 200 {object} SignInResponse
// @Failure 400
// @Router /users/signin [post]
package audit_logs
Do not forget to write docs.
You may omit the description if it adds no value.
Specify the particular accepting / producing models
import (
"net/http"
4. All controllers should have RegisterRoutes method which receives
RouterGroup (always put this routes on the top of file under controller definition)
user_models "logbull/internal/features/users/models"
Example:
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
func (c *OrderController) RegisterRoutes(router *gin.RouterGroup) {
router.POST("/bots/users/orders/generate", c.GenerateOrder)
router.POST("/bots/users/orders/generate-by-admin", c.GenerateOrderByAdmin)
router.GET("/bots/users/orders/mark-as-paid-by-admin", c.MarkOrderAsPaidByAdmin)
router.GET("/bots/users/orders/payments-by-bot", c.GetOrderPaymentsByBot)
router.GET("/bots/users/orders/payments-by-user", c.GetOrderPaymentsByUser)
router.GET("/bots/users/orders/orders-by-user-for-admin", c.GetOrdersByUserForAdmin)
router.POST("/bots/users/orders/orders-by-user-for-user", c.GetOrdersByUserForUser)
router.POST("/bots/users/orders/order", c.GetOrder)
router.POST("/bots/users/orders/cancel-subscription-by-user", c.CancelSubscriptionByUser)
router.GET("/bots/users/orders/cancel-subscription-by-admin", c.CancelSubscriptionByAdmin)
router.GET(
"/bots/users/orders/cancel-subscriptions-by-payment-option",
c.CancelSubscriptionsByPaymentOption,
)
type AuditLogController struct {
auditLogService *AuditLogService
}
5. Check that you use the valid .Query("param") and .Param("param") methods.
If the route does not have a path param, use .Query("param")
func (c *AuditLogController) RegisterRoutes(router *gin.RouterGroup) {
// All audit log endpoints require authentication (handled in main.go)
auditRoutes := router.Group("/audit-logs")
auditRoutes.GET("/global", c.GetGlobalAuditLogs)
auditRoutes.GET("/users/:userId", c.GetUserAuditLogs)
}
// GetGlobalAuditLogs
// @Summary Get global audit logs (ADMIN only)
// @Description Retrieve all audit logs across the system
// @Tags audit-logs
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param limit query int false "Limit number of results" default(100)
// @Param offset query int false "Offset for pagination" default(0)
// @Param beforeDate query string false "Filter logs created before this date (RFC3339 format)" format(date-time)
// @Success 200 {object} GetAuditLogsResponse
// @Failure 401 {object} map[string]string
// @Failure 403 {object} map[string]string
// @Router /audit-logs/global [get]
func (c *AuditLogController) GetGlobalAuditLogs(ctx *gin.Context) {
user, isOk := ctx.MustGet("user").(*user_models.User)
if !isOk {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid user type in context"})
return
}
request := &GetAuditLogsRequest{}
if err := ctx.ShouldBindQuery(request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid query parameters"})
return
}
response, err := c.auditLogService.GetGlobalAuditLogs(user, request)
if err != nil {
if err.Error() == "only administrators can view global audit logs" {
ctx.JSON(http.StatusForbidden, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve audit logs"})
return
}
ctx.JSON(http.StatusOK, response)
}
// GetUserAuditLogs
// @Summary Get user audit logs
// @Description Retrieve audit logs for a specific user
// @Tags audit-logs
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param userId path string true "User ID"
// @Param limit query int false "Limit number of results" default(100)
// @Param offset query int false "Offset for pagination" default(0)
// @Param beforeDate query string false "Filter logs created before this date (RFC3339 format)" format(date-time)
// @Success 200 {object} GetAuditLogsResponse
// @Failure 400 {object} map[string]string
// @Failure 401 {object} map[string]string
// @Failure 403 {object} map[string]string
// @Router /audit-logs/users/{userId} [get]
func (c *AuditLogController) GetUserAuditLogs(ctx *gin.Context) {
user, isOk := ctx.MustGet("user").(*user_models.User)
if !isOk {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid user type in context"})
return
}
userIDStr := ctx.Param("userId")
targetUserID, err := uuid.Parse(userIDStr)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid user ID"})
return
}
request := &GetAuditLogsRequest{}
if err := ctx.ShouldBindQuery(request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid query parameters"})
return
}
response, err := c.auditLogService.GetUserAuditLogs(targetUserID, user, request)
if err != nil {
if err.Error() == "insufficient permissions to view user audit logs" {
ctx.JSON(http.StatusForbidden, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve audit logs"})
return
}
ctx.JSON(http.StatusOK, response)
}

View File

@@ -0,0 +1,655 @@
---
alwaysApply: false
---
This is example of CRUD:
------ backend/internal/features/audit_logs/controller.go ------
``````
package audit_logs
import (
"net/http"
user_models "logbull/internal/features/users/models"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
type AuditLogController struct {
auditLogService *AuditLogService
}
func (c *AuditLogController) RegisterRoutes(router *gin.RouterGroup) {
// All audit log endpoints require authentication (handled in main.go)
auditRoutes := router.Group("/audit-logs")
auditRoutes.GET("/global", c.GetGlobalAuditLogs)
auditRoutes.GET("/users/:userId", c.GetUserAuditLogs)
}
// GetGlobalAuditLogs
// @Summary Get global audit logs (ADMIN only)
// @Description Retrieve all audit logs across the system
// @Tags audit-logs
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param limit query int false "Limit number of results" default(100)
// @Param offset query int false "Offset for pagination" default(0)
// @Param beforeDate query string false "Filter logs created before this date (RFC3339 format)" format(date-time)
// @Success 200 {object} GetAuditLogsResponse
// @Failure 401 {object} map[string]string
// @Failure 403 {object} map[string]string
// @Router /audit-logs/global [get]
func (c *AuditLogController) GetGlobalAuditLogs(ctx *gin.Context) {
user, isOk := ctx.MustGet("user").(*user_models.User)
if !isOk {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid user type in context"})
return
}
request := &GetAuditLogsRequest{}
if err := ctx.ShouldBindQuery(request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid query parameters"})
return
}
response, err := c.auditLogService.GetGlobalAuditLogs(user, request)
if err != nil {
if err.Error() == "only administrators can view global audit logs" {
ctx.JSON(http.StatusForbidden, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve audit logs"})
return
}
ctx.JSON(http.StatusOK, response)
}
// GetUserAuditLogs
// @Summary Get user audit logs
// @Description Retrieve audit logs for a specific user
// @Tags audit-logs
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param userId path string true "User ID"
// @Param limit query int false "Limit number of results" default(100)
// @Param offset query int false "Offset for pagination" default(0)
// @Param beforeDate query string false "Filter logs created before this date (RFC3339 format)" format(date-time)
// @Success 200 {object} GetAuditLogsResponse
// @Failure 400 {object} map[string]string
// @Failure 401 {object} map[string]string
// @Failure 403 {object} map[string]string
// @Router /audit-logs/users/{userId} [get]
func (c *AuditLogController) GetUserAuditLogs(ctx *gin.Context) {
user, isOk := ctx.MustGet("user").(*user_models.User)
if !isOk {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid user type in context"})
return
}
userIDStr := ctx.Param("userId")
targetUserID, err := uuid.Parse(userIDStr)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid user ID"})
return
}
request := &GetAuditLogsRequest{}
if err := ctx.ShouldBindQuery(request); err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid query parameters"})
return
}
response, err := c.auditLogService.GetUserAuditLogs(targetUserID, user, request)
if err != nil {
if err.Error() == "insufficient permissions to view user audit logs" {
ctx.JSON(http.StatusForbidden, gin.H{"error": err.Error()})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve audit logs"})
return
}
ctx.JSON(http.StatusOK, response)
}
``````
------ backend/internal/features/audit_logs/controller_test.go ------
``````
package audit_logs
import (
"fmt"
"net/http"
"testing"
"time"
user_enums "logbull/internal/features/users/enums"
users_middleware "logbull/internal/features/users/middleware"
users_services "logbull/internal/features/users/services"
users_testing "logbull/internal/features/users/testing"
"logbull/internal/storage"
test_utils "logbull/internal/util/testing"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func Test_GetGlobalAuditLogs_AdminSucceedsAndMemberGetsForbidden(t *testing.T) {
adminUser := users_testing.CreateTestUser(user_enums.UserRoleAdmin)
memberUser := users_testing.CreateTestUser(user_enums.UserRoleMember)
router := createRouter()
service := GetAuditLogService()
projectID := uuid.New()
// Create test logs
createAuditLog(service, "Test log with user", &adminUser.UserID, nil)
createAuditLog(service, "Test log with project", nil, &projectID)
createAuditLog(service, "Test log standalone", nil, nil)
// Test ADMIN can access global logs
var response GetAuditLogsResponse
test_utils.MakeGetRequestAndUnmarshal(t, router,
"/api/v1/audit-logs/global?limit=10", "Bearer "+adminUser.Token, http.StatusOK, &response)
assert.GreaterOrEqual(t, len(response.AuditLogs), 3)
assert.GreaterOrEqual(t, response.Total, int64(3))
messages := extractMessages(response.AuditLogs)
assert.Contains(t, messages, "Test log with user")
assert.Contains(t, messages, "Test log with project")
assert.Contains(t, messages, "Test log standalone")
// Test MEMBER cannot access global logs
resp := test_utils.MakeGetRequest(t, router, "/api/v1/audit-logs/global",
"Bearer "+memberUser.Token, http.StatusForbidden)
assert.Contains(t, string(resp.Body), "only administrators can view global audit logs")
}
// Test_GetUserAuditLogs_PermissionsEnforcedCorrectly verifies the per-user
// audit log endpoint: an ADMIN may read any user's logs, a user may read
// their own, and reading another user's logs is rejected with 403.
func Test_GetUserAuditLogs_PermissionsEnforcedCorrectly(t *testing.T) {
	adminUser := users_testing.CreateTestUser(user_enums.UserRoleAdmin)
	user1 := users_testing.CreateTestUser(user_enums.UserRoleMember)
	user2 := users_testing.CreateTestUser(user_enums.UserRoleMember)
	router := createRouter()
	service := GetAuditLogService()
	projectID := uuid.New()

	// Create test logs for different users: two per user, plus one
	// project-only log that must not show up in either user's page.
	createAuditLog(service, "Test log user1 first", &user1.UserID, nil)
	createAuditLog(service, "Test log user1 second", &user1.UserID, &projectID)
	createAuditLog(service, "Test log user2 first", &user2.UserID, nil)
	createAuditLog(service, "Test log user2 second", &user2.UserID, &projectID)
	createAuditLog(service, "Test project log", nil, &projectID)

	// Test ADMIN can view any user's logs
	var user1Response GetAuditLogsResponse
	test_utils.MakeGetRequestAndUnmarshal(t, router,
		fmt.Sprintf("/api/v1/audit-logs/users/%s?limit=10", user1.UserID.String()),
		"Bearer "+adminUser.Token, http.StatusOK, &user1Response)
	assert.Equal(t, 2, len(user1Response.AuditLogs))
	messages := extractMessages(user1Response.AuditLogs)
	assert.Contains(t, messages, "Test log user1 first")
	assert.Contains(t, messages, "Test log user1 second")

	// Test user can view own logs
	var ownLogsResponse GetAuditLogsResponse
	test_utils.MakeGetRequestAndUnmarshal(t, router,
		fmt.Sprintf("/api/v1/audit-logs/users/%s", user2.UserID.String()),
		"Bearer "+user2.Token, http.StatusOK, &ownLogsResponse)
	assert.Equal(t, 2, len(ownLogsResponse.AuditLogs))

	// Test user cannot view other user's logs
	resp := test_utils.MakeGetRequest(t, router,
		fmt.Sprintf("/api/v1/audit-logs/users/%s", user1.UserID.String()),
		"Bearer "+user2.Token, http.StatusForbidden)
	assert.Contains(t, string(resp.Body), "insufficient permissions")
}
// Test_FilterAuditLogsByTime_ReturnsOnlyLogsBeforeDate verifies the
// beforeDate query parameter on the global endpoint: only logs created
// strictly before the given RFC3339 instant are returned, and omitting the
// filter returns everything.
func Test_FilterAuditLogsByTime_ReturnsOnlyLogsBeforeDate(t *testing.T) {
	adminUser := users_testing.CreateTestUser(user_enums.UserRoleAdmin)
	router := createRouter()
	service := GetAuditLogService()
	db := storage.GetDb()
	baseTime := time.Now().UTC()

	// Create logs with different timestamps — the first two are inserted
	// directly via GORM so their CreatedAt can be backdated.
	createTimedLog(db, &adminUser.UserID, "Test old log", baseTime.Add(-2*time.Hour))
	createTimedLog(db, &adminUser.UserID, "Test recent log", baseTime.Add(-30*time.Minute))
	createAuditLog(service, "Test current log", &adminUser.UserID, nil)

	// Test filtering - get logs before 1 hour ago
	beforeTime := baseTime.Add(-1 * time.Hour)
	var filteredResponse GetAuditLogsResponse
	test_utils.MakeGetRequestAndUnmarshal(t, router,
		fmt.Sprintf("/api/v1/audit-logs/global?beforeDate=%s", beforeTime.Format(time.RFC3339)),
		"Bearer "+adminUser.Token, http.StatusOK, &filteredResponse)

	// Verify only old log is returned
	messages := extractMessages(filteredResponse.AuditLogs)
	assert.Contains(t, messages, "Test old log")
	assert.NotContains(t, messages, "Test recent log")
	assert.NotContains(t, messages, "Test current log")

	// Test without filter - should get all logs
	var allResponse GetAuditLogsResponse
	test_utils.MakeGetRequestAndUnmarshal(t, router, "/api/v1/audit-logs/global",
		"Bearer "+adminUser.Token, http.StatusOK, &allResponse)
	assert.GreaterOrEqual(t, len(allResponse.AuditLogs), 3)
}
// createRouter builds a gin engine in test mode with the audit-log routes
// mounted under /api/v1 behind the authentication middleware. It also wires
// the audit-log writer into the user services via SetupDependencies.
func createRouter() *gin.Engine {
	gin.SetMode(gin.TestMode)
	SetupDependencies()

	engine := gin.New()
	authMiddleware := users_middleware.AuthMiddleware(users_services.GetUserService())
	protected := engine.Group("/api/v1").Group("").Use(authMiddleware)
	GetAuditLogController().RegisterRoutes(protected.(*gin.RouterGroup))
	return engine
}
``````
------ backend/internal/features/audit_logs/di.go ------
``````
package audit_logs
import (
users_services "logbull/internal/features/users/services"
"logbull/internal/util/logger"
)
// Package-level singletons, wired at init time: repository -> service -> controller.
var auditLogRepository = &AuditLogRepository{}

// auditLogService persists audit entries and enforces read permissions.
var auditLogService = &AuditLogService{
	auditLogRepository: auditLogRepository,
	logger:             logger.GetLogger(),
}

// auditLogController exposes the service over HTTP.
var auditLogController = &AuditLogController{
	auditLogService: auditLogService,
}
// GetAuditLogService returns the shared audit log service singleton.
func GetAuditLogService() *AuditLogService {
	return auditLogService
}

// GetAuditLogController returns the shared audit log controller singleton.
func GetAuditLogController() *AuditLogController {
	return auditLogController
}
// SetupDependencies injects the audit log service as the audit-log writer
// into the user-facing services. Call once at startup (tests call it when
// building their router) before handling requests.
func SetupDependencies() {
	users_services.GetUserService().SetAuditLogWriter(auditLogService)
	users_services.GetSettingsService().SetAuditLogWriter(auditLogService)
	users_services.GetManagementService().SetAuditLogWriter(auditLogService)
}
``````
------ backend/internal/features/audit_logs/dto.go ------
``````
package audit_logs
import "time"
// GetAuditLogsRequest carries pagination parameters for audit log queries.
// Values arrive either as query parameters (form tags) or as a JSON body.
type GetAuditLogsRequest struct {
	Limit      int        `form:"limit" json:"limit"`           // page size; service clamps to 1..1000, default 100
	Offset     int        `form:"offset" json:"offset"`         // rows to skip; negative values are treated as 0
	BeforeDate *time.Time `form:"beforeDate" json:"beforeDate"` // optional: only logs created strictly before this instant
}

// GetAuditLogsResponse is one page of audit logs plus the paging parameters
// that produced it.
type GetAuditLogsResponse struct {
	AuditLogs []*AuditLog `json:"auditLogs"`
	// Total is the full matching count for global queries but only the
	// returned page length for user/project queries (see service methods).
	Total  int64 `json:"total"`
	Limit  int   `json:"limit"`
	Offset int   `json:"offset"`
}
``````
------ backend/internal/features/audit_logs/models.go ------
``````
package audit_logs
import (
"time"
"github.com/google/uuid"
)
// AuditLog is a single audit trail entry. UserID and ProjectID are both
// optional: either, both, or neither may be set, supporting global,
// per-user, and per-project events.
type AuditLog struct {
	ID        uuid.UUID  `json:"id" gorm:"column:id"`
	UserID    *uuid.UUID `json:"userId" gorm:"column:user_id"`       // acting user, if any
	ProjectID *uuid.UUID `json:"projectId" gorm:"column:project_id"` // affected project, if any
	Message   string     `json:"message" gorm:"column:message"`      // human-readable description of the event
	CreatedAt time.Time  `json:"createdAt" gorm:"column:created_at"` // set in UTC by the service
}

// TableName tells GORM which table backs this model.
func (AuditLog) TableName() string {
	return "audit_logs"
}
``````
------ backend/internal/features/audit_logs/repository.go ------
``````
package audit_logs
import (
"logbull/internal/storage"
"time"
"github.com/google/uuid"
)
type AuditLogRepository struct{}
// Create inserts the audit log row, first assigning a fresh UUID when the
// entry arrives without one.
func (r *AuditLogRepository) Create(entry *AuditLog) error {
	if entry.ID == uuid.Nil {
		entry.ID = uuid.New()
	}
	return storage.GetDb().Create(entry).Error
}
// GetGlobal returns one page of audit logs across all users and projects,
// newest first, optionally restricted to rows created strictly before
// beforeDate.
func (r *AuditLogRepository) GetGlobal(limit, offset int, beforeDate *time.Time) ([]*AuditLog, error) {
	query := storage.GetDb().
		Order("created_at DESC").
		Limit(limit).
		Offset(offset)
	if beforeDate != nil {
		query = query.Where("created_at < ?", *beforeDate)
	}

	var logs []*AuditLog
	err := query.Find(&logs).Error
	return logs, err
}
// GetByUser returns one page of a single user's audit logs, newest first,
// optionally restricted to rows created strictly before beforeDate.
func (r *AuditLogRepository) GetByUser(
	userID uuid.UUID,
	limit, offset int,
	beforeDate *time.Time,
) ([]*AuditLog, error) {
	query := storage.GetDb().
		Where("user_id = ?", userID).
		Order("created_at DESC").
		Limit(limit).
		Offset(offset)
	if beforeDate != nil {
		query = query.Where("created_at < ?", *beforeDate)
	}

	var logs []*AuditLog
	err := query.Find(&logs).Error
	return logs, err
}
// GetByProject returns one page of a single project's audit logs, newest
// first, optionally restricted to rows created strictly before beforeDate.
func (r *AuditLogRepository) GetByProject(
	projectID uuid.UUID,
	limit, offset int,
	beforeDate *time.Time,
) ([]*AuditLog, error) {
	query := storage.GetDb().
		Where("project_id = ?", projectID).
		Order("created_at DESC").
		Limit(limit).
		Offset(offset)
	if beforeDate != nil {
		query = query.Where("created_at < ?", *beforeDate)
	}

	var logs []*AuditLog
	err := query.Find(&logs).Error
	return logs, err
}
// CountGlobal counts all audit log rows, optionally only those created
// strictly before beforeDate. Pairs with GetGlobal for pagination totals.
func (r *AuditLogRepository) CountGlobal(beforeDate *time.Time) (int64, error) {
	query := storage.GetDb().Model(&AuditLog{})
	if beforeDate != nil {
		query = query.Where("created_at < ?", *beforeDate)
	}

	var total int64
	err := query.Count(&total).Error
	return total, err
}
``````
------ backend/internal/features/audit_logs/service.go ------
``````
package audit_logs
import (
"errors"
"log/slog"
"time"
user_enums "logbull/internal/features/users/enums"
user_models "logbull/internal/features/users/models"
"github.com/google/uuid"
)
type AuditLogService struct {
auditLogRepository *AuditLogRepository
logger *slog.Logger
}
// WriteAuditLog records an audit event in fire-and-forget style: the entry
// is timestamped in UTC, and persistence failures are logged rather than
// propagated, so callers never fail because of auditing.
func (s *AuditLogService) WriteAuditLog(
	message string,
	userID *uuid.UUID,
	projectID *uuid.UUID,
) {
	entry := &AuditLog{
		UserID:    userID,
		ProjectID: projectID,
		Message:   message,
		CreatedAt: time.Now().UTC(),
	}

	if err := s.auditLogRepository.Create(entry); err != nil {
		s.logger.Error("failed to create audit log", "error", err)
	}
}
// CreateAuditLog persists a caller-constructed entry and returns any
// repository error. Unlike WriteAuditLog, the timestamp is the caller's
// responsibility; the repository only fills in a missing ID.
func (s *AuditLogService) CreateAuditLog(auditLog *AuditLog) error {
	return s.auditLogRepository.Create(auditLog)
}
// GetGlobalAuditLogs returns one page of audit logs across the whole
// system, newest first. Only administrators may call it. The limit is
// clamped to 1..1000 (defaulting to 100), negative offsets are treated as
// 0, and Total carries the full matching row count for pagination.
func (s *AuditLogService) GetGlobalAuditLogs(
	user *user_models.User,
	request *GetAuditLogsRequest,
) (*GetAuditLogsResponse, error) {
	if user.Role != user_enums.UserRoleAdmin {
		return nil, errors.New("only administrators can view global audit logs")
	}

	// Normalize paging parameters.
	pageSize := request.Limit
	if pageSize <= 0 || pageSize > 1000 {
		pageSize = 100
	}
	skip := max(request.Offset, 0)

	logs, err := s.auditLogRepository.GetGlobal(pageSize, skip, request.BeforeDate)
	if err != nil {
		return nil, err
	}

	total, err := s.auditLogRepository.CountGlobal(request.BeforeDate)
	if err != nil {
		return nil, err
	}

	return &GetAuditLogsResponse{
		AuditLogs: logs,
		Total:     total,
		Limit:     pageSize,
		Offset:    skip,
	}, nil
}
// GetUserAuditLogs returns one page of audit logs for targetUserID, newest
// first. Users may read their own logs; administrators may read anyone's.
// Limit is clamped to 1..1000 (default 100); negative offsets become 0.
//
// NOTE(review): unlike GetGlobalAuditLogs, Total here is the length of the
// returned page, not the full per-user count, so clients cannot compute the
// real number of pages — consider adding a CountByUser repository method.
func (s *AuditLogService) GetUserAuditLogs(
	targetUserID uuid.UUID,
	user *user_models.User,
	request *GetAuditLogsRequest,
) (*GetAuditLogsResponse, error) {
	// Users can view their own logs, ADMIN can view any user's logs
	if user.Role != user_enums.UserRoleAdmin && user.ID != targetUserID {
		return nil, errors.New("insufficient permissions to view user audit logs")
	}

	// Normalize paging parameters (same clamping as the global endpoint).
	limit := request.Limit
	if limit <= 0 || limit > 1000 {
		limit = 100
	}
	offset := max(request.Offset, 0)

	auditLogs, err := s.auditLogRepository.GetByUser(targetUserID, limit, offset, request.BeforeDate)
	if err != nil {
		return nil, err
	}

	return &GetAuditLogsResponse{
		AuditLogs: auditLogs,
		Total:     int64(len(auditLogs)), // page length, not the full count
		Limit:     limit,
		Offset:    offset,
	}, nil
}
// GetProjectAuditLogs returns one page of audit logs for a project, newest
// first. Limit is clamped to 1..1000 (default 100); negative offsets become 0.
//
// NOTE(review): this method performs no permission check itself —
// presumably the calling controller enforces project membership; verify
// before exposing it through a new route. As with GetUserAuditLogs, Total
// is only the returned page length, not the full per-project count.
func (s *AuditLogService) GetProjectAuditLogs(
	projectID uuid.UUID,
	request *GetAuditLogsRequest,
) (*GetAuditLogsResponse, error) {
	// Normalize paging parameters (same clamping as the other endpoints).
	limit := request.Limit
	if limit <= 0 || limit > 1000 {
		limit = 100
	}
	offset := max(request.Offset, 0)

	auditLogs, err := s.auditLogRepository.GetByProject(projectID, limit, offset, request.BeforeDate)
	if err != nil {
		return nil, err
	}

	return &GetAuditLogsResponse{
		AuditLogs: auditLogs,
		Total:     int64(len(auditLogs)), // page length, not the full count
		Limit:     limit,
		Offset:    offset,
	}, nil
}
``````
------ backend/internal/features/audit_logs/service_test.go ------
``````
package audit_logs
import (
"testing"
"time"
user_enums "logbull/internal/features/users/enums"
users_testing "logbull/internal/features/users/testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
)
// Test_AuditLogs_ProjectSpecificLogs verifies GetProjectAuditLogs at the
// service level: project scoping, pagination, and the beforeDate filter.
// Fresh random project UUIDs isolate this test's rows from other tests
// sharing the database.
func Test_AuditLogs_ProjectSpecificLogs(t *testing.T) {
	service := GetAuditLogService()
	user1 := users_testing.CreateTestUser(user_enums.UserRoleMember)
	user2 := users_testing.CreateTestUser(user_enums.UserRoleMember)
	project1ID, project2ID := uuid.New(), uuid.New()

	// Create test logs for projects
	createAuditLog(service, "Test project1 log first", &user1.UserID, &project1ID)
	createAuditLog(service, "Test project1 log second", &user2.UserID, &project1ID)
	createAuditLog(service, "Test project2 log first", &user1.UserID, &project2ID)
	createAuditLog(service, "Test project2 log second", &user2.UserID, &project2ID)
	createAuditLog(service, "Test no project log", &user1.UserID, nil)

	request := &GetAuditLogsRequest{Limit: 10, Offset: 0}

	// Test project 1 logs
	project1Response, err := service.GetProjectAuditLogs(project1ID, request)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(project1Response.AuditLogs))
	messages := extractMessages(project1Response.AuditLogs)
	assert.Contains(t, messages, "Test project1 log first")
	assert.Contains(t, messages, "Test project1 log second")
	for _, log := range project1Response.AuditLogs {
		assert.Equal(t, &project1ID, log.ProjectID)
	}

	// Test project 2 logs
	project2Response, err := service.GetProjectAuditLogs(project2ID, request)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(project2Response.AuditLogs))
	messages2 := extractMessages(project2Response.AuditLogs)
	assert.Contains(t, messages2, "Test project2 log first")
	assert.Contains(t, messages2, "Test project2 log second")

	// Test pagination
	limitedResponse, err := service.GetProjectAuditLogs(project1ID,
		&GetAuditLogsRequest{Limit: 1, Offset: 0})
	assert.NoError(t, err)
	assert.Equal(t, 1, len(limitedResponse.AuditLogs))
	assert.Equal(t, 1, limitedResponse.Limit)

	// Test beforeDate filter
	beforeTime := time.Now().UTC().Add(-1 * time.Minute)
	filteredResponse, err := service.GetProjectAuditLogs(project1ID,
		&GetAuditLogsRequest{Limit: 10, BeforeDate: &beforeTime})
	assert.NoError(t, err)
	for _, log := range filteredResponse.AuditLogs {
		assert.True(t, log.CreatedAt.Before(beforeTime))
	}
}
// createAuditLog is a thin test helper over WriteAuditLog so test bodies
// read as one-liners.
func createAuditLog(service *AuditLogService, message string, userID, projectID *uuid.UUID) {
	service.WriteAuditLog(message, userID, projectID)
}
// extractMessages collects the Message field of each log in order, for easy
// assert.Contains checks.
func extractMessages(logs []*AuditLog) []string {
	result := make([]string, 0, len(logs))
	for _, entry := range logs {
		result = append(result, entry.Message)
	}
	return result
}
// createTimedLog inserts an audit log row directly through GORM, bypassing
// the service, so tests can control CreatedAt (needed for beforeDate
// filtering tests).
func createTimedLog(db *gorm.DB, userID *uuid.UUID, message string, createdAt time.Time) {
	log := &AuditLog{
		ID:        uuid.New(),
		UserID:    userID,
		Message:   message,
		CreatedAt: createdAt,
	}
	// NOTE(review): the insert error is silently discarded — consider
	// failing the test on error.
	db.Create(log)
}
``````

View File

@@ -0,0 +1,12 @@
---
description:
globs:
alwaysApply: true
---
When applying changes, do not forget to refactor old code.
You can shortify, make more readable, improve code quality, etc.
Common logic can be extracted to functions, constants, files, etc.
After each large change (more than ~50–100 lines of code), always run `make lint` from the backend root folder.

View File

@@ -1,12 +1,147 @@
---
description:
globs:
description:
globs:
alwaysApply: true
---
Write tests names in the format:
Test_WhatWeDo_WhatWeExpect
After writing tests, always launch them and verify that they pass.
Example:
- Test_TestConnection_ConnectionSucceeds
- Test_SaveNewStorage_StorageReturnedViaGet
## Test Naming Format
Use these naming patterns:
- `Test_WhatWeDo_WhatWeExpect`
- `Test_WhatWeDo_WhichConditions_WhatWeExpect`
## Examples from Real Codebase:
- `Test_CreateApiKey_WhenUserIsProjectOwner_ApiKeyCreated`
- `Test_UpdateProject_WhenUserIsProjectAdmin_ProjectUpdated`
- `Test_DeleteApiKey_WhenUserIsProjectMember_ReturnsForbidden`
- `Test_GetProjectAuditLogs_WithDifferentUserRoles_EnforcesPermissionsCorrectly`
- `Test_ProjectLifecycleE2E_CompletesSuccessfully`
## Testing Philosophy
**Prefer Controllers Over Unit Tests:**
- Test through HTTP endpoints via controllers whenever possible
- Avoid testing repositories, services in isolation - test via API instead
- Only use unit tests for complex model logic when no API exists
- Name test files `controller_test.go` or `service_test.go`, not `integration_test.go`
**Extract Common Logic to Testing Utilities:**
- Create `testing.go` or `testing/testing.go` files for shared test utilities
- Extract router creation, user setup, models creation helpers (in API, not just structs creation)
- Reuse common patterns across different test files
**Refactor Existing Tests:**
- When working with existing tests, always look for opportunities to refactor and improve
- Extract repetitive setup code to common utilities
- Simplify complex tests by breaking them into smaller, focused tests
- Replace inline test data creation with reusable helper functions
- Consolidate similar test patterns across different test files
- Make tests more readable and maintainable for other developers
## Testing Utilities Structure
**Create `testing.go` or `testing/testing.go` files with common utilities:**
```go
package projects_testing
// CreateTestRouter creates unified router for all controllers
func CreateTestRouter(controllers ...ControllerInterface) *gin.Engine {
gin.SetMode(gin.TestMode)
router := gin.New()
v1 := router.Group("/api/v1")
protected := v1.Group("").Use(users_middleware.AuthMiddleware(users_services.GetUserService()))
for _, controller := range controllers {
if routerGroup, ok := protected.(*gin.RouterGroup); ok {
controller.RegisterRoutes(routerGroup)
}
}
return router
}
// CreateTestProjectViaAPI creates project through HTTP API
func CreateTestProjectViaAPI(name string, owner *users_dto.SignInResponseDTO, router *gin.Engine) (*projects_models.Project, string) {
request := projects_dto.CreateProjectRequestDTO{Name: name}
w := MakeAPIRequest(router, "POST", "/api/v1/projects", "Bearer "+owner.Token, request)
// Handle response...
return project, owner.Token
}
// AddMemberToProject adds member via API call
func AddMemberToProject(project *projects_models.Project, member *users_dto.SignInResponseDTO, role users_enums.ProjectRole, ownerToken string, router *gin.Engine) {
// Implementation...
}
```
## Controller Test Examples
**Permission-based testing:**
```go
func Test_CreateApiKey_WhenUserIsProjectOwner_ApiKeyCreated(t *testing.T) {
router := CreateApiKeyTestRouter(GetProjectController(), GetMembershipController())
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
project, _ := projects_testing.CreateTestProjectViaAPI("Test Project", owner, router)
request := CreateApiKeyRequestDTO{Name: "Test API Key"}
var response ApiKey
test_utils.MakePostRequestAndUnmarshal(t, router, "/api/v1/projects/api-keys/"+project.ID.String(), "Bearer "+owner.Token, request, http.StatusOK, &response)
assert.Equal(t, "Test API Key", response.Name)
assert.NotEmpty(t, response.Token)
}
```
**Cross-project security testing:**
```go
func Test_UpdateApiKey_WithApiKeyFromDifferentProject_ReturnsBadRequest(t *testing.T) {
router := CreateApiKeyTestRouter(GetProjectController(), GetMembershipController())
owner1 := users_testing.CreateTestUser(users_enums.UserRoleMember)
owner2 := users_testing.CreateTestUser(users_enums.UserRoleMember)
project1, _ := projects_testing.CreateTestProjectViaAPI("Project 1", owner1, router)
project2, _ := projects_testing.CreateTestProjectViaAPI("Project 2", owner2, router)
apiKey := CreateTestApiKey("Cross Project Key", project1.ID, owner1.Token, router)
// Try to update via different project endpoint
newName := "Hacked Key" // Go cannot take the address of a string literal, so bind it to a variable first
request := UpdateApiKeyRequestDTO{Name: &newName}
resp := test_utils.MakePutRequest(t, router, "/api/v1/projects/api-keys/"+project2.ID.String()+"/"+apiKey.ID.String(), "Bearer "+owner2.Token, request, http.StatusBadRequest)
assert.Contains(t, string(resp.Body), "API key does not belong to this project")
}
```
**E2E lifecycle testing:**
```go
func Test_ProjectLifecycleE2E_CompletesSuccessfully(t *testing.T) {
router := projects_testing.CreateTestRouter(GetProjectController(), GetMembershipController())
// 1. Create project
owner := users_testing.CreateTestUser(users_enums.UserRoleMember)
project := projects_testing.CreateTestProject("E2E Project", owner, router)
// 2. Add member
member := users_testing.CreateTestUser(users_enums.UserRoleMember)
projects_testing.AddMemberToProject(project, member, users_enums.ProjectRoleMember, owner.Token, router)
// 3. Promote to admin
projects_testing.ChangeMemberRole(project, member.UserID, users_enums.ProjectRoleAdmin, owner.Token, router)
// 4. Transfer ownership
projects_testing.TransferProjectOwnership(project, member.UserID, owner.Token, router)
// 5. Verify new owner can manage project
finalProject := projects_testing.GetProject(project.ID, member.Token, router)
assert.Equal(t, project.ID, finalProject.ID)
}
```

View File

@@ -2,7 +2,7 @@ run:
go run cmd/main.go
test:
go test -count=1 ./internal/...
go test -p=1 -count=1 -failfast .\internal\...
lint:
golangci-lint fmt && golangci-lint run

View File

@@ -20,8 +20,6 @@ import (
"postgresus-backend/internal/features/disk"
healthcheck_attempt "postgresus-backend/internal/features/healthcheck/attempt"
healthcheck_config "postgresus-backend/internal/features/healthcheck/config"
postgres_monitoring_metrics "postgresus-backend/internal/features/monitoring/postgres/metrics"
postgres_monitoring_settings "postgresus-backend/internal/features/monitoring/postgres/settings"
"postgresus-backend/internal/features/notifiers"
"postgresus-backend/internal/features/restores"
"postgresus-backend/internal/features/storages"
@@ -160,8 +158,6 @@ func setUpRoutes(r *gin.Engine) {
healthcheckAttemptController := healthcheck_attempt.GetHealthcheckAttemptController()
diskController := disk.GetDiskController()
backupConfigController := backups_config.GetBackupConfigController()
postgresMonitoringSettingsController := postgres_monitoring_settings.GetPostgresMonitoringSettingsController()
postgresMonitoringMetricsController := postgres_monitoring_metrics.GetPostgresMonitoringMetricsController()
downdetectContoller.RegisterRoutes(v1)
userController.RegisterRoutes(v1)
@@ -175,15 +171,12 @@ func setUpRoutes(r *gin.Engine) {
healthcheckConfigController.RegisterRoutes(v1)
healthcheckAttemptController.RegisterRoutes(v1)
backupConfigController.RegisterRoutes(v1)
postgresMonitoringSettingsController.RegisterRoutes(v1)
postgresMonitoringMetricsController.RegisterRoutes(v1)
}
func setUpDependencies() {
backups.SetupDependencies()
restores.SetupDependencies()
healthcheck_config.SetupDependencies()
postgres_monitoring_settings.SetupDependencies()
}
func runBackgroundTasks(log *slog.Logger) {
@@ -205,10 +198,6 @@ func runBackgroundTasks(log *slog.Logger) {
go runWithPanicLogging(log, "healthcheck attempt background service", func() {
healthcheck_attempt.GetHealthcheckAttemptBackgroundService().Run()
})
go runWithPanicLogging(log, "postgres monitoring metrics background service", func() {
postgres_monitoring_metrics.GetPostgresMonitoringMetricsBackgroundService().Run()
})
}
func runWithPanicLogging(log *slog.Logger, serviceName string, fn func()) {

View File

@@ -1,292 +0,0 @@
package postgres_monitoring_collectors
import (
"context"
"fmt"
"log/slog"
"postgresus-backend/internal/config"
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/databases/databases/postgresql"
postgres_monitoring_metrics "postgresus-backend/internal/features/monitoring/postgres/metrics"
postgres_monitoring_settings "postgresus-backend/internal/features/monitoring/postgres/settings"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
)
// DbMonitoringBackgroundService periodically collects PostgreSQL resource
// metrics for every database with DB monitoring enabled and stores them via
// the metrics service.
type DbMonitoringBackgroundService struct {
	databaseService           *databases.DatabaseService
	monitoringSettingsService *postgres_monitoring_settings.PostgresMonitoringSettingsService
	metricsService            *postgres_monitoring_metrics.PostgresMonitoringMetricService
	logger                    *slog.Logger
	isRunning                 int32 // 0/1 flag toggled via atomic CAS to prevent overlapping passes
	lastRunTimes              map[uuid.UUID]time.Time // last collection time per database; lazily initialized
	lastRunTimesMutex         sync.RWMutex
}

// Run loops until shutdown is requested, attempting a collection pass every
// second; per-database intervals are enforced inside the pass.
func (s *DbMonitoringBackgroundService) Run() {
	for {
		if config.IsShouldShutdown() {
			s.logger.Info("stopping background monitoring tasks")
			return
		}
		s.processMonitoringTasks()
		time.Sleep(1 * time.Second)
	}
}

// processMonitoringTasks runs one pass over all monitored databases. The
// CAS guard skips the pass entirely if the previous one is still in flight.
func (s *DbMonitoringBackgroundService) processMonitoringTasks() {
	if !atomic.CompareAndSwapInt32(&s.isRunning, 0, 1) {
		s.logger.Warn("skipping background task execution, previous task still running")
		return
	}
	defer atomic.StoreInt32(&s.isRunning, 0)
	dbsWithEnabledDbMonitoring, err := s.monitoringSettingsService.GetAllDbsWithEnabledDbMonitoring()
	if err != nil {
		s.logger.Error("failed to get all databases with enabled db monitoring", "error", err)
		return
	}
	for _, dbSettings := range dbsWithEnabledDbMonitoring {
		// Taking the loop variable's address is safe here because
		// processDatabase runs synchronously before the next iteration.
		s.processDatabase(&dbSettings)
	}
}

// processDatabase collects metrics for one database if it is a PostgreSQL
// instance and its per-database interval has elapsed. The last-run time is
// only advanced after a successful collection.
func (s *DbMonitoringBackgroundService) processDatabase(
	settings *postgres_monitoring_settings.PostgresMonitoringSettings,
) {
	db, err := s.databaseService.GetDatabaseByID(settings.DatabaseID)
	if err != nil {
		s.logger.Error("failed to get database by id", "error", err)
		return
	}
	if db.Type != databases.DatabaseTypePostgres {
		return
	}
	if !s.isReadyForNextRun(settings) {
		return
	}
	err = s.collectAndSaveMetrics(db, settings)
	if err != nil {
		s.logger.Error("failed to collect and save metrics", "error", err)
		return
	}
	s.updateLastRunTime(db)
}

// collectAndSaveMetrics opens a connection (bounded by a 30s timeout),
// gathers the enabled metric groups, and bulk-inserts whatever was
// collected. Partial collection failures are logged but do not abort the
// insert of the metrics that did succeed.
func (s *DbMonitoringBackgroundService) collectAndSaveMetrics(
	db *databases.Database,
	settings *postgres_monitoring_settings.PostgresMonitoringSettings,
) error {
	if db.Postgresql == nil {
		return nil
	}
	s.logger.Debug("collecting metrics for database", "database_id", db.ID)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	conn, err := s.connectToDatabase(ctx, db)
	if err != nil {
		return fmt.Errorf("failed to connect to database: %w", err)
	}
	// A nil connection with no error means prerequisites were missing
	// (no PostgreSQL config or database name) — silently skip.
	if conn == nil {
		return nil
	}
	defer func() {
		if closeErr := conn.Close(ctx); closeErr != nil {
			s.logger.Error("Failed to close connection", "error", closeErr)
		}
	}()
	var metrics []postgres_monitoring_metrics.PostgresMonitoringMetric
	now := time.Now().UTC()
	if settings.IsDbResourcesMonitoringEnabled {
		dbMetrics, err := s.collectDatabaseResourceMetrics(ctx, conn, db.ID, now)
		if err != nil {
			s.logger.Error("failed to collect database resource metrics", "error", err)
		} else {
			metrics = append(metrics, dbMetrics...)
		}
	}
	if len(metrics) > 0 {
		if err := s.metricsService.Insert(metrics); err != nil {
			return fmt.Errorf("failed to insert metrics: %w", err)
		}
		s.logger.Debug(
			"successfully collected and saved metrics",
			"count",
			len(metrics),
			"database_id",
			db.ID,
		)
	}
	return nil
}

// isReadyForNextRun reports whether the per-database monitoring interval
// has elapsed since the last successful collection. A nil or missing entry
// means the database has never been collected, so it is always ready.
func (s *DbMonitoringBackgroundService) isReadyForNextRun(
	settings *postgres_monitoring_settings.PostgresMonitoringSettings,
) bool {
	s.lastRunTimesMutex.RLock()
	defer s.lastRunTimesMutex.RUnlock()
	if s.lastRunTimes == nil {
		return true
	}
	lastRun, exists := s.lastRunTimes[settings.DatabaseID]
	if !exists {
		return true
	}
	return time.Since(lastRun) >= time.Duration(settings.MonitoringIntervalSeconds)*time.Second
}

// updateLastRunTime records the current UTC time as the database's last
// collection time, lazily creating the map on first use.
func (s *DbMonitoringBackgroundService) updateLastRunTime(db *databases.Database) {
	s.lastRunTimesMutex.Lock()
	defer s.lastRunTimesMutex.Unlock()
	if s.lastRunTimes == nil {
		s.lastRunTimes = make(map[uuid.UUID]time.Time)
	}
	s.lastRunTimes[db.ID] = time.Now().UTC()
}

// connectToDatabase opens a pgx connection to the database. It returns
// (nil, nil) — not an error — when the PostgreSQL config or database name
// is missing; callers treat a nil connection as "skip this database".
func (s *DbMonitoringBackgroundService) connectToDatabase(
	ctx context.Context,
	db *databases.Database,
) (*pgx.Conn, error) {
	if db.Postgresql == nil {
		return nil, nil
	}
	if db.Postgresql.Database == nil || *db.Postgresql.Database == "" {
		return nil, nil
	}
	connStr := s.buildConnectionString(db.Postgresql)
	return pgx.Connect(ctx, connStr)
}

// buildConnectionString assembles a keyword/value DSN for pgx, requiring
// TLS when the config marks the host as HTTPS.
// NOTE(review): values are interpolated raw into the DSN — a password
// containing spaces or quotes would need quoting per the libpq keyword/value
// format; confirm upstream validation prevents such values.
func (s *DbMonitoringBackgroundService) buildConnectionString(
	pg *postgresql.PostgresqlDatabase,
) string {
	sslMode := "disable"
	if pg.IsHttps {
		sslMode = "require"
	}
	return fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
		pg.Host,
		pg.Port,
		pg.Username,
		pg.Password,
		*pg.Database,
		sslMode,
	)
}

// collectDatabaseResourceMetrics gathers the "db resources" metric group
// (I/O and RAM). Each sub-collector's failure is logged and skipped so one
// failing query does not discard the others' results.
func (s *DbMonitoringBackgroundService) collectDatabaseResourceMetrics(
	ctx context.Context,
	conn *pgx.Conn,
	databaseID uuid.UUID,
	timestamp time.Time,
) ([]postgres_monitoring_metrics.PostgresMonitoringMetric, error) {
	var metrics []postgres_monitoring_metrics.PostgresMonitoringMetric
	// Collect I/O statistics
	ioMetrics, err := s.collectIOMetrics(ctx, conn, databaseID, timestamp)
	if err != nil {
		s.logger.Warn("failed to collect I/O metrics", "error", err)
	} else {
		metrics = append(metrics, ioMetrics...)
	}
	// Collect memory usage (approximation based on buffer usage)
	ramMetric, err := s.collectRAMUsageMetric(ctx, conn, databaseID, timestamp)
	if err != nil {
		s.logger.Warn("failed to collect RAM usage metric", "error", err)
	} else {
		metrics = append(metrics, ramMetric)
	}
	return metrics, nil
}

// collectIOMetrics sums blks_read and blks_hit for the current database and
// converts the block count to bytes.
// NOTE(review): pg_stat_database counters are cumulative since the last
// stats reset, so this value grows monotonically rather than measuring I/O
// per interval — confirm that downstream consumers expect a counter, not a
// rate.
func (s *DbMonitoringBackgroundService) collectIOMetrics(
	ctx context.Context,
	conn *pgx.Conn,
	databaseID uuid.UUID,
	timestamp time.Time,
) ([]postgres_monitoring_metrics.PostgresMonitoringMetric, error) {
	var blocksRead, blocksHit int64
	query := `
		SELECT
			COALESCE(SUM(blks_read), 0) as total_reads,
			COALESCE(SUM(blks_hit), 0) as total_hits
		FROM pg_stat_database
		WHERE datname = current_database()
	`
	err := conn.QueryRow(ctx, query).Scan(&blocksRead, &blocksHit)
	if err != nil {
		return nil, err
	}
	// Calculate I/O activity as total blocks accessed (PostgreSQL block size is typically 8KB)
	const pgBlockSize = 8192 // 8KB
	totalIOBytes := float64((blocksRead + blocksHit) * pgBlockSize)
	return []postgres_monitoring_metrics.PostgresMonitoringMetric{
		{
			DatabaseID: databaseID,
			Metric:     postgres_monitoring_metrics.MetricsTypeDbIO,
			ValueType:  postgres_monitoring_metrics.MetricsValueTypeByte,
			Value:      totalIOBytes,
			CreatedAt:  timestamp,
		},
	}, nil
}

// collectRAMUsageMetric approximates memory usage from cumulative buffer
// hits (blks_hit * 8KB).
// NOTE(review): blks_hit is a counter since stats reset, so this value is
// not a snapshot of current RAM in use — it increases without bound. Verify
// the intended semantics of the DB_RAM_USAGE metric.
func (s *DbMonitoringBackgroundService) collectRAMUsageMetric(
	ctx context.Context,
	conn *pgx.Conn,
	databaseID uuid.UUID,
	timestamp time.Time,
) (postgres_monitoring_metrics.PostgresMonitoringMetric, error) {
	var sharedBuffers int64
	query := `
		SELECT
			COALESCE(SUM(blks_hit), 0) * 8192 as buffer_usage
		FROM pg_stat_database
		WHERE datname = current_database()
	`
	err := conn.QueryRow(ctx, query).Scan(&sharedBuffers)
	if err != nil {
		return postgres_monitoring_metrics.PostgresMonitoringMetric{}, err
	}
	return postgres_monitoring_metrics.PostgresMonitoringMetric{
		DatabaseID: databaseID,
		Metric:     postgres_monitoring_metrics.MetricsTypeDbRAM,
		ValueType:  postgres_monitoring_metrics.MetricsValueTypeByte,
		Value:      float64(sharedBuffers),
		CreatedAt:  timestamp,
	}, nil
}

View File

@@ -1,23 +0,0 @@
package postgres_monitoring_collectors
import (
"postgresus-backend/internal/features/databases"
postgres_monitoring_metrics "postgresus-backend/internal/features/monitoring/postgres/metrics"
postgres_monitoring_settings "postgresus-backend/internal/features/monitoring/postgres/settings"
"postgresus-backend/internal/util/logger"
"sync"
)
// dbMonitoringBackgroundService is wired positionally against the struct's
// field order: the three services, the logger, the isRunning flag, the
// lastRunTimes map (nil here — lazily created on first use), and its mutex.
var dbMonitoringBackgroundService = &DbMonitoringBackgroundService{
	databases.GetDatabaseService(),
	postgres_monitoring_settings.GetPostgresMonitoringSettingsService(),
	postgres_monitoring_metrics.GetPostgresMonitoringMetricsService(),
	logger.GetLogger(),
	0,
	nil,
	sync.RWMutex{},
}

// GetDbMonitoringBackgroundService returns the background service singleton.
func GetDbMonitoringBackgroundService() *DbMonitoringBackgroundService {
	return dbMonitoringBackgroundService
}

View File

@@ -1,33 +0,0 @@
package postgres_monitoring_metrics
import (
"postgresus-backend/internal/config"
"postgresus-backend/internal/util/logger"
"time"
)
// Package-level logger shared by this file.
var log = logger.GetLogger()

// PostgresMonitoringMetricsBackgroundService periodically purges old metric
// rows so the metrics table does not grow without bound.
type PostgresMonitoringMetricsBackgroundService struct {
	metricsRepository *PostgresMonitoringMetricRepository
}

// Run loops until shutdown is requested, purging old metrics every 5 minutes.
func (s *PostgresMonitoringMetricsBackgroundService) Run() {
	for {
		if config.IsShouldShutdown() {
			return
		}
		s.RemoveOldMetrics()
		time.Sleep(5 * time.Minute)
	}
}
// RemoveOldMetrics deletes metric rows older than the retention window.
// Errors are logged and swallowed so the background loop keeps running.
func (s *PostgresMonitoringMetricsBackgroundService) RemoveOldMetrics() {
	// Retention window: 3 "months" approximated as 90 days. The previous
	// local name ("monthAgo") misleadingly suggested a 30-day cutoff.
	cutoff := time.Now().UTC().Add(-3 * 30 * 24 * time.Hour)
	if err := s.metricsRepository.RemoveOlderThan(cutoff); err != nil {
		log.Error("Failed to remove old metrics", "error", err)
	}
}

View File

@@ -1,62 +0,0 @@
package postgres_monitoring_metrics
import (
"net/http"
"postgresus-backend/internal/features/users"
"github.com/gin-gonic/gin"
)
// PostgresMonitoringMetricsController exposes the metrics query endpoint
// over HTTP, authenticating callers via the Authorization header.
type PostgresMonitoringMetricsController struct {
	metricsService *PostgresMonitoringMetricService
	userService    *users.UserService
}

// RegisterRoutes mounts the controller's routes on the given router group.
// POST is used because the query arrives as a JSON body.
func (c *PostgresMonitoringMetricsController) RegisterRoutes(router *gin.RouterGroup) {
	router.POST("/postgres-monitoring-metrics/get", c.GetMetrics)
}

// GetMetrics
// @Summary Get postgres monitoring metrics
// @Description Get postgres monitoring metrics for a database within a time range
// @Tags postgres-monitoring-metrics
// @Accept json
// @Produce json
// @Param request body GetMetricsRequest true "Metrics request data"
// @Success 200 {object} []PostgresMonitoringMetric
// @Failure 400
// @Failure 401
// @Router /postgres-monitoring-metrics/get [post]
func (c *PostgresMonitoringMetricsController) GetMetrics(ctx *gin.Context) {
	// Parse and validate the JSON body (binding:"required" fields).
	var requestDTO GetMetricsRequest
	if err := ctx.ShouldBindJSON(&requestDTO); err != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Resolve the calling user from the Authorization header; the service
	// layer performs the actual permission check.
	authorizationHeader := ctx.GetHeader("Authorization")
	if authorizationHeader == "" {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "authorization header is required"})
		return
	}
	user, err := c.userService.GetUserFromToken(authorizationHeader)
	if err != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
		return
	}
	// Service errors (including authorization failures) map to 400 here.
	metrics, err := c.metricsService.GetMetrics(
		user,
		requestDTO.DatabaseID,
		requestDTO.MetricType,
		requestDTO.From,
		requestDTO.To,
	)
	if err != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	ctx.JSON(http.StatusOK, metrics)
}

View File

@@ -1,35 +0,0 @@
package postgres_monitoring_metrics
import (
"postgresus-backend/internal/features/databases"
"postgresus-backend/internal/features/users"
)
// Package-level singletons, wired positionally:
// repository -> service -> controller / background purger.
var metricsRepository = &PostgresMonitoringMetricRepository{}
var metricsService = &PostgresMonitoringMetricService{
	metricsRepository,
	databases.GetDatabaseService(),
}
var metricsController = &PostgresMonitoringMetricsController{
	metricsService,
	users.GetUserService(),
}
var metricsBackgroundService = &PostgresMonitoringMetricsBackgroundService{
	metricsRepository,
}

// GetPostgresMonitoringMetricsController returns the controller singleton.
func GetPostgresMonitoringMetricsController() *PostgresMonitoringMetricsController {
	return metricsController
}

// GetPostgresMonitoringMetricsService returns the service singleton.
func GetPostgresMonitoringMetricsService() *PostgresMonitoringMetricService {
	return metricsService
}

// GetPostgresMonitoringMetricsRepository returns the repository singleton.
func GetPostgresMonitoringMetricsRepository() *PostgresMonitoringMetricRepository {
	return metricsRepository
}

// GetPostgresMonitoringMetricsBackgroundService returns the purger singleton.
func GetPostgresMonitoringMetricsBackgroundService() *PostgresMonitoringMetricsBackgroundService {
	return metricsBackgroundService
}

View File

@@ -1,14 +0,0 @@
package postgres_monitoring_metrics
import (
"time"
"github.com/google/uuid"
)
// GetMetricsRequest selects metrics of one type for one database within the
// [From, To] time range.
type GetMetricsRequest struct {
	DatabaseID uuid.UUID                    `json:"databaseId" binding:"required"`
	// MetricType has no "required" binding, so the zero value ("") is
	// accepted — NOTE(review): confirm this is intentional.
	MetricType PostgresMonitoringMetricType `json:"metricType"`
	From       time.Time                    `json:"from" binding:"required"`
	To         time.Time                    `json:"to" binding:"required"`
}

View File

@@ -1,16 +0,0 @@
package postgres_monitoring_metrics
// PostgresMonitoringMetricType identifies which measurement a metric row holds.
type PostgresMonitoringMetricType string

const (
	// db resources (don't need extensions)
	MetricsTypeDbRAM PostgresMonitoringMetricType = "DB_RAM_USAGE"
	MetricsTypeDbIO  PostgresMonitoringMetricType = "DB_IO_USAGE"
)

// PostgresMonitoringMetricValueType describes the unit of a metric's Value.
type PostgresMonitoringMetricValueType string

const (
	MetricsValueTypeByte    PostgresMonitoringMetricValueType = "BYTE"
	MetricsValueTypePercent PostgresMonitoringMetricValueType = "PERCENT"
)

View File

@@ -1,20 +0,0 @@
package postgres_monitoring_metrics
import (
	"time"
	"github.com/google/uuid"
)
// PostgresMonitoringMetric is one sampled measurement (e.g. RAM or IO usage)
// for a database at a point in time. Persisted via GORM to the
// postgres_monitoring_metrics table; IDs are generated by the database.
type PostgresMonitoringMetric struct {
	ID uuid.UUID `json:"id" gorm:"column:id;primaryKey;type:uuid;default:gen_random_uuid()"`
	DatabaseID uuid.UUID `json:"databaseId" gorm:"column:database_id;not null;type:uuid"`
	Metric PostgresMonitoringMetricType `json:"metric" gorm:"column:metric;not null"`
	ValueType PostgresMonitoringMetricValueType `json:"valueType" gorm:"column:value_type;not null"`
	Value float64 `json:"value" gorm:"column:value;not null"`
	CreatedAt time.Time `json:"createdAt" gorm:"column:created_at;not null"`
}
// TableName tells GORM which table backs this model.
func (p *PostgresMonitoringMetric) TableName() string {
	return "postgres_monitoring_metrics"
}

View File

@@ -1,45 +0,0 @@
package postgres_monitoring_metrics
import (
	"postgresus-backend/internal/storage"
	"time"
	"github.com/google/uuid"
)
// PostgresMonitoringMetricRepository is the persistence layer for metric rows.
type PostgresMonitoringMetricRepository struct{}

// Insert persists the given batch of metrics in one statement.
func (r *PostgresMonitoringMetricRepository) Insert(metrics []PostgresMonitoringMetric) error {
	return storage.GetDb().Create(&metrics).Error
}

// GetByMetrics returns all rows for one database and metric type whose
// created_at falls inside the inclusive [from, to] window, newest first.
func (r *PostgresMonitoringMetricRepository) GetByMetrics(
	databaseID uuid.UUID,
	metricType PostgresMonitoringMetricType,
	from time.Time,
	to time.Time,
) ([]PostgresMonitoringMetric, error) {
	var rows []PostgresMonitoringMetric
	err := storage.GetDb().
		Where(
			"database_id = ? AND metric = ? AND created_at >= ? AND created_at <= ?",
			databaseID, metricType, from, to,
		).
		Order("created_at DESC").
		Find(&rows).Error
	if err != nil {
		return nil, err
	}
	return rows, nil
}

// RemoveOlderThan deletes every metric created strictly before olderThan.
func (r *PostgresMonitoringMetricRepository) RemoveOlderThan(olderThan time.Time) error {
	return storage.GetDb().Where("created_at < ?", olderThan).Delete(&PostgresMonitoringMetric{}).Error
}

View File

@@ -1,42 +0,0 @@
package postgres_monitoring_metrics
import (
	"errors"
	"postgresus-backend/internal/features/databases"
	users_models "postgresus-backend/internal/features/users/models"
	"time"
	"github.com/google/uuid"
)
// PostgresMonitoringMetricService provides access-checked reads and batch
// writes of monitoring metrics.
type PostgresMonitoringMetricService struct {
	metricsRepository *PostgresMonitoringMetricRepository
	databaseService *databases.DatabaseService
}

// Insert stores the given metrics; an empty batch is a no-op.
func (s *PostgresMonitoringMetricService) Insert(metrics []PostgresMonitoringMetric) error {
	if len(metrics) != 0 {
		return s.metricsRepository.Insert(metrics)
	}
	return nil
}

// GetMetrics returns one metric type for a database within [from, to] after
// verifying the database belongs to the requesting user.
func (s *PostgresMonitoringMetricService) GetMetrics(
	user *users_models.User,
	databaseID uuid.UUID,
	metricType PostgresMonitoringMetricType,
	from time.Time,
	to time.Time,
) ([]PostgresMonitoringMetric, error) {
	db, err := s.databaseService.GetDatabaseByID(databaseID)
	if err != nil {
		return nil, err
	}
	// Hide databases owned by other users behind a generic "not found".
	if db.UserID != user.ID {
		return nil, errors.New("database not found")
	}
	return s.metricsRepository.GetByMetrics(databaseID, metricType, from, to)
}

View File

@@ -1,227 +0,0 @@
package postgres_monitoring_metrics
import (
	"postgresus-backend/internal/features/databases"
	"postgresus-backend/internal/features/notifiers"
	"postgresus-backend/internal/features/storages"
	"postgresus-backend/internal/features/users"
	users_models "postgresus-backend/internal/features/users/models"
	"testing"
	"time"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
)
// Helper function to get a proper users_models.User for testing
func getTestUserModel() *users_models.User {
	signInResponse := users.GetTestUser()
	// Get the user service to retrieve the full user model
	userService := users.GetUserService()
	user, err := userService.GetFirstUser()
	if err != nil {
		panic(err)
	}
	// Verify we got the right user
	if user.ID != signInResponse.UserID {
		panic("user ID mismatch")
	}
	return user
}
// Test_GetMetrics_MetricsReturned verifies that inserted metrics are returned
// ordered newest-first, and that access control rejects foreign users and
// unknown databases.
func Test_GetMetrics_MetricsReturned(t *testing.T) {
	// Setup test data
	testUser := getTestUserModel()
	testUserResponse := users.GetTestUser()
	storage := storages.CreateTestStorage(testUserResponse.UserID)
	notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
	database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
	defer storages.RemoveTestStorage(storage.ID)
	defer notifiers.RemoveTestNotifier(notifier)
	defer databases.RemoveTestDatabase(database)
	// Get service and repository
	service := GetPostgresMonitoringMetricsService()
	repository := GetPostgresMonitoringMetricsRepository()
	// Create test metrics
	now := time.Now().UTC()
	testMetrics := []PostgresMonitoringMetric{
		{
			DatabaseID: database.ID,
			Metric: MetricsTypeDbRAM,
			ValueType: MetricsValueTypeByte,
			Value: 1024000,
			CreatedAt: now.Add(-2 * time.Hour),
		},
		{
			DatabaseID: database.ID,
			Metric: MetricsTypeDbRAM,
			ValueType: MetricsValueTypeByte,
			Value: 2048000,
			CreatedAt: now.Add(-1 * time.Hour),
		},
	}
	// Insert test metrics
	err := repository.Insert(testMetrics)
	assert.NoError(t, err)
	// Test getting DB RAM metrics
	from := now.Add(-3 * time.Hour)
	to := now
	metrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbRAM, from, to)
	assert.NoError(t, err)
	assert.Len(t, metrics, 2)
	// Verify metrics are ordered by created_at DESC
	assert.True(t, metrics[0].CreatedAt.After(metrics[1].CreatedAt))
	assert.Equal(t, float64(2048000), metrics[0].Value)
	assert.Equal(t, float64(1024000), metrics[1].Value)
	assert.Equal(t, MetricsTypeDbRAM, metrics[0].Metric)
	assert.Equal(t, MetricsValueTypeByte, metrics[0].ValueType)
	// Test access control - create another user and test they can't access this database
	anotherUser := &users_models.User{
		ID: uuid.New(),
	}
	_, err = service.GetMetrics(anotherUser, database.ID, MetricsTypeDbRAM, from, to)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "database not found")
	// Test with non-existent database
	nonExistentDbID := uuid.New()
	_, err = service.GetMetrics(testUser, nonExistentDbID, MetricsTypeDbRAM, from, to)
	assert.Error(t, err)
}
// Test_GetMetricsWithPagination_PaginationWorks inserts 25 rows and checks
// they all come back ordered; documents that pagination is not implemented yet.
func Test_GetMetricsWithPagination_PaginationWorks(t *testing.T) {
	// Setup test data
	testUser := getTestUserModel()
	testUserResponse := users.GetTestUser()
	storage := storages.CreateTestStorage(testUserResponse.UserID)
	notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
	database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
	defer storages.RemoveTestStorage(storage.ID)
	defer notifiers.RemoveTestNotifier(notifier)
	defer databases.RemoveTestDatabase(database)
	// Get repository and service
	repository := GetPostgresMonitoringMetricsRepository()
	service := GetPostgresMonitoringMetricsService()
	// Create many test metrics for pagination testing
	now := time.Now().UTC()
	testMetrics := []PostgresMonitoringMetric{}
	for i := 0; i < 25; i++ {
		testMetrics = append(testMetrics, PostgresMonitoringMetric{
			DatabaseID: database.ID,
			Metric: MetricsTypeDbRAM,
			ValueType: MetricsValueTypeByte,
			Value: float64(1000000 + i*100000),
			CreatedAt: now.Add(-time.Duration(i) * time.Minute),
		})
	}
	// Insert test metrics
	err := repository.Insert(testMetrics)
	assert.NoError(t, err)
	// Test getting all metrics via service (should return all 25)
	from := now.Add(-30 * time.Minute)
	to := now
	allMetrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbRAM, from, to)
	assert.NoError(t, err)
	assert.Len(t, allMetrics, 25)
	// Verify they are ordered by created_at DESC (most recent first)
	for i := 0; i < len(allMetrics)-1; i++ {
		assert.True(t, allMetrics[i].CreatedAt.After(allMetrics[i+1].CreatedAt) ||
			allMetrics[i].CreatedAt.Equal(allMetrics[i+1].CreatedAt))
	}
	// Note: Since the current repository doesn't have pagination methods,
	// this test demonstrates the need for pagination but tests current behavior.
	// TODO: Add GetByMetricsWithLimit method to repository and update service
	t.Logf("All metrics count: %d (pagination methods should be added)", len(allMetrics))
}
// Test_GetMetricsWithFilterByType_FilterWorks checks filtering by metric type
// and by time range: RAM rows are returned, IO (none inserted) is empty, and
// the window narrows the result set.
func Test_GetMetricsWithFilterByType_FilterWorks(t *testing.T) {
	// Setup test data
	testUser := getTestUserModel()
	testUserResponse := users.GetTestUser()
	storage := storages.CreateTestStorage(testUserResponse.UserID)
	notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
	database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
	defer storages.RemoveTestStorage(storage.ID)
	defer notifiers.RemoveTestNotifier(notifier)
	defer databases.RemoveTestDatabase(database)
	// Get service and repository
	service := GetPostgresMonitoringMetricsService()
	repository := GetPostgresMonitoringMetricsRepository()
	// Create test metrics of different types
	now := time.Now().UTC()
	testMetrics := []PostgresMonitoringMetric{
		// DB RAM metrics
		{
			DatabaseID: database.ID,
			Metric: MetricsTypeDbRAM,
			ValueType: MetricsValueTypeByte,
			Value: 1024000,
			CreatedAt: now.Add(-2 * time.Hour),
		},
		{
			DatabaseID: database.ID,
			Metric: MetricsTypeDbRAM,
			ValueType: MetricsValueTypeByte,
			Value: 2048000,
			CreatedAt: now.Add(-1 * time.Hour),
		},
	}
	// Insert test metrics
	err := repository.Insert(testMetrics)
	assert.NoError(t, err)
	from := now.Add(-3 * time.Hour)
	to := now
	// Test filtering by DB RAM type
	ramMetrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbRAM, from, to)
	assert.NoError(t, err)
	assert.Len(t, ramMetrics, 2)
	for _, metric := range ramMetrics {
		assert.Equal(t, MetricsTypeDbRAM, metric.Metric)
		assert.Equal(t, MetricsValueTypeByte, metric.ValueType)
	}
	// Test filtering by non-existent metric type (should return empty)
	ioMetrics, err := service.GetMetrics(testUser, database.ID, MetricsTypeDbIO, from, to)
	assert.NoError(t, err)
	assert.Len(t, ioMetrics, 0)
	// Test time filtering - get only recent metrics (last hour)
	recentFrom := now.Add(-1 * time.Hour)
	recentRamMetrics, err := service.GetMetrics(
		testUser,
		database.ID,
		MetricsTypeDbRAM,
		recentFrom,
		to,
	)
	assert.NoError(t, err)
	assert.Len(t, recentRamMetrics, 1) // Only the metric from 1 hour ago
	assert.Equal(t, float64(2048000), recentRamMetrics[0].Value)
}

View File

@@ -1,97 +0,0 @@
package postgres_monitoring_settings
import (
	"net/http"
	"postgresus-backend/internal/features/users"
	"github.com/gin-gonic/gin"
	"github.com/google/uuid"
)
// PostgresMonitoringSettingsController exposes the HTTP endpoints for saving
// and reading per-database postgres monitoring settings.
type PostgresMonitoringSettingsController struct {
	postgresMonitoringSettingsService *PostgresMonitoringSettingsService
	userService *users.UserService
}
// RegisterRoutes attaches this controller's endpoints to the router group.
func (c *PostgresMonitoringSettingsController) RegisterRoutes(router *gin.RouterGroup) {
	router.POST("/postgres-monitoring-settings/save", c.SaveSettings)
	router.GET("/postgres-monitoring-settings/database/:id", c.GetSettingsByDbID)
}
// SaveSettings
// @Summary Save postgres monitoring settings
// @Description Save or update postgres monitoring settings for a database
// @Tags postgres-monitoring-settings
// @Accept json
// @Produce json
// @Param request body PostgresMonitoringSettings true "Postgres monitoring settings data"
// @Success 200 {object} PostgresMonitoringSettings
// @Failure 400
// @Failure 401
// @Router /postgres-monitoring-settings/save [post]
func (c *PostgresMonitoringSettingsController) SaveSettings(ctx *gin.Context) {
	var requestDTO PostgresMonitoringSettings
	if err := ctx.ShouldBindJSON(&requestDTO); err != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	authorizationHeader := ctx.GetHeader("Authorization")
	if authorizationHeader == "" {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "authorization header is required"})
		return
	}
	user, err := c.userService.GetUserFromToken(authorizationHeader)
	if err != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
		return
	}
	// Ownership is enforced inside the service layer.
	err = c.postgresMonitoringSettingsService.Save(user, &requestDTO)
	if err != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	ctx.JSON(http.StatusOK, requestDTO)
}
// GetSettingsByDbID
// @Summary Get postgres monitoring settings by database ID
// @Description Get postgres monitoring settings for a specific database
// @Tags postgres-monitoring-settings
// @Produce json
// @Param id path string true "Database ID"
// @Success 200 {object} PostgresMonitoringSettings
// @Failure 400
// @Failure 401
// @Failure 404
// @Router /postgres-monitoring-settings/database/{id} [get]
func (c *PostgresMonitoringSettingsController) GetSettingsByDbID(ctx *gin.Context) {
	dbID := ctx.Param("id")
	if dbID == "" {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "database ID is required"})
		return
	}
	authorizationHeader := ctx.GetHeader("Authorization")
	if authorizationHeader == "" {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "authorization header is required"})
		return
	}
	user, err := c.userService.GetUserFromToken(authorizationHeader)
	if err != nil {
		ctx.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
		return
	}
	// Fix: uuid.MustParse panicked on a malformed path parameter, turning bad
	// client input into a 500. Parse explicitly and answer 400 instead.
	parsedDbID, err := uuid.Parse(dbID)
	if err != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid database ID"})
		return
	}
	settings, err := c.postgresMonitoringSettingsService.GetByDbID(user, parsedDbID)
	if err != nil {
		ctx.JSON(http.StatusNotFound, gin.H{"error": "postgres monitoring settings not found"})
		return
	}
	ctx.JSON(http.StatusOK, settings)
}

View File

@@ -1,32 +0,0 @@
package postgres_monitoring_settings
import (
	"postgresus-backend/internal/features/databases"
	"postgresus-backend/internal/features/users"
)
// Package-level singletons wiring the monitoring-settings feature:
// repository -> service -> controller, constructed once at init time.
var postgresMonitoringSettingsRepository = &PostgresMonitoringSettingsRepository{}
var postgresMonitoringSettingsService = &PostgresMonitoringSettingsService{
	databases.GetDatabaseService(),
	postgresMonitoringSettingsRepository,
}
var postgresMonitoringSettingsController = &PostgresMonitoringSettingsController{
	postgresMonitoringSettingsService,
	users.GetUserService(),
}
// GetPostgresMonitoringSettingsController returns the shared controller singleton.
func GetPostgresMonitoringSettingsController() *PostgresMonitoringSettingsController {
	return postgresMonitoringSettingsController
}
// GetPostgresMonitoringSettingsService returns the shared service singleton.
func GetPostgresMonitoringSettingsService() *PostgresMonitoringSettingsService {
	return postgresMonitoringSettingsService
}
// GetPostgresMonitoringSettingsRepository returns the shared repository singleton.
func GetPostgresMonitoringSettingsRepository() *PostgresMonitoringSettingsRepository {
	return postgresMonitoringSettingsRepository
}
// SetupDependencies registers the settings service as a listener so default
// settings are created whenever a new database is added.
func SetupDependencies() {
	databases.GetDatabaseService().AddDbCreationListener(postgresMonitoringSettingsService)
}

View File

@@ -1,72 +0,0 @@
package postgres_monitoring_settings
import (
	"postgresus-backend/internal/features/databases"
	"postgresus-backend/internal/util/tools"
	"strings"
	"github.com/google/uuid"
	"gorm.io/gorm"
)
// PostgresMonitoringSettings holds per-database monitoring configuration.
// InstalledExtensions is kept as a comma-joined string in the DB column
// (InstalledExtensionsRaw) and materialized as a slice via GORM hooks.
type PostgresMonitoringSettings struct {
	DatabaseID uuid.UUID `json:"databaseId" gorm:"primaryKey;column:database_id;not null"`
	Database *databases.Database `json:"database" gorm:"foreignKey:DatabaseID"`
	IsDbResourcesMonitoringEnabled bool `json:"isDbResourcesMonitoringEnabled" gorm:"column:is_db_resources_monitoring_enabled;not null"`
	MonitoringIntervalSeconds int64 `json:"monitoringIntervalSeconds" gorm:"column:monitoring_interval_seconds;not null"`
	InstalledExtensions []tools.PostgresqlExtension `json:"installedExtensions" gorm:"-"`
	InstalledExtensionsRaw string `json:"-" gorm:"column:installed_extensions_raw"`
}

// TableName tells GORM which table backs this model.
func (p *PostgresMonitoringSettings) TableName() string {
	return "postgres_monitoring_settings"
}

// AfterFind rebuilds the extension slice from the raw comma-joined column.
func (p *PostgresMonitoringSettings) AfterFind(tx *gorm.DB) error {
	p.InstalledExtensions = []tools.PostgresqlExtension{}
	if p.InstalledExtensionsRaw == "" {
		return nil
	}
	for _, raw := range strings.Split(p.InstalledExtensionsRaw, ",") {
		p.InstalledExtensions = append(p.InstalledExtensions, tools.PostgresqlExtension(raw))
	}
	return nil
}

// BeforeSave flattens the extension slice back into the raw column.
func (p *PostgresMonitoringSettings) BeforeSave(tx *gorm.DB) error {
	parts := make([]string, 0, len(p.InstalledExtensions))
	for _, ext := range p.InstalledExtensions {
		parts = append(parts, string(ext))
	}
	p.InstalledExtensionsRaw = strings.Join(parts, ",")
	return nil
}

// AddInstalledExtensions appends extensions not already present, preserving
// the order of first appearance.
func (p *PostgresMonitoringSettings) AddInstalledExtensions(
	extensions []tools.PostgresqlExtension,
) {
	seen := make(map[tools.PostgresqlExtension]struct{}, len(p.InstalledExtensions))
	for _, ext := range p.InstalledExtensions {
		seen[ext] = struct{}{}
	}
	for _, ext := range extensions {
		if _, ok := seen[ext]; !ok {
			p.InstalledExtensions = append(p.InstalledExtensions, ext)
			seen[ext] = struct{}{}
		}
	}
}

View File

@@ -1,65 +0,0 @@
package postgres_monitoring_settings
import (
	"errors"
	"postgresus-backend/internal/storage"
	"github.com/google/uuid"
	"gorm.io/gorm"
)
// PostgresMonitoringSettingsRepository is the persistence layer for
// per-database monitoring settings.
type PostgresMonitoringSettingsRepository struct{}
// Save inserts or updates the settings row (GORM upsert by primary key).
func (r *PostgresMonitoringSettingsRepository) Save(settings *PostgresMonitoringSettings) error {
	return storage.GetDb().Save(settings).Error
}
// GetByDbID returns the settings row for a database.
// NOTE(review): a missing row surfaces here as gorm.ErrRecordNotFound, while
// GetByDbIDWithRelations maps it to (nil, nil) — confirm callers rely on this
// asymmetry before unifying.
func (r *PostgresMonitoringSettingsRepository) GetByDbID(
	dbID uuid.UUID,
) (*PostgresMonitoringSettings, error) {
	var settings PostgresMonitoringSettings
	if err := storage.
		GetDb().
		Where("database_id = ?", dbID).
		First(&settings).Error; err != nil {
		return nil, err
	}
	return &settings, nil
}
// GetByDbIDWithRelations returns the settings row with its Database preloaded.
// Returns (nil, nil) when no row exists, so callers can lazily create one.
func (r *PostgresMonitoringSettingsRepository) GetByDbIDWithRelations(
	dbID uuid.UUID,
) (*PostgresMonitoringSettings, error) {
	var settings PostgresMonitoringSettings
	if err := storage.
		GetDb().
		Preload("Database").
		Where("database_id = ?", dbID).
		First(&settings).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return &settings, nil
}
// GetAllDbsWithEnabledDbMonitoring lists every settings row where resource
// monitoring is switched on.
func (r *PostgresMonitoringSettingsRepository) GetAllDbsWithEnabledDbMonitoring() (
	[]PostgresMonitoringSettings, error,
) {
	var settings []PostgresMonitoringSettings
	if err := storage.
		GetDb().
		Where("is_db_resources_monitoring_enabled = ?", true).
		Find(&settings).Error; err != nil {
		return nil, err
	}
	return settings, nil
}

View File

@@ -1,92 +0,0 @@
package postgres_monitoring_settings
import (
	"errors"
	"postgresus-backend/internal/features/databases"
	users_models "postgresus-backend/internal/features/users/models"
	"postgresus-backend/internal/util/logger"
	"github.com/google/uuid"
)
var log = logger.GetLogger()
// PostgresMonitoringSettingsService manages per-database monitoring settings:
// lazy creation of defaults, ownership-checked reads and writes.
type PostgresMonitoringSettingsService struct {
	databaseService *databases.DatabaseService
	postgresMonitoringSettingsRepository *PostgresMonitoringSettingsRepository
}
// OnDatabaseCreated creates default monitoring settings for a newly created
// Postgres database. Non-Postgres databases and lookup failures are ignored;
// save failures are only logged because this runs as an event listener.
func (s *PostgresMonitoringSettingsService) OnDatabaseCreated(dbID uuid.UUID) {
	db, err := s.databaseService.GetDatabaseByID(dbID)
	if err != nil {
		return
	}
	if db.Type != databases.DatabaseTypePostgres {
		return
	}
	settings := &PostgresMonitoringSettings{
		DatabaseID: dbID,
		IsDbResourcesMonitoringEnabled: true,
		MonitoringIntervalSeconds: 60,
	}
	if err := s.postgresMonitoringSettingsRepository.Save(settings); err != nil {
		log.Error("failed to save postgres monitoring settings", "error", err)
	}
}
// Save persists settings after verifying the database belongs to the user.
func (s *PostgresMonitoringSettingsService) Save(
	user *users_models.User,
	settings *PostgresMonitoringSettings,
) error {
	db, err := s.databaseService.GetDatabaseByID(settings.DatabaseID)
	if err != nil {
		return err
	}
	if db.UserID != user.ID {
		return errors.New("user does not have access to this database")
	}
	return s.postgresMonitoringSettingsRepository.Save(settings)
}
// GetByDbID returns the settings for a database, lazily creating defaults for
// databases that predate this feature, and enforcing ownership.
//
// Fix: the original re-fetched into a shadowed variable and then recursed into
// GetByDbID again, repeating the fetch a third time. Flattened to a single
// create-then-refetch path with identical outcomes and no recursion.
func (s *PostgresMonitoringSettingsService) GetByDbID(
	user *users_models.User,
	dbID uuid.UUID,
) (*PostgresMonitoringSettings, error) {
	dbSettings, err := s.postgresMonitoringSettingsRepository.GetByDbIDWithRelations(dbID)
	if err != nil {
		return nil, err
	}
	if dbSettings == nil {
		// No row yet: create defaults, then load them (with relations).
		s.OnDatabaseCreated(dbID)
		dbSettings, err = s.postgresMonitoringSettingsRepository.GetByDbIDWithRelations(dbID)
		if err != nil {
			return nil, err
		}
		if dbSettings == nil {
			return nil, errors.New("postgres monitoring settings not found")
		}
	}
	if dbSettings.Database.UserID != user.ID {
		return nil, errors.New("user does not have access to this database")
	}
	return dbSettings, nil
}
// GetAllDbsWithEnabledDbMonitoring lists settings rows with resource
// monitoring enabled.
func (s *PostgresMonitoringSettingsService) GetAllDbsWithEnabledDbMonitoring() (
	[]PostgresMonitoringSettings, error,
) {
	return s.postgresMonitoringSettingsRepository.GetAllDbsWithEnabledDbMonitoring()
}

View File

@@ -1,108 +0,0 @@
package postgres_monitoring_settings
import (
	"postgresus-backend/internal/features/databases"
	"postgresus-backend/internal/features/notifiers"
	"postgresus-backend/internal/features/storages"
	"postgresus-backend/internal/features/users"
	users_models "postgresus-backend/internal/features/users/models"
	"testing"
	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
)
// Helper function to get a proper users_models.User for testing
func getTestUserModel() *users_models.User {
	signInResponse := users.GetTestUser()
	// Get the user service to retrieve the full user model
	userService := users.GetUserService()
	user, err := userService.GetFirstUser()
	if err != nil {
		panic(err)
	}
	// Verify we got the right user
	if user.ID != signInResponse.UserID {
		panic("user ID mismatch")
	}
	return user
}
// Test_DatabaseCreated_SettingsCreated verifies the creation-event listener
// writes a settings row with the documented defaults (enabled, 60s interval).
func Test_DatabaseCreated_SettingsCreated(t *testing.T) {
	// Get or create a test user
	testUserResponse := users.GetTestUser()
	storage := storages.CreateTestStorage(testUserResponse.UserID)
	notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
	database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
	defer storages.RemoveTestStorage(storage.ID)
	defer notifiers.RemoveTestNotifier(notifier)
	defer databases.RemoveTestDatabase(database)
	// Get the monitoring settings service
	service := GetPostgresMonitoringSettingsService()
	// Execute - trigger the database creation event
	service.OnDatabaseCreated(database.ID)
	// Verify settings were created by attempting to retrieve them
	// Note: Since we can't easily mock the extension installation without major changes,
	// we focus on testing the settings creation and default values logic
	settingsRepo := GetPostgresMonitoringSettingsRepository()
	settings, err := settingsRepo.GetByDbID(database.ID)
	assert.NoError(t, err)
	assert.NotNil(t, settings)
	// Verify default settings values
	assert.Equal(t, database.ID, settings.DatabaseID)
	assert.Equal(t, int64(60), settings.MonitoringIntervalSeconds)
	assert.True(t, settings.IsDbResourcesMonitoringEnabled) // Always enabled
}
// Test_GetSettingsByDbID_SettingsReturned covers lazy auto-creation on first
// read, stable re-reads, ownership enforcement, and unknown-database errors.
func Test_GetSettingsByDbID_SettingsReturned(t *testing.T) {
	// Get or create a test user
	testUser := getTestUserModel()
	testUserResponse := users.GetTestUser()
	storage := storages.CreateTestStorage(testUserResponse.UserID)
	notifier := notifiers.CreateTestNotifier(testUserResponse.UserID)
	database := databases.CreateTestDatabase(testUserResponse.UserID, storage, notifier)
	defer storages.RemoveTestStorage(storage.ID)
	defer notifiers.RemoveTestNotifier(notifier)
	defer databases.RemoveTestDatabase(database)
	service := GetPostgresMonitoringSettingsService()
	// Test 1: Get settings that don't exist yet - should auto-create them
	settings, err := service.GetByDbID(testUser, database.ID)
	assert.NoError(t, err)
	assert.NotNil(t, settings)
	assert.Equal(t, database.ID, settings.DatabaseID)
	assert.Equal(t, int64(60), settings.MonitoringIntervalSeconds)
	assert.True(t, settings.IsDbResourcesMonitoringEnabled) // Always enabled
	// Test 2: Get settings that already exist
	existingSettings, err := service.GetByDbID(testUser, database.ID)
	assert.NoError(t, err)
	assert.NotNil(t, existingSettings)
	assert.Equal(t, settings.DatabaseID, existingSettings.DatabaseID)
	assert.Equal(t, settings.MonitoringIntervalSeconds, existingSettings.MonitoringIntervalSeconds)
	// Test 3: Access control - create another user and test they can't access this database
	anotherUser := &users_models.User{
		ID: uuid.New(),
		// Other fields can be empty for this test
	}
	_, err = service.GetByDbID(anotherUser, database.ID)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "user does not have access to this database")
	// Test 4: Try to get settings for non-existent database
	nonExistentDbID := uuid.New()
	_, err = service.GetByDbID(testUser, nonExistentDbID)
	assert.Error(t, err) // Should fail because database doesn't exist
}

View File

@@ -1,6 +1,7 @@
package s3_storage
import (
"bytes"
"context"
"errors"
"fmt"
@@ -129,7 +130,7 @@ func (s *S3Storage) TestConnection() error {
return err
}
// Create a context with 5 second timeout
// Create a context with 10 second timeout
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
@@ -147,6 +148,35 @@ func (s *S3Storage) TestConnection() error {
return fmt.Errorf("bucket '%s' does not exist", s.S3Bucket)
}
// Test write and delete permissions by uploading and removing a small test file
testFileID := uuid.New().String() + "-test"
testData := []byte("test connection")
testReader := bytes.NewReader(testData)
// Upload test file
_, err = client.PutObject(
ctx,
s.S3Bucket,
testFileID,
testReader,
int64(len(testData)),
minio.PutObjectOptions{},
)
if err != nil {
return fmt.Errorf("failed to upload test file to S3: %w", err)
}
// Delete test file
err = client.RemoveObject(
ctx,
s.S3Bucket,
testFileID,
minio.RemoveObjectOptions{},
)
if err != nil {
return fmt.Errorf("failed to delete test file from S3: %w", err)
}
return nil
}

View File

@@ -76,6 +76,9 @@ Nearest features:
Backups flow:
- check Neon backups flow
- check AWS S3 support
- when testing connection with S3 - verify files can be really uploaded
- do not remove old backups on backups disable
- add FTP
- add Dropbox

View File

@@ -1,2 +0,0 @@
export * from './metrics';
export * from './settings';

View File

@@ -1,16 +0,0 @@
import { getApplicationServer } from '../../../../constants';
import RequestOptions from '../../../../shared/api/RequestOptions';
import { apiHelper } from '../../../../shared/api/apiHelper';
import type { GetMetricsRequest } from '../model/GetMetricsRequest';
import type { PostgresMonitoringMetric } from '../model/PostgresMonitoringMetric';
/** Thin API client for the postgres-monitoring-metrics backend endpoints. */
export const metricsApi = {
  /**
   * Fetches stored metric rows for one database, metric type, and time range.
   * POSTs the request body as JSON and returns the parsed metric array.
   */
  async getMetrics(request: GetMetricsRequest): Promise<PostgresMonitoringMetric[]> {
    const requestOptions: RequestOptions = new RequestOptions();
    requestOptions.setBody(JSON.stringify(request));
    return apiHelper.fetchPostJson<PostgresMonitoringMetric[]>(
      `${getApplicationServer()}/api/v1/postgres-monitoring-metrics/get`,
      requestOptions,
    );
  },
};

View File

@@ -1,5 +0,0 @@
export { metricsApi } from './api/metricsApi';
export type { PostgresMonitoringMetric } from './model/PostgresMonitoringMetric';
export type { GetMetricsRequest } from './model/GetMetricsRequest';
export { PostgresMonitoringMetricType } from './model/PostgresMonitoringMetricType';
export { PostgresMonitoringMetricValueType } from './model/PostgresMonitoringMetricValueType';

View File

@@ -1,8 +0,0 @@
import type { PostgresMonitoringMetricType } from './PostgresMonitoringMetricType';
/** Query for one database's metrics of a single type within a time window. */
export interface GetMetricsRequest {
  /** UUID of the database whose metrics are requested. */
  databaseId: string;
  metricType: PostgresMonitoringMetricType;
  /** Window start, ISO-8601 timestamp. */
  from: string;
  /** Window end, ISO-8601 timestamp. */
  to: string;
}

View File

@@ -1,11 +0,0 @@
import type { PostgresMonitoringMetricType } from './PostgresMonitoringMetricType';
import type { PostgresMonitoringMetricValueType } from './PostgresMonitoringMetricValueType';
/** One sampled measurement for a database, as returned by the backend. */
export interface PostgresMonitoringMetric {
  id: string;
  databaseId: string;
  /** Which measurement this row holds (RAM / IO usage). */
  metric: PostgresMonitoringMetricType;
  /** Unit of `value` (bytes or percent). */
  valueType: PostgresMonitoringMetricValueType;
  value: number;
  /** Sample time, ISO-8601 timestamp. */
  createdAt: string;
}

View File

@@ -1,4 +0,0 @@
/** Metric kinds; string values mirror the backend's metric type constants. */
export enum PostgresMonitoringMetricType {
  DB_RAM_USAGE = 'DB_RAM_USAGE',
  DB_IO_USAGE = 'DB_IO_USAGE',
}

View File

@@ -1,4 +0,0 @@
/** Unit of a metric value; string values mirror the backend's constants. */
export enum PostgresMonitoringMetricValueType {
  BYTE = 'BYTE',
  PERCENT = 'PERCENT',
}

View File

@@ -1,24 +0,0 @@
import { getApplicationServer } from '../../../../constants';
import RequestOptions from '../../../../shared/api/RequestOptions';
import { apiHelper } from '../../../../shared/api/apiHelper';
import type { PostgresMonitoringSettings } from '../model/PostgresMonitoringSettings';
/** Thin API client for the postgres-monitoring-settings backend endpoints. */
export const monitoringSettingsApi = {
  /** Saves (creates or updates) monitoring settings; returns the saved object. */
  async saveSettings(settings: PostgresMonitoringSettings) {
    const requestOptions: RequestOptions = new RequestOptions();
    requestOptions.setBody(JSON.stringify(settings));
    return apiHelper.fetchPostJson<PostgresMonitoringSettings>(
      `${getApplicationServer()}/api/v1/postgres-monitoring-settings/save`,
      requestOptions,
    );
  },
  /** Loads the settings for one database by its UUID. */
  async getSettingsByDbID(databaseId: string) {
    const requestOptions: RequestOptions = new RequestOptions();
    return apiHelper.fetchGetJson<PostgresMonitoringSettings>(
      `${getApplicationServer()}/api/v1/postgres-monitoring-settings/database/${databaseId}`,
      requestOptions,
      true,
    );
  },
};

View File

@@ -1,3 +0,0 @@
export { monitoringSettingsApi } from './api/monitoringSettingsApi';
export type { PostgresMonitoringSettings } from './model/PostgresMonitoringSettings';
export { PostgresqlExtension } from './model/PostgresqlExtension';

View File

@@ -1,13 +0,0 @@
import type { Database } from '../../../databases';
import { PostgresqlExtension } from './PostgresqlExtension';
/** Per-database monitoring configuration, mirroring the backend model. */
export interface PostgresMonitoringSettings {
  databaseId: string;
  database?: Database;
  isDbResourcesMonitoringEnabled: boolean;
  monitoringIntervalSeconds: number;
  installedExtensions: PostgresqlExtension[];
  /** Backend-internal comma-joined form; presumably never set by the client. */
  installedExtensionsRaw?: string;
}

View File

@@ -1,4 +0,0 @@
/** PostgreSQL extensions the monitoring feature knows about. */
export enum PostgresqlExtension {
  PG_PROCTAB = 'pg_proctab',
  PG_STAT_STATEMENTS = 'pg_stat_statements',
}

View File

@@ -5,7 +5,6 @@ import { useEffect } from 'react';
import { type Database, databaseApi } from '../../../entity/databases';
import { BackupsComponent } from '../../backups';
import { HealthckeckAttemptsComponent } from '../../healthcheck';
import { MetricsComponent } from '../../monitoring/metrics';
import { DatabaseConfigComponent } from './DatabaseConfigComponent';
interface Props {
@@ -74,7 +73,6 @@ export const DatabaseComponent = ({
<BackupsComponent database={database} />
</>
)}
{currentTab === 'metrics' && <MetricsComponent databaseId={database.id} />}
</div>
);
};

View File

@@ -1 +0,0 @@
export { MetricsComponent } from './ui/MetricsComponent';

View File

@@ -1,245 +0,0 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { Button, Spin } from 'antd';
import dayjs from 'dayjs';
import { useEffect, useState } from 'react';
import {
CartesianGrid,
Line,
LineChart,
ResponsiveContainer,
Tooltip,
XAxis,
YAxis,
} from 'recharts';
import type { GetMetricsRequest, PostgresMonitoringMetric } from '../../../../entity/monitoring';
import { PostgresMonitoringMetricType, metricsApi } from '../../../../entity/monitoring';
/** Props for MetricsComponent. */
interface Props {
  databaseId: string;
}
/** Selectable chart time window. */
type Period = '1H' | '24H' | '7D' | '1M';
/** One chart row: both series merged on a shared timestamp. */
interface ChartDataPoint {
  timestamp: string;
  displayTime: string;
  ramUsage?: number;
  ioUsage?: number;
}
/** Formats a raw byte count as a human-readable string, e.g. "1.5 MB". */
const formatBytes = (bytes: number): string => {
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  let value = bytes;
  let idx = 0;
  // Scale down by 1024 until under the threshold or out of units (caps at TB).
  for (; value >= 1024 && idx < units.length - 1; idx++) {
    value /= 1024;
  }
  return `${value.toFixed(1)} ${units[idx]}`;
};
/**
 * Computes the ISO [from, to] window ending now for a chart period.
 * Unknown periods fall back to the last 24 hours.
 */
const getDateRange = (period: Period) => {
  const now = dayjs();
  const spans: Record<Period, [number, dayjs.ManipulateType]> = {
    '1H': [1, 'hour'],
    '24H': [24, 'hours'],
    '7D': [7, 'days'],
    '1M': [1, 'month'],
  };
  const [amount, unit] = spans[period] ?? [24, 'hours'];
  return {
    from: now.subtract(amount, unit).toISOString(),
    to: now.toISOString(),
  };
};
/**
 * Formats a timestamp for the chart X axis: short time for hour-scale
 * periods, date-qualified for longer ones. Unknown periods use "HH:mm".
 */
const getDisplayTime = (timestamp: string, period: Period): string => {
  const formats: Record<Period, string> = {
    '1H': 'HH:mm',
    '24H': 'HH:mm',
    '7D': 'MM/DD HH:mm',
    '1M': 'MM/DD',
  };
  return dayjs(timestamp).format(formats[period] ?? 'HH:mm');
};
/**
 * Renders RAM and IO usage line charts for a database over a selectable
 * period. Fetches both metric series in parallel whenever the database or
 * the selected period changes.
 */
export const MetricsComponent = ({ databaseId }: Props) => {
  const [selectedPeriod, setSelectedPeriod] = useState<Period>('24H');
  const [isLoading, setIsLoading] = useState(false);
  const [ramData, setRamData] = useState<PostgresMonitoringMetric[]>([]);
  const [ioData, setIoData] = useState<PostgresMonitoringMetric[]>([]);

  useEffect(() => {
    // Fetching lives inside the effect so that a change of database/period
    // (or an unmount) while a request is in flight can be detected: the
    // cleanup marks the previous request stale, and a stale (possibly
    // out-of-order) response is discarded instead of overwriting newer data.
    let isStale = false;

    const loadMetrics = async () => {
      setIsLoading(true);
      try {
        const { from, to } = getDateRange(selectedPeriod);
        // The two series are independent — fetch them in parallel.
        const [ramMetrics, ioMetrics] = await Promise.all([
          metricsApi.getMetrics({
            databaseId,
            metricType: PostgresMonitoringMetricType.DB_RAM_USAGE,
            from,
            to,
          } as GetMetricsRequest),
          metricsApi.getMetrics({
            databaseId,
            metricType: PostgresMonitoringMetricType.DB_IO_USAGE,
            from,
            to,
          } as GetMetricsRequest),
        ]);
        if (!isStale) {
          setRamData(ramMetrics);
          setIoData(ioMetrics);
        }
      } catch (error) {
        if (!isStale) alert((error as Error).message);
      } finally {
        // Clear the spinner only for the request that is still current.
        if (!isStale) setIsLoading(false);
      }
    };

    void loadMetrics();
    return () => {
      isStale = true;
    };
  }, [databaseId, selectedPeriod]);

  // Merge both metric series into one chart-friendly list keyed by
  // timestamp; a point missing from one series yields `undefined` so the
  // chart renders a gap (connectNulls is disabled on both lines).
  const prepareChartData = (): ChartDataPoint[] => {
    const ramMap = new Map(ramData.map((item) => [item.createdAt, item.value]));
    const ioMap = new Map(ioData.map((item) => [item.createdAt, item.value]));
    // Union of timestamps from both series; ISO strings sort chronologically.
    const allTimestamps = Array.from(
      new Set([...ramData.map((d) => d.createdAt), ...ioData.map((d) => d.createdAt)]),
    ).sort();
    return allTimestamps.map((timestamp) => ({
      timestamp,
      displayTime: getDisplayTime(timestamp, selectedPeriod),
      ramUsage: ramMap.get(timestamp),
      ioUsage: ioMap.get(timestamp),
    }));
  };
  const chartData = prepareChartData();
  const periodButtons: Period[] = ['1H', '24H', '7D', '1M'];
  if (isLoading) {
    return (
      <div className="flex justify-center p-8">
        <Spin size="large" />
      </div>
    );
  }
  return (
    <div className="w-full rounded-tr-md rounded-br-md rounded-bl-md bg-white p-5 shadow">
      <div className="mb-6">
        <h3 className="mb-4 text-lg font-bold">Database Metrics</h3>
        <div className="mb-4 rounded-md border border-yellow-300 bg-yellow-50 p-3 text-sm text-yellow-800">
          <div className="flex items-center">
            <InfoCircleOutlined className="mr-2 text-yellow-600" />
            This feature is in development. Do not consider it as production ready.
          </div>
        </div>
        <div className="flex gap-2">
          {periodButtons.map((period) => (
            <Button
              key={period}
              type={selectedPeriod === period ? 'primary' : 'default'}
              onClick={() => setSelectedPeriod(period)}
              size="small"
            >
              {period}
            </Button>
          ))}
        </div>
      </div>
      <div className="grid grid-cols-1 gap-6 lg:grid-cols-2">
        {/* RAM Usage Chart */}
        <div>
          <h4 className="mb-3 text-base font-semibold">RAM Usage (cumulative)</h4>
          <div style={{ width: '100%', height: '300px' }}>
            <ResponsiveContainer width="100%" height="100%">
              <LineChart data={chartData} margin={{ top: 10, right: 30, left: 20, bottom: 5 }}>
                <CartesianGrid strokeDasharray="3 3" stroke="#f0f0f0" />
                <XAxis dataKey="displayTime" tick={{ fontSize: 12 }} stroke="#666" />
                <YAxis tickFormatter={formatBytes} tick={{ fontSize: 12 }} stroke="#666" />
                <Tooltip
                  formatter={(value: number) => [formatBytes(value), 'RAM Usage']}
                  labelStyle={{ color: '#666' }}
                />
                <Line
                  type="monotone"
                  dataKey="ramUsage"
                  stroke="#1890ff"
                  strokeWidth={2}
                  dot={{ fill: '#1890ff', strokeWidth: 2, r: 3 }}
                  connectNulls={false}
                />
              </LineChart>
            </ResponsiveContainer>
          </div>
        </div>
        {/* IO Usage Chart */}
        <div>
          <h4 className="mb-3 text-base font-semibold">IO Usage (cumulative)</h4>
          <div style={{ width: '100%', height: '300px' }}>
            <ResponsiveContainer width="100%" height="100%">
              <LineChart data={chartData} margin={{ top: 10, right: 30, left: 20, bottom: 5 }}>
                <CartesianGrid strokeDasharray="3 3" stroke="#f0f0f0" />
                <XAxis dataKey="displayTime" tick={{ fontSize: 12 }} stroke="#666" />
                <YAxis tickFormatter={formatBytes} tick={{ fontSize: 12 }} stroke="#666" />
                <Tooltip
                  formatter={(value: number) => [formatBytes(value), 'IO Usage']}
                  labelStyle={{ color: '#666' }}
                />
                <Line
                  type="monotone"
                  dataKey="ioUsage"
                  stroke="#52c41a"
                  strokeWidth={2}
                  dot={{ fill: '#52c41a', strokeWidth: 2, r: 3 }}
                  connectNulls={false}
                />
              </LineChart>
            </ResponsiveContainer>
          </div>
        </div>
      </div>
      {chartData.length === 0 && (
        <div className="mt-6 text-center text-gray-500">
          No metrics data available for the selected period
        </div>
      )}
    </div>
  );
};

View File

@@ -1,2 +0,0 @@
// Barrel file: re-exports the monitoring-settings UI components so callers
// can import them from this directory's root instead of the ui/ subfolder.
export { ShowMonitoringSettingsComponent } from './ui/ShowMonitoringSettingsComponent';
export { EditMonitoringSettingsComponent } from './ui/EditMonitoringSettingsComponent';

View File

@@ -1,122 +0,0 @@
import { InfoCircleOutlined } from '@ant-design/icons';
import { Button, Select, Spin, Switch, Tooltip } from 'antd';
import { useEffect, useState } from 'react';
import type { Database } from '../../../../entity/databases';
import type { PostgresMonitoringSettings } from '../../../../entity/monitoring/settings';
import { monitoringSettingsApi } from '../../../../entity/monitoring/settings';
interface Props {
  /** Database whose monitoring settings are being edited. */
  database: Database;
  /** Invoked when the user discards their changes. */
  onCancel: () => void;
  /** Invoked after a successful save with the persisted settings. */
  onSaved: (monitoringSettings: PostgresMonitoringSettings) => void;
}
// Selectable monitoring intervals for the antd <Select>; `value` is the
// interval in seconds as stored in PostgresMonitoringSettings.
const intervalOptions = [
  { label: '15 seconds', value: 15 },
  { label: '30 seconds', value: 30 },
  { label: '1 minute', value: 60 },
  { label: '2 minutes', value: 120 },
  { label: '5 minutes', value: 300 },
  { label: '10 minutes', value: 600 },
  { label: '15 minutes', value: 900 },
  { label: '30 minutes', value: 1800 },
  { label: '1 hour', value: 3600 },
];
/**
 * Edit form for a database's monitoring settings. Loads the current
 * settings on mount (and when the database changes), tracks a dirty flag,
 * and persists the draft via monitoringSettingsApi on Save.
 */
export const EditMonitoringSettingsComponent = ({ database, onCancel, onSaved }: Props) => {
  const [monitoringSettings, setMonitoringSettings] = useState<PostgresMonitoringSettings>();
  const [isUnsaved, setIsUnsaved] = useState(false);
  const [isSaving, setIsSaving] = useState(false);

  // Apply a partial change to the draft and mark the form dirty.
  const updateSettings = (patch: Partial<PostgresMonitoringSettings>) => {
    setMonitoringSettings((prev) => (prev ? { ...prev, ...patch } : prev));
    setIsUnsaved(true);
  };

  // Persist the draft; on success clear the dirty flag and notify the parent.
  const saveSettings = async () => {
    if (!monitoringSettings) return;
    setIsSaving(true);
    try {
      await monitoringSettingsApi.saveSettings(monitoringSettings);
      setIsUnsaved(false);
      onSaved(monitoringSettings);
    } catch (e) {
      alert((e as Error).message);
    } finally {
      // `finally` (was a trailing statement) guarantees the Save button's
      // loading state clears even if onSaved throws.
      setIsSaving(false);
    }
  };

  // (Re)load settings whenever the target database changes.
  useEffect(() => {
    monitoringSettingsApi
      .getSettingsByDbID(database.id)
      .then((res) => {
        setMonitoringSettings(res);
        setIsUnsaved(false);
        setIsSaving(false);
      })
      .catch((e) => {
        alert((e as Error).message);
      });
  }, [database]);

  if (!monitoringSettings) return <Spin size="small" />;

  // Note: an always-true `isAllFieldsValid` flag was removed — every field
  // has a default, so Save is gated on the dirty flag alone (same behavior).
  return (
    <div>
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[200px]">Database resources monitoring</div>
        <Switch
          checked={monitoringSettings.isDbResourcesMonitoringEnabled}
          onChange={(checked) => updateSettings({ isDbResourcesMonitoringEnabled: checked })}
          size="small"
        />
        <Tooltip
          className="cursor-pointer"
          title="Monitor database-specific metrics like connections, locks, buffer cache hit ratio, and transaction statistics."
        >
          <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
        </Tooltip>
      </div>
      <div className="mt-4 mb-1 flex w-full items-center">
        <div className="min-w-[200px]">Monitoring interval</div>
        <Select
          value={monitoringSettings.monitoringIntervalSeconds}
          onChange={(v) => updateSettings({ monitoringIntervalSeconds: v })}
          size="small"
          className="max-w-[200px] grow"
          options={intervalOptions}
        />
        <Tooltip
          className="cursor-pointer"
          title="How often to collect monitoring metrics. Lower intervals provide more detailed data but use more resources."
        >
          <InfoCircleOutlined className="ml-2" style={{ color: 'gray' }} />
        </Tooltip>
      </div>
      <div className="mt-5 flex">
        <Button danger ghost className="mr-1" onClick={onCancel}>
          Cancel
        </Button>
        <Button
          type="primary"
          className="mr-5 ml-auto"
          onClick={saveSettings}
          loading={isSaving}
          disabled={!isUnsaved}
        >
          Save
        </Button>
      </div>
    </div>
  );
};

View File

@@ -1,54 +0,0 @@
import { Switch } from 'antd';
import { useEffect, useState } from 'react';
import type { Database } from '../../../../entity/databases';
import type { PostgresMonitoringSettings } from '../../../../entity/monitoring/settings';
import { monitoringSettingsApi } from '../../../../entity/monitoring/settings';
interface Props {
  /** Database whose monitoring settings are displayed (read-only). */
  database: Database;
}
// Human-readable labels for known monitoring intervals, keyed by seconds.
// Values not listed here are rendered as "<n> seconds" by the component.
const intervalLabels = {
  15: '15 seconds',
  30: '30 seconds',
  60: '1 minute',
  120: '2 minutes',
  300: '5 minutes',
  600: '10 minutes',
  900: '15 minutes',
  1800: '30 minutes',
  3600: '1 hour',
};
/**
 * Read-only view of a database's monitoring settings: the resources-
 * monitoring toggle (disabled) and a human-readable interval label.
 */
export const ShowMonitoringSettingsComponent = ({ database }: Props) => {
  const [monitoringSettings, setMonitoringSettings] = useState<PostgresMonitoringSettings>();

  useEffect(() => {
    if (database.id) {
      monitoringSettingsApi
        .getSettingsByDbID(database.id)
        .then((res) => {
          setMonitoringSettings(res);
        })
        .catch((e) => {
          // The original chain had no rejection handler, so a failed fetch
          // produced an unhandled promise rejection. Surface it the same way
          // the edit component does.
          alert((e as Error).message);
        });
    }
  }, [database]);

  // Render nothing until settings arrive.
  if (!monitoringSettings) return <div />;

  return (
    <div>
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[200px]">Database resources monitoring</div>
        <Switch checked={monitoringSettings.isDbResourcesMonitoringEnabled} disabled size="small" />
      </div>
      <div className="mb-1 flex w-full items-center">
        <div className="min-w-[200px]">Monitoring interval</div>
        <div>
          {intervalLabels[
            monitoringSettings.monitoringIntervalSeconds as keyof typeof intervalLabels
          ] || `${monitoringSettings.monitoringIntervalSeconds} seconds`}
        </div>
      </div>
    </div>
  );
};